From b1332cbbda2df00c8cdbe9929270afb179a45fb6 Mon Sep 17 00:00:00 2001 From: litiliu <38579068+litiliu@users.noreply.github.com> Date: Mon, 11 Mar 2024 10:16:08 +0800 Subject: [PATCH 01/59] [Doc][Improve]fix dockerfile for kubernetes (#6475) --- docs/en/start-v2/kubernetes/kubernetes.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/en/start-v2/kubernetes/kubernetes.mdx b/docs/en/start-v2/kubernetes/kubernetes.mdx index dc913478ab4..15dd1f503a1 100644 --- a/docs/en/start-v2/kubernetes/kubernetes.mdx +++ b/docs/en/start-v2/kubernetes/kubernetes.mdx @@ -51,7 +51,7 @@ RUN wget https://dlcdn.apache.org/seatunnel/${SEATUNNEL_VERSION}/apache-seatunne RUN tar -xzvf apache-seatunnel-${SEATUNNEL_VERSION}-bin.tar.gz RUN mv apache-seatunnel-${SEATUNNEL_VERSION} ${SEATUNNEL_HOME} -RUN cd ${SEATUNNEL_HOME}||sh bin/install-plugin.sh ${SEATUNNEL_VERSION} +RUN cd ${SEATUNNEL_HOME} && sh bin/install-plugin.sh ${SEATUNNEL_VERSION} ``` Then run the following commands to build the image: @@ -79,7 +79,7 @@ RUN wget https://dlcdn.apache.org/seatunnel/${SEATUNNEL_VERSION}/apache-seatunne RUN tar -xzvf apache-seatunnel-${SEATUNNEL_VERSION}-bin.tar.gz RUN mv apache-seatunnel-${SEATUNNEL_VERSION} ${SEATUNNEL_HOME} -RUN cd ${SEATUNNEL_HOME}||sh bin/install-plugin.sh ${SEATUNNEL_VERSION} +RUN cd ${SEATUNNEL_HOME} && sh bin/install-plugin.sh ${SEATUNNEL_VERSION} ``` Then run the following commands to build the image: @@ -107,7 +107,7 @@ RUN wget https://dlcdn.apache.org/seatunnel/${SEATUNNEL_VERSION}/apache-seatunne RUN tar -xzvf apache-seatunnel-${SEATUNNEL_VERSION}-bin.tar.gz RUN mv apache-seatunnel-${SEATUNNEL_VERSION} ${SEATUNNEL_HOME} RUN mkdir -p $SEATUNNEL_HOME/logs -RUN cd ${SEATUNNEL_HOME}||sh bin/install-plugin.sh ${SEATUNNEL_VERSION} +RUN cd ${SEATUNNEL_HOME} && sh bin/install-plugin.sh ${SEATUNNEL_VERSION} ``` Then run the following commands to build the image: From 1ea27afa8764cf32edc07abe665507482f1d4b18 Mon Sep 17 00:00:00 2001 From: lightzhao 
<40714172+lightzhao@users.noreply.github.com> Date: Mon, 11 Mar 2024 10:49:30 +0800 Subject: [PATCH 02/59] [Improve][CDC-Connector]Fix CDC option rule. (#6454) --- .../cdc/mysql/source/MySqlIncrementalSourceFactory.java | 2 +- .../cdc/oracle/source/OracleIncrementalSourceFactory.java | 6 ++---- .../postgres/source/PostgresIncrementalSourceFactory.java | 2 +- .../source/source/SqlServerIncrementalSourceFactory.java | 2 +- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/MySqlIncrementalSourceFactory.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/MySqlIncrementalSourceFactory.java index defe0a6ab98..8147dfe737f 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/MySqlIncrementalSourceFactory.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/MySqlIncrementalSourceFactory.java @@ -57,8 +57,8 @@ public OptionRule optionRule() { .required( JdbcSourceOptions.USERNAME, JdbcSourceOptions.PASSWORD, - CatalogOptions.TABLE_NAMES, JdbcCatalogOptions.BASE_URL) + .exclusive(CatalogOptions.TABLE_NAMES, CatalogOptions.TABLE_PATTERN) .optional( JdbcSourceOptions.DATABASE_NAMES, JdbcSourceOptions.SERVER_ID, diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/OracleIncrementalSourceFactory.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/OracleIncrementalSourceFactory.java index c80f0dc7cea..2a0dc6b2907 100644 --- 
a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/OracleIncrementalSourceFactory.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/oracle/source/OracleIncrementalSourceFactory.java @@ -54,10 +54,8 @@ public String factoryIdentifier() { @Override public OptionRule optionRule() { return JdbcSourceOptions.getBaseRule() - .required( - JdbcSourceOptions.USERNAME, - JdbcSourceOptions.PASSWORD, - CatalogOptions.TABLE_NAMES) + .required(JdbcSourceOptions.USERNAME, JdbcSourceOptions.PASSWORD) + .exclusive(CatalogOptions.TABLE_NAMES, CatalogOptions.TABLE_PATTERN) .bundled(JdbcSourceOptions.HOSTNAME, JdbcSourceOptions.PORT) .optional( JdbcCatalogOptions.BASE_URL, diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-postgres/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/postgres/source/PostgresIncrementalSourceFactory.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-postgres/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/postgres/source/PostgresIncrementalSourceFactory.java index 7d9ddbb5b22..e75c3505ef1 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-postgres/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/postgres/source/PostgresIncrementalSourceFactory.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-postgres/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/postgres/source/PostgresIncrementalSourceFactory.java @@ -56,8 +56,8 @@ public OptionRule optionRule() { .required( JdbcSourceOptions.USERNAME, JdbcSourceOptions.PASSWORD, - CatalogOptions.TABLE_NAMES, JdbcCatalogOptions.BASE_URL) + .exclusive(CatalogOptions.TABLE_NAMES, CatalogOptions.TABLE_PATTERN) .optional( JdbcSourceOptions.DATABASE_NAMES, JdbcSourceOptions.SERVER_TIME_ZONE, diff --git 
a/seatunnel-connectors-v2/connector-cdc/connector-cdc-sqlserver/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/sqlserver/source/source/SqlServerIncrementalSourceFactory.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-sqlserver/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/sqlserver/source/source/SqlServerIncrementalSourceFactory.java index 95031e9b9ff..09a12c6e4ed 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-sqlserver/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/sqlserver/source/source/SqlServerIncrementalSourceFactory.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-sqlserver/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/sqlserver/source/source/SqlServerIncrementalSourceFactory.java @@ -58,8 +58,8 @@ public OptionRule optionRule() { .required( JdbcSourceOptions.USERNAME, JdbcSourceOptions.PASSWORD, - CatalogOptions.TABLE_NAMES, JdbcCatalogOptions.BASE_URL) + .exclusive(CatalogOptions.TABLE_NAMES, CatalogOptions.TABLE_PATTERN) .optional( JdbcSourceOptions.DATABASE_NAMES, JdbcSourceOptions.SERVER_TIME_ZONE, From 7f051b2deb9cc523b3a42c5b1f9cbe51ddc84989 Mon Sep 17 00:00:00 2001 From: dzygoon <96854451+dzygoon@users.noreply.github.com> Date: Mon, 11 Mar 2024 11:14:15 +0800 Subject: [PATCH 03/59] [Doc][Improve]Support Chinese for /seatunnel-engine/rest-api.md and local-mode.md and cluster-mode.md and checkpoint-storage.md (#6445) --- .../zh/seatunnel-engine/checkpoint-storage.md | 187 +++++++++ docs/zh/seatunnel-engine/cluster-mode.md | 21 + docs/zh/seatunnel-engine/local-mode.md | 25 ++ docs/zh/seatunnel-engine/rest-api.md | 384 ++++++++++++++++++ 4 files changed, 617 insertions(+) create mode 100644 docs/zh/seatunnel-engine/checkpoint-storage.md create mode 100644 docs/zh/seatunnel-engine/cluster-mode.md create mode 100644 docs/zh/seatunnel-engine/local-mode.md create mode 100644 docs/zh/seatunnel-engine/rest-api.md diff --git 
a/docs/zh/seatunnel-engine/checkpoint-storage.md b/docs/zh/seatunnel-engine/checkpoint-storage.md new file mode 100644 index 00000000000..795e7bf63b5 --- /dev/null +++ b/docs/zh/seatunnel-engine/checkpoint-storage.md @@ -0,0 +1,187 @@ +--- + +sidebar_position: 7 +------------------- + +# 检查点存储 + +## 简介 + +检查点是一种容错恢复机制。这种机制确保程序在运行时,即使突然遇到异常,也能自行恢复。 + +### 检查点存储 + +检查点存储是一种存储检查点数据的存储机制。 + +SeaTunnel Engine支持以下检查点存储类型: + +- HDFS (OSS,S3,HDFS,LocalFile) +- LocalFile (本地),(已弃用: 使用Hdfs(LocalFile)替代). + +我们使用微内核设计模式将检查点存储模块从引擎中分离出来。这允许用户实现他们自己的检查点存储模块。 + +`checkpoint-storage-api`是检查点存储模块API,它定义了检查点存储模块的接口。 + +如果你想实现你自己的检查点存储模块,你需要实现`CheckpointStorage`并提供相应的`CheckpointStorageFactory`实现。 + +### 检查点存储配置 + +`seatunnel-server`模块的配置在`seatunnel.yaml`文件中。 + +```yaml + +seatunnel: + engine: + checkpoint: + storage: + type: hdfs #检查点存储的插件名称,支持hdfs(S3, local, hdfs), 默认为localfile (本地文件), 但这种方式已弃用 + # 插件配置 + plugin-config: + namespace: #检查点存储父路径,默认值为/seatunnel/checkpoint/ + K1: V1 # 插件其它配置 + K2: V2 # 插件其它配置 +``` + +注意: namespace必须以"/"结尾。 + +#### OSS + +阿里云oss是基于hdfs-file,所以你可以参考[hadoop oss文档](https://hadoop.apache.org/docs/stable/hadoop-aliyun/tools/hadoop-aliyun/index.html)来配置oss. + +除了与oss buckets交互外,oss客户端需要与buckets交互所需的凭据。 +客户端支持多种身份验证机制,并且可以配置使用哪种机制及其使用顺序。也可以使用of org.apache.hadoop.fs.aliyun.oss.AliyunCredentialsProvider的自定义实现。 +如果您使用AliyunCredentialsProvider(可以从阿里云访问密钥管理中获得),它们包括一个access key和一个secret key。 +你可以这样配置: + +```yaml +seatunnel: + engine: + checkpoint: + interval: 6000 + timeout: 7000 + storage: + type: hdfs + max-retained: 3 + plugin-config: + storage.type: oss + oss.bucket: your-bucket + fs.oss.accessKeyId: your-access-key + fs.oss.accessKeySecret: your-secret-key + fs.oss.endpoint: endpoint address + fs.oss.credentials.provider: org.apache.hadoop.fs.aliyun.oss.AliyunCredentialsProvider +``` + +有关Hadoop Credential Provider API的更多信息,请参见: [Credential Provider API](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/CredentialProviderAPI.html). 
+ +阿里云oss凭证提供程序实现见: [验证凭证提供](https://github.com/aliyun/aliyun-oss-java-sdk/tree/master/src/main/java/com/aliyun/oss/common/auth) + +#### S3 + +S3基于hdfs-file,所以你可以参考[hadoop s3文档](https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html)来配置s3。 + +除了与公共S3 buckets交互之外,S3A客户端需要与buckets交互所需的凭据。 +客户端支持多种身份验证机制,并且可以配置使用哪种机制及其使用顺序。也可以使用com.amazonaws.auth.AWSCredentialsProvider的自定义实现。 +如果您使用SimpleAWSCredentialsProvider(可以从Amazon Security Token服务中获得),它们包括一个access key和一个secret key。 +您可以这样配置: + +```yaml +``` yaml + +seatunnel: + engine: + checkpoint: + interval: 6000 + timeout: 7000 + storage: + type: hdfs + max-retained: 3 + plugin-config: + storage.type: s3 + s3.bucket: your-bucket + fs.s3a.access.key: your-access-key + fs.s3a.secret.key: your-secret-key + fs.s3a.aws.credentials.provider: org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider + + +``` + +如果您使用`InstanceProfileCredentialsProvider`,它支持在EC2 VM中运行时使用实例配置文件凭据,您可以检查[iam-roles-for-amazon-ec2](https://docs.aws.amazon.com/zh_cn/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html). +您可以这样配置: + +```yaml + +seatunnel: + engine: + checkpoint: + interval: 6000 + timeout: 7000 + storage: + type: hdfs + max-retained: 3 + plugin-config: + storage.type: s3 + s3.bucket: your-bucket + fs.s3a.endpoint: your-endpoint + fs.s3a.aws.credentials.provider: org.apache.hadoop.fs.s3a.InstanceProfileCredentialsProvider +``` + +有关Hadoop Credential Provider API的更多信息,请参见: [Credential Provider API](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/CredentialProviderAPI.html). 
+ +#### HDFS + +如果您使用HDFS,您可以这样配置: + +```yaml +seatunnel: + engine: + checkpoint: + storage: + type: hdfs + max-retained: 3 + plugin-config: + storage.type: hdfs + fs.defaultFS: hdfs://localhost:9000 + // 如果您使用kerberos,您可以这样配置: + kerberosPrincipal: your-kerberos-principal + kerberosKeytabFilePath: your-kerberos-keytab +``` + +如果HDFS是HA模式,您可以这样配置: + +```yaml +seatunnel: + engine: + checkpoint: + storage: + type: hdfs + max-retained: 3 + plugin-config: + storage.type: hdfs + fs.defaultFS: hdfs://usdp-bing + seatunnel.hadoop.dfs.nameservices: usdp-bing + seatunnel.hadoop.dfs.ha.namenodes.usdp-bing: nn1,nn2 + seatunnel.hadoop.dfs.namenode.rpc-address.usdp-bing.nn1: usdp-bing-nn1:8020 + seatunnel.hadoop.dfs.namenode.rpc-address.usdp-bing.nn2: usdp-bing-nn2:8020 + seatunnel.hadoop.dfs.client.failover.proxy.provider.usdp-bing: org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider + +``` + +如果HDFS在`hdfs-site.xml`或`core-site.xml`中有其他配置,只需使用`seatunnel.hadoop.`前缀设置HDFS配置即可。 + +#### 本地文件 + +```yaml +seatunnel: + engine: + checkpoint: + interval: 6000 + timeout: 7000 + storage: + type: hdfs + max-retained: 3 + plugin-config: + storage.type: hdfs + fs.defaultFS: file:/// # 请确保该目录具有写权限 + +``` + diff --git a/docs/zh/seatunnel-engine/cluster-mode.md b/docs/zh/seatunnel-engine/cluster-mode.md new file mode 100644 index 00000000000..a0b11cd1dfa --- /dev/null +++ b/docs/zh/seatunnel-engine/cluster-mode.md @@ -0,0 +1,21 @@ +--- + +sidebar_position: 3 +------------------- + +# 以集群模式运行作业 + +这是最推荐的在生产环境中使用SeaTunnel Engine的方法。此模式支持SeaTunnel Engine的全部功能,集群模式将具有更好的性能和稳定性。 + +在集群模式下,首先需要部署SeaTunnel Engine集群,然后客户端将作业提交给SeaTunnel Engine集群运行。 + +## 部署SeaTunnel Engine集群 + +部署SeaTunnel Engine集群参考[SeaTunnel Engine集群部署](../../en/seatunnel-engine/deployment.md) + +## 提交作业 + +```shell +$SEATUNNEL_HOME/bin/seatunnel.sh --config $SEATUNNEL_HOME/config/v2.batch.config.template +``` + diff --git a/docs/zh/seatunnel-engine/local-mode.md b/docs/zh/seatunnel-engine/local-mode.md new file 
mode 100644 index 00000000000..3738721fa79 --- /dev/null +++ b/docs/zh/seatunnel-engine/local-mode.md @@ -0,0 +1,25 @@ +--- + +sidebar_position: 2 +------------------- + +# 以本地模式运行作业 + +仅用于测试。 + +最推荐在生产环境中使用SeaTunnel Engine的方式为[集群模式](cluster-mode.md). + +## 本地模式部署SeaTunnel Engine + +[部署SeaTunnel Engine本地模式参考](../../en/start-v2/locally/deployment.md) + +## 修改SeaTunnel Engine配置 + +将$SEATUNNEL_HOME/config/hazelcast.yaml中的自动增量更新为true + +## 提交作业 + +```shell +$SEATUNNEL_HOME/bin/seatunnel.sh --config $SEATUNNEL_HOME/config/v2.batch.config.template -e local +``` + diff --git a/docs/zh/seatunnel-engine/rest-api.md b/docs/zh/seatunnel-engine/rest-api.md new file mode 100644 index 00000000000..a3f8d10d190 --- /dev/null +++ b/docs/zh/seatunnel-engine/rest-api.md @@ -0,0 +1,384 @@ +--- + +sidebar_position: 7 +------------------- + +# REST API + +SeaTunnel有一个用于监控的API,可用于查询运行作业的状态和统计信息,以及最近完成的作业。监控API是REST-ful风格的,它接受HTTP请求并使用JSON数据格式进行响应。 + +## 概述 + +监控API是由运行的web服务提供的,它是节点运行的一部分,每个节点成员都可以提供rest API功能。 +默认情况下,该服务监听端口为5801,该端口可以在hazelcast.yaml中配置,如下所示: + +```yaml +network: + rest-api: + enabled: true + endpoint-groups: + CLUSTER_WRITE: + enabled: true + DATA: + enabled: true + join: + tcp-ip: + enabled: true + member-list: + - localhost + port: + auto-increment: true + port-count: 100 + port: 5801 +``` + +## API参考 + +### 返回所有作业及其当前状态的概览。 + +
+ GET /hazelcast/rest/maps/running-jobs (返回所有作业及其当前状态的概览。) + +#### 参数 + +#### 响应 + +```json +[ + { + "jobId": "", + "jobName": "", + "jobStatus": "", + "envOptions": { + }, + "createTime": "", + "jobDag": { + "vertices": [ + ], + "edges": [ + ] + }, + "pluginJarsUrls": [ + ], + "isStartWithSavePoint": false, + "metrics": { + "sourceReceivedCount": "", + "sinkWriteCount": "" + } + } +] +``` + +
+ +------------------------------------------------------------------------------------------ + +### 返回作业的详细信息。 + +
+ GET /hazelcast/rest/maps/running-job/:jobId (返回作业的详细信息。) + +#### 参数 + +> | name | type | data type | description | +> |-------|----------|-----------|-------------| +> | jobId | required | long | job id | + +#### 响应 + +```json +{ + "jobId": "", + "jobName": "", + "jobStatus": "", + "envOptions": { + }, + "createTime": "", + "jobDag": { + "vertices": [ + ], + "edges": [ + ] + }, + "pluginJarsUrls": [ + ], + "isStartWithSavePoint": false, + "metrics": { + "sourceReceivedCount": "", + "sinkWriteCount": "" + } +} +``` + +
+ +------------------------------------------------------------------------------------------ + +### 返回所有已完成的作业信息。 + +
+ GET /hazelcast/rest/maps/finished-jobs/:state (返回所有已完成的作业信息。) + +#### 参数 + +> | name | type | data type | description | +> |-------|----------|-----------|------------------------------------------------------------------| +> | state | optional | string | finished job status. `FINISHED`,`CANCELED`,`FAILED`,`UNKNOWABLE` | + +#### 响应 + +```json +[ + { + "jobId": "", + "jobName": "", + "jobStatus": "", + "errorMsg": null, + "createTime": "", + "finishTime": "", + "jobDag": "", + "metrics": "" + } +] +``` + +
+ +------------------------------------------------------------------------------------------ + +### 返回系统监控信息。 + +
+ GET /hazelcast/rest/maps/system-monitoring-information (返回系统监控信息。) + +#### 参数 + +#### 响应 + +```json +[ + { + "processors":"8", + "physical.memory.total":"16.0G", + "physical.memory.free":"16.3M", + "swap.space.total":"0", + "swap.space.free":"0", + "heap.memory.used":"135.7M", + "heap.memory.free":"440.8M", + "heap.memory.total":"576.5M", + "heap.memory.max":"3.6G", + "heap.memory.used/total":"23.54%", + "heap.memory.used/max":"3.73%", + "minor.gc.count":"6", + "minor.gc.time":"110ms", + "major.gc.count":"2", + "major.gc.time":"73ms", + "load.process":"24.78%", + "load.system":"60.00%", + "load.systemAverage":"2.07", + "thread.count":"117", + "thread.peakCount":"118", + "cluster.timeDiff":"0", + "event.q.size":"0", + "executor.q.async.size":"0", + "executor.q.client.size":"0", + "executor.q.client.query.size":"0", + "executor.q.client.blocking.size":"0", + "executor.q.query.size":"0", + "executor.q.scheduled.size":"0", + "executor.q.io.size":"0", + "executor.q.system.size":"0", + "executor.q.operations.size":"0", + "executor.q.priorityOperation.size":"0", + "operations.completed.count":"10", + "executor.q.mapLoad.size":"0", + "executor.q.mapLoadAllKeys.size":"0", + "executor.q.cluster.size":"0", + "executor.q.response.size":"0", + "operations.running.count":"0", + "operations.pending.invocations.percentage":"0.00%", + "operations.pending.invocations.count":"0", + "proxy.count":"8", + "clientEndpoint.count":"0", + "connection.active.count":"2", + "client.connection.count":"0", + "connection.count":"0" + } +] +``` + +
+ +------------------------------------------------------------------------------------------ + +### 提交作业。 + +
+POST /hazelcast/rest/maps/submit-job (如果作业提交成功,返回jobId和jobName。) + +#### 参数 + +> | name | type | data type | description | +> |----------------------|----------|-----------|-----------------------------------| +> | jobId | optional | string | job id | +> | jobName | optional | string | job name | +> | isStartWithSavePoint | optional | string | if job is started with save point | + +#### 请求体 + +```json +{ + "env": { + "job.mode": "batch" + }, + "source": [ + { + "plugin_name": "FakeSource", + "result_table_name": "fake", + "row.num": 100, + "schema": { + "fields": { + "name": "string", + "age": "int", + "card": "int" + } + } + } + ], + "transform": [ + ], + "sink": [ + { + "plugin_name": "Console", + "source_table_name": ["fake"] + } + ] +} +``` + +#### 响应 + +```json +{ + "jobId": 733584788375666689, + "jobName": "rest_api_test" +} +``` + +
+ +------------------------------------------------------------------------------------------ + +### 停止作业。 + +
+POST /hazelcast/rest/maps/stop-job (如果作业成功停止,返回jobId。) + +#### 请求体 + +```json +{ + "jobId": 733584788375666689, + "isStopWithSavePoint": false # if job is stopped with save point +} +``` + +#### 响应 + +```json +{ +"jobId": 733584788375666689 +} +``` + +
+ +------------------------------------------------------------------------------------------ + +### 加密配置。 + +
+POST /hazelcast/rest/maps/encrypt-config (如果配置加密成功,则返回加密后的配置。) +有关自定义加密的更多信息,请参阅文档[配置-加密-解密](../connector-v2/Config-Encryption-Decryption.md). + +#### 请求体 + +```json +{ + "env": { + "parallelism": 1, + "shade.identifier":"base64" + }, + "source": [ + { + "plugin_name": "MySQL-CDC", + "schema" : { + "fields": { + "name": "string", + "age": "int" + } + }, + "result_table_name": "fake", + "parallelism": 1, + "hostname": "127.0.0.1", + "username": "seatunnel", + "password": "seatunnel_password", + "table-name": "inventory_vwyw0n" + } + ], + "transform": [ + ], + "sink": [ + { + "plugin_name": "Clickhouse", + "host": "localhost:8123", + "database": "default", + "table": "fake_all", + "username": "seatunnel", + "password": "seatunnel_password" + } + ] +} +``` + +#### 响应 + +```json +{ + "env": { + "parallelism": 1, + "shade.identifier": "base64" + }, + "source": [ + { + "plugin_name": "MySQL-CDC", + "schema": { + "fields": { + "name": "string", + "age": "int" + } + }, + "result_table_name": "fake", + "parallelism": 1, + "hostname": "127.0.0.1", + "username": "c2VhdHVubmVs", + "password": "c2VhdHVubmVsX3Bhc3N3b3Jk", + "table-name": "inventory_vwyw0n" + } + ], + "transform": [], + "sink": [ + { + "plugin_name": "Clickhouse", + "host": "localhost:8123", + "database": "default", + "table": "fake_all", + "username": "c2VhdHVubmVs", + "password": "c2VhdHVubmVsX3Bhc3N3b3Jk" + } + ] +} +``` + +
+ From 2658131170599f959c0e85635588b980ec4eed7e Mon Sep 17 00:00:00 2001 From: dzygoon <96854451+dzygoon@users.noreply.github.com> Date: Mon, 11 Mar 2024 15:02:44 +0800 Subject: [PATCH 04/59] [Doc][Improve]Add Support Chinese for start-v2/locally/quick-start-seatunnel-engine.md and start-v2/locally/deployment.md (#6406) --- docs/zh/start-v2/locally/deployment.md | 81 ++++++++++++++++ .../locally/quick-start-seatunnel-engine.md | 96 +++++++++++++++++++ 2 files changed, 177 insertions(+) create mode 100644 docs/zh/start-v2/locally/deployment.md create mode 100644 docs/zh/start-v2/locally/quick-start-seatunnel-engine.md diff --git a/docs/zh/start-v2/locally/deployment.md b/docs/zh/start-v2/locally/deployment.md new file mode 100644 index 00000000000..543156e22ab --- /dev/null +++ b/docs/zh/start-v2/locally/deployment.md @@ -0,0 +1,81 @@ +--- + +sidebar_position: 1 +------------------- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# 本地部署 + +## 步骤 1: 准备工作 + +在开始本地运行前,您需要确保您已经安装了SeaTunnel所需要的以下软件: + +* 安装[Java](https://www.java.com/en/download/) (Java 8 或 11, 其他高于Java 8的版本理论上也可以工作) 以及设置 `JAVA_HOME`。 + +## 步骤 2: 下载 SeaTunnel + +进入[seatunnel下载页面](https://seatunnel.apache.org/download)下载最新版本的发布版安装包`seatunnel--bin.tar.gz` + +或者您也可以通过终端下载 + +```shell +export version="2.3.5" +wget "https://archive.apache.org/dist/seatunnel/${version}/apache-seatunnel-${version}-bin.tar.gz" +tar -xzvf "apache-seatunnel-${version}-bin.tar.gz" +``` + +## 步骤 3: 安装连接器插件 + +从2.2.0-beta版本开始,二进制包不再默认提供连接器依赖,因此在第一次使用它时,您需要执行以下命令来安装连接器:(当然,您也可以从 [Apache Maven Repository](https://repo.maven.apache.org/maven2/org/apache/seatunnel/) 手动下载连接器,然后将其移动至`connectors/seatunnel`目录下)。 + +```bash +sh bin/install-plugin.sh 2.3.5 +``` + +如果您需要指定的连接器版本,以2.3.5为例,您需要执行如下命令 + +```bash +sh bin/install-plugin.sh 2.3.5 +``` + +通常您并不需要所有的连接器插件,所以您可以通过配置`config/plugin_config`来指定您所需要的插件,例如,您只需要`connector-console`插件,那么您可以修改plugin.properties配置文件如下 + +```plugin_config +--seatunnel-connectors-- 
+connector-console +--end-- +``` + +如果您希望示例应用程序能正常工作,那么您需要添加以下插件 + +```plugin_config +--seatunnel-connectors-- +connector-fake +connector-console +--end-- +``` + +您可以在`${SEATUNNEL_HOME}/connectors/plugins-mapping.properties`下找到所有支持的连接器和相应的plugin_config配置名称。 + +:::提示 + +如果您想通过手动下载连接器的方式来安装连接器插件,则需要特别注意以下事项 + +连接器目录包含以下子目录,如果他们不存在,则需要手动创建它们 + +``` +flink +flink-sql +seatunnel +spark +``` + +如果您想手动安装V2连接器插件,您只需要下载您所需要的连接器插件即可,并将它们放在seatunnel目录下 + +::: + +## 此外 + +现在,您已经完成了SeaTunnel部署。您可以按照[快速开始](quick-start-seatunnel-engine.md)来配置并运行数据同步作业了。 diff --git a/docs/zh/start-v2/locally/quick-start-seatunnel-engine.md b/docs/zh/start-v2/locally/quick-start-seatunnel-engine.md new file mode 100644 index 00000000000..ae460740c45 --- /dev/null +++ b/docs/zh/start-v2/locally/quick-start-seatunnel-engine.md @@ -0,0 +1,96 @@ +--- + +sidebar_position: 2 +------------------- + +# SeaTunnel Engine快速开始 + +## 步骤 1: 部署SeaTunnel及连接器 + +在开始前,请确保您已经按照[部署](deployment.md)中的描述下载并部署了SeaTunnel + +## 步骤 2: 添加作业配置文件来定义作业 + +编辑`config/v2.batch.config.template`,它决定了当seatunnel启动后数据输入、处理和输出的方式及逻辑。 +下面是配置文件的示例,它与上面提到的示例应用程序相同。 + +```hocon +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FakeSource { + result_table_name = "fake" + row.num = 16 + schema = { + fields { + name = "string" + age = "int" + } + } + } +} + +transform { + FieldMapper { + source_table_name = "fake" + result_table_name = "fake1" + field_mapper = { + age = age + name = new_name + } + } +} + +sink { + Console { + source_table_name = "fake1" + } +} + +``` + +关于配置的更多信息请查看[配置的基本概念](../../../en/concept/config.md) + +## 步骤 3: 运行SeaTunnel应用程序 + +您可以通过以下命令启动应用程序 + +```shell +cd "apache-seatunnel-${version}" +./bin/seatunnel.sh --config ./config/v2.batch.config.template -e local + +``` + +**查看输出**: 当您运行该命令时,您可以在控制台中看到它的输出。您可以认为这是命令运行成功或失败的标志。 + +SeaTunnel控制台将会打印一些如下日志信息: + +```shell +2022-12-19 11:01:45,417 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - output rowType: name, age +2022-12-19 
11:01:46,489 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=1: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: CpiOd, 8520946 +2022-12-19 11:01:46,490 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=2: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: eQqTs, 1256802974 +2022-12-19 11:01:46,490 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=3: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: UsRgO, 2053193072 +2022-12-19 11:01:46,490 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=4: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: jDQJj, 1993016602 +2022-12-19 11:01:46,490 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=5: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: rqdKp, 1392682764 +2022-12-19 11:01:46,490 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=6: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: wCoWN, 986999925 +2022-12-19 11:01:46,490 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=7: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: qomTU, 72775247 +2022-12-19 11:01:46,490 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=8: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: jcqXR, 1074529204 +2022-12-19 11:01:46,490 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=9: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: AkWIO, 1961723427 +2022-12-19 11:01:46,490 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=10: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: hBoib, 929089763 +2022-12-19 
11:01:46,490 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=11: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: GSvzm, 827085798 +2022-12-19 11:01:46,491 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=12: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: NNAYI, 94307133 +2022-12-19 11:01:46,491 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=13: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: EexFl, 1823689599 +2022-12-19 11:01:46,491 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=14: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: CBXUb, 869582787 +2022-12-19 11:01:46,491 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=15: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: Wbxtm, 1469371353 +2022-12-19 11:01:46,491 INFO org.apache.seatunnel.connectors.seatunnel.console.sink.ConsoleSinkWriter - subtaskIndex=0 rowIndex=16: SeaTunnelRow#tableId=-1 SeaTunnelRow#kind=INSERT: mIJDt, 995616438 +``` + +## 此外 + +现在,您已经快速浏览了SeaTunnel,你可以通过[连接器](../../../en/connector-v2/source/FakeSource.md)来找到SeaTunnel所支持的所有source和sink。 +如果您想要了解更多关于信息,请参阅[SeaTunnel引擎](../../../en/seatunnel-engine/about.md). 
+ +SeaTunnel还支持在Spark/Flink中运行作业。您可以查看[Spark快速开始](../../../en/start-v2/locally/quick-start-spark.md)或[Flink快速开始](../../../en/start-v2/locally/quick-start-flink.md)。 From 27d971a78210b5b9add87aea6dd30f80c7257d52 Mon Sep 17 00:00:00 2001 From: Jarvis Date: Mon, 11 Mar 2024 16:49:40 +0800 Subject: [PATCH 05/59] [Improve] add icon for idea (#6394) --- .idea/icon.png | Bin 0 -> 211862 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 .idea/icon.png diff --git a/.idea/icon.png b/.idea/icon.png new file mode 100644 index 0000000000000000000000000000000000000000..aafed25d366c5319069145cc24803b877ba9f6bd GIT binary patch literal 211862 zcmeFZ_dk{YA3uJqh>VJaL_=1nI4Z=EqLPul580cnWE>;3q=h0Yd&|r`Mp4P$^BCDX z>u}chc^&mypYQGS2YhbtA3AiouIKade9Ze}T<5X6suCkTCp`=XW4w9eng$F;MTEhq zZ_@1pS6ns*o`WA0ZW>BgVR@~`=D>fhS?b-iQc-~kf#2z16k)b7YUmd5j}!a@k0(;V zsK8GO=vpG>pTANO6RH0EPJI))5qp6DHVlS<-Ml8Rb)RB!gyDtJjQ8k{6UztKElEX* z#d}7TPw%isPL>?LZKTg%LBsg1@bs%QiXTJ_+{@3siR?}*Jg>y2d(62ssIk|nxbjY9 zsHf-QN{f%Sy2pg=pHPJ)-uQlqYvRj7>?N1Q{*T=Druj=ArXLzwg1W zkD3~?wVIUf-hL&$ZD(`^7q>5DW;l)G`kmnY6x1vT*grq!1q<@y8}4e+@dm&s|M_7` zNsX$Z{pS_vhHkxpG2~=ibNhdmVd2HDbN_o~7DQqrB{hk2#V9oN-%AI;>JnD|dx;W$ zgcsX)f98V3e@Ubx`qKV)yNr*pI&`CSbkd3ckd0WKk^ax607N(q9T7fSet_|xCx?~{ zlEeJ>61Ck~#A^OptEJ6<&(4D1VfrtZ)Xgb^P~l#p3JU+?DhncDtj+5`mtcXM3@mv2 zDmMVK=q|5*KRt@)o;|Hoqf=Rp4L`2OcW{^vmcxQzdo!lEfE zys7b@07M|Q7ANcPf0NtZ*5v5ySm>yiobPr=ZlGn=br6EBj@T&Pk#IXD@h|J}VGvjq zv(JUMC>`pp$(Z79ab$v>ru%r5g8B@=A!@f9_x;1W+!jr2$8_lMHw||(!HQXlOG2lSD(sI7M^lRHKp7I>MJ7uzUg^~VqkyWZNJ!^`V6psRu+IO_ISeV{GBH{q zVb&S@2So$)IaxclE1Sr0=HnJa3IQlV9TxbC;j{IU;rw*4xqA;2%Q60q zh!bCg0DVyi@h2ZX>R9mobt?t+^vy9p9yxU4n&_9JYD{}oCc-ISQ_arn4wsm{ldVbn za6-W+X=Y~SSSVHJY$G`Dsw<4mhDz&Z{&eOH1!d_C4>@6budL_5v0@x!_ z0tmKqkzOk3FAu1mg76JR8T;L!$2W`=QNbKTNG0RUSPrnZz-xe|Z~Fw-{Jl8q$;5Va zO1*JQSlVoD13gJt)1m1MPy&P~rOdDxyVaAwZ+)X}I(C~Rk%S5L&~PknmXh5K0h|6C zEB~krFwR0*C_dC&)20%aY?y|Y+66RjsMsF-OZJH;Dd`uU&d+~)Q_);a6qN5gKuMP( 
zj969cd)@oDj%D$+tA4C(nxNV*D0x#8EL6J)78+o*rTBZH3d-C!l{N40`ACETT7CdP zpYZO*zQ5NHI(u%Rn$VztcMcWqGTGQ0qmuOE$+C#zaf-^vGO_>tg1Lk*4q2 zirGz(&B^XKEGXeClRUF8r5zpvDDejFuWfzQdhrjf2zg4^F{HM4EOYok_AG!Q-0>u6t5<@{XDCBW z*!{5YdO8S0b&WWy=bVkA%i|8q@$pR<2 zHW|TB&KU<8NS^j}r@L&gl1>}tJcUtSr*mmU(|OoIf+CaPCcqL#4AVGPK@h4*m1x5o zVkZ3a=9li^2HuI<4OC2*+Br)l6hRJIAQEW5R%LMOU##6^ebm;9eo;5uD(pM-Znv9% z)B8@i!4P4qY{Oq+v*LjzbYh`XAWQs__aXVO6MN9Gez@3!(-&Rgf#0tjuDN1IBMW7P z)A|8E_!;jNanCS)waVPFn;O}%QzVNIY?D$GWJvOGxHEL#DOQYK9d5i2#x`f zc#x9ell+s)NaAS4P6pErSci0=?GoFK5_$x91HYwbnDXO@4z2;fiA@ZMZC+gPu!M=wO@L$ z@xbwT7egP!7_Ou`gxIpb%+do=94DHswxzEee{28y+Pk+>)BO|?x2Dpa&oC_uSL`O` z>O@Tra7Ba=@>PM5dp1AH{@TVq=)uGs5WxzUImp2mGTwFGf*kqygu98` zO=S$XKMKZ@-P`}c`A#~4s-Z4|VNBbz0YM+p+BK|Q;Q5PYa7cFoz1bn;yt3~Z&u`jT z1dMozBb%N(#Qo{wp4y0}X{>2r5&hFWMOy_YsbNt5i*xnV?kuJ^zpzg*Ik0Ro46(67 zwr6(&DD)R63zk2A^H0dbQVvh9R2H!Q+3JtaF#uS?{v#I$WX5C<@#mukR)_I26a4Sx z|EmA*bttHPDT2@|^%}pe;;+m97lNo!n2^-}>`K>t6~Nx7BVT#<4*l0UEdM$FpL1k{ z)DjJz1|D%_i?|@vhcHBp|ge z0!<#>-X6JLT}H5Q+ivjUG42yS3>~;O078_3Jl-Jo?Gf0mgfK(NC+k0+N!EP!gK3|; zL$7>*O7(@Qvx&7Bjc=vX<}pq09^h`%!UiXEen>%U$Ny2&#{5n#y;>%Ug=>f; zOufIMn?WE-Z@IW0UDEdECF0m_rJa9FwnV6p2f7|*egA|U6mr&213OEvZqeIQmyC}H z-zaqQiTJN(Z)dd4-xZPOl+%yx;a)nsM-?&a zbc9fA>kk(vDz?NmA5bpX35A24>WV=Z1r6@`IVc3W34*PfM_LnqZxvCCm{pTLyBy+v zL)d*k$>)4Hsl2{?B<=jN0>07t{!D2^>uIh6D6LZGrx4xD?;5o@yT{}o?*guF2ZdH8 zm6NhZp0E@k`TII8NxjTzf;1D#_=_dw-0I)#CSH+pENt0S_%&kdW9=L1VF|4x6%|rq~WD|`<{`XL0q9`E$)+e zmCVNx&h0za%(>XrImXO^ZY$DhR7mm$ay69! 
z6x~a15t7SP{Og=|HtTmv9%9uT-QFkFhGYjbg*<;7sW0g?XidA0_SoB0l8 zE;i$zq)UE|j(3bfi%-1_zg#3CZz^u)edOuILYp^|Kp_Gz0F={Qbbw1(&Y z_C>s?Kpi4q=G!8K;bvh)AL=?Sd+EL7%u~=h8W7DNxn}?NqljZaqWR3MXS&`W*BV-| zmAqjHn1zT0KtOxYaAQw90(Xw|zg-z`Yno+ica$rWNM|GR=)`8PCp8H`)`Scg`-ji| z&R?KiBifOCmU>@3DJPnMMmpwNbSMo%8Bi9m_x#S$5BYln`B7GD^(7ROtA|qV8M%ZP z%hXJVt3B8vy)WJ2vNO-s3jQDt5hD*}H1t9$`oH;x0QG?tnN7V&;`pz5;4JOk3J(^R z@{`?V`!BYZ=9mcEt+aK6{+{8-%$)ZZ3JyRvr3^IKXxVcg#4)faoQ4%&Iq$8pD%_PQ zDzG6Im8sw{|CEXPZEj2TO?fmIdWsy>f$O zWLm6Drz(9npTOPM6Du+jQ_z8|6($o|P5r4GZGwB-}~8+(TG*yg?h0T$ph@@QCZ^ii)O9<#IxZj?Ql zjX0Z=7nf948~?@KBs|Z?mrGwlHJ1G30KH42oFNiaUiQ>|k$TW>kkXdpOr?Z>`J?9o z8~GW_T`@)U1j(G_n94PZeYicQCtd}vgMR631q9k49$f@3cwWT8a8~qQ;co6p(X|K# zlEP)iQKqvp+OcJxdPvC4ya0^f@th{P01S1eL@`}xoQFZGlzGNbJGjxD$da`_~o zt4%*C;K{E0#o5|a;x|d(KNT3ZgO7NmtgVS{Q{7YRS?1B(?5=4j`I04GrI)XbjXMMj zsk+Ij$x&p0IC0YOFFm%Tfz=S>q_+0CluA;dWK zdmw3x_NDE(^4r4Rnjoy3*h@BV$*8%JNHmU>dEP34D9!v3RX*t@}I6PtS-T3ihY&WL#I7x417ddQqJpPLUBg!q}Fel z+V1=rpn^un`vR0^)Jn$>|1oc1I`o9*`5(!tbM4E|`$bq{E7{k`hzFrh{iz9k)+fk+ z&mh1VlW7c+v<^NFHmompnwadt6fMRoXGCtw^N` z&R^eNF%9iD?rDhz2ue4-4vg#tC0;R#5PX!~rZofpF*-(NVSX~yxVJi~+}@2wR6+Nm z{do^KeGp2-lP?(J@-H@!MZ9sop7pyNff%P;;~;2zSm?XmZRaUU_T)8HS(nnKekLgS z#ct4=VQ^7d(;;T_40|z#y-4&bj?AAf5T(=0%$4_O7UE|~7;(;?Y`QRuzg}WX3+ufE zdl8ry(@k8MO?9v)VDbr$xHz(N2F5BgoSz~c>a@50i?jgB$q&!+4q!=#`9U+4zl#(?M%A2mhsj^p3@uYg%}%10l#X4vbONWFXSg!zG|7OiaRaM^rnmj05q-30v+ajHO!1uRR-z$!PpV1Z*2SGkKeBPoXXxek8Vr z(V}4J){Ys?KWD2ecrNuc3yOU)r>dpMpJ$QxYHV#nXKNCK-x3kP7y_|x_9{udj1(c& zrGu9v6%8+!nMFs43F$da8I^5KO;Ev_IWt=L&m3Fj$i;N{>nFHK>+D%_CU6{oYvj4- zh>*6e?JzF`ilPxxVBZGK#L5k25iwhnXV%rjZzVUJl-+c3AAHOOoylyngDQ_Lc^16n zW3LOSj~5XLBVY7MHPpYsmBs1W2Ev7))ZeJ--XXn~OjRArMcIw19L{v(y(6>ONPxi_ zhCR+_WCXNs8Jfm#QaD*2_3L3ac~kd7cF)ni5CASZWt?j7sas;|(c)7|9xUb`HwNAJ zY8HV{pt5Ae+f6Hew5N>Ilu`;cDJMNX{XjeS z%>S-gLa`ZizS0+N+V6?;C!c8@T2*tTQ8V4IL65wW5L67qGNtUf1UKOVy`q;qxdZo) zGVN`gdJwpOE8TWbfrQNO%ptw^HcRw5IYaD6gxXaqes4cTy``Ji1TfmZNSNaf^yh82 
z`ONHZ^zyp$`lmAW_w5zcH;0B6F1IX50yLKP8;uD;BSovN$qRc&;|&Pd|Jiy=cf(T~ zl*fxyo4Ny9cr=DQSrU_}VBFOg$jYsy4*Y#xoMQAJhfi_%5fwVbFSclR^LceJEUQN+ zT#^=+QVV%#t=r({kKBr0|IVWVO#S@-c(+Gt6lmq zX!D5_DaZ;p8s%7sPB5-V4R|sTAT;I#jKbMq6>8of5>Hdo19bX5ZTj$K_e`%w4S2L$ zMx?=GY^R-%r@V2RlkvV+TbCvf1ogqCwBdN}7X_>6xA!W^-7DS~L9jYGnromp1D3j> zhM@4ho;&4d)pfNw*>!1S-)leRxJBik8gs!i%xeR80xUTC1^kQ#Tzw0gp!@BPPeQm1#4ta>cC;J69wu5!_ zTOCYT&6LMc$%}93!U%ZTA`=hd|vX;o4IVvLFi&>FG9@%zn=rHCZx1O8Mk!g|@Nf z{8+xqv3tRiXHe7W;=0wE{A)(Ig>}#C8dNF%&9q7`I;F2FP1PjiLYC$`v;s$Xf)!XR zPy-YoluM*%L1&Wp9Ex)7%4XB8OTUum4J#iKepM7th~(5>05DnPLjwJpa276OI;B=_ zSj2E81bZ95YMn{U;mo#My^$4uq^tj?h&@E|d`w%C zB`X^B300lyt|MvP+U|M@~}$~+tRt5o%h|{fcC5;{ruwK0gol=j(9WN%h>tE`X@R^ zw}l?Qbd3C|AidyBYq00L=>bw@f23whwy#?c=OeM-A8M?KhRlk1xRiItRSjIO{EYTn zZ^Dm$IkD0Ij7`nzelNR70rht%h`Pi`KM{WdGdxQJzlu@QUY-&&G@B zA?{XzWw3q-@`Qpnn~k>37{5?=UnsERUBl8` zYDKa>yi?05gGRzVy*5xS0%3X*0JCdGIeSUsE6~7L`Z@4JBmPTgMrvYpn&)%-(xRp7 zDxOn;adQ}w<2WUAaFJD z-U)*n4A&7Y2AYAD*^)=#2_5feENHn#Gk1mgKnhLFBfPBdR!Iu4J9C5^*08O{e^4E!t}z zACw^vW&{I)gy4nxpc;s*sZLkB|q)- zC@IjTzKNB~-_(^5-%B0T1wjN$^<GYOea`4HSFM93Fc{vM|XZ7rb5s)t2-4<;Sg+(Kcw?P6&kvEs5+JCc${AkBzNaOxF z)&0iEwE8E3YtbZfSE}E6R9DHgNXj@aE1iAPP$+HiZ8(|TR);~pJKfYvMn7E)dT;Z- zPZ8th!oEe(EsC&&O=RKy`h5`NoLu84qsXB1Cm)Ez$}v!v0!CR$J)u04?Q(|jez&4L zdmSHukfhYc;?6C7U63iYRq2N!>{k_#M{vPI$lDzYEG^1$c$(GTRQRqVVUD_@%TecX z?z~LttwosBV{l$Dv5~MsXq$!3dzEHFe2)3Z;Ny-jZ5a_G9oegbl$raxH*>HwBi{>N z#vEcK3;#AWL-O_+BY6XjBR1L97etpa zo7zZM?rsU|NmqTmtDrl54Pi~F2ShmjeL`NE&6wk<_E6(ohxU&0g>B8?}tQXNx>!`}^&R#DXlo6i$t)Ak}b-&lf zR-`{(72_y~9jfjvSL4LJdLnw@hz_zMSZ>{4mbzxW+kAm@?(vg|t>V_LGGW<$6;KW5 zpZ&1F2m1wJ`L@@Xf653|R-yPJVmK2*a5^$#b3M}vF-j_)JKJf~c`ftV)z(>=0YPX# zB}w?oX0l2EB9{ZgSCXzeiMoUC-k)gh(XrCei!WpjRm zhRrr;9Eo?whvcEYH-0hILn0Vk_*H1aa73Q~q(I(NNy! 
zag;ID?S-i5;v71H?afXo4}0+vz|~WFF_4o?W4k{uN7yy|pg75e8(JV-#4odpjSy4w zCFMw5?D@#SzE7KNc^AvXhGupnu(@}VcfJ&PpFviV==-kjUmK0!zFdmK|W537v{ z{e{>cYTg#9Hzn`S*MFFd3i`oR9GaGe2Z|N?biUZWcwBRIzA>r|X;i({SA#EEz__nv z_c@d5OO{3nl{n6DF%xGAC!V~@`~iGW6!&qHyaZ%7-b1>1+i`yLZg5BM}PA4``CA*$n|9XoMk%AhX~ zcIndab3#@A05;3SsYjZi;78&?+ApIE^4AJ6M(Bl2xux8;LZINg(IsD!Ups1l9b}%5 zC-qzv(v&z1W{FTk>FE~)$1d7g?+nr7J$oqjN~YI=7O)N6aR;zHOE6I!myITTQT8IO zP1)z;3{Gjpd)0e)G4I?QM>-ny4xldn9I`fH3_;BV$glq*PS-FX9T!Q=;nU4L`sj5= zo}SX%hv`7K#BP(Z<+|Kg=V24H<~Yi4I8r~KlTr#+kP%-BPw0N%V-bj{C&pYQ>F8Xy z0ZmX4!?-}$l*}szg?hKMi3Ow>1~C)DOstr(Uvux~EZ06kJS-1BY&tsh>j@K#T9>+? zpPuCOwNFX)>q^{?SF}ArI4hsCMLE1t1kDC##(Q>unzHU&@%7V9I3LcUzFO9fsl0Cf z+0JtJYyHrxdXm*Usq4tjgOBoIdUx!zV=bnAHz`R{fpUCzz*H+J`hzktdw}Xq z;4#x{+x)hoBk`7+r0-pDeO@DD>0x3=SbLr`Aj^<^O!R(`DUg^^x3N8M&v1o z5Cf{BZOoB;G4|qup#-+Ai<|L#<;e+PV^3BPkSm>H!6GZvyd+EWstf~Bm)`T!xOkN- zy6LYOUPhbIGI^>P%!Y2=Y(R7NAXeTCE!`62QzO(9dzR3$2SB$5b1E4`U~|L?v8n$gd!= z9zE16OBh6yc=#ljlP-Q}Y(}lW1RNQSYi-3O&=a}CcP&Dc{N=3)?83vSia#x|!@%qD z*WT5J3`cz|xXm3!oNGfqXd5d$IF4Z+7}OiWj?0_~;15B;J+9NjhG!^d#mf}$q7K^` zN=w1oh_Fdl# zF1Jsvx+VA#zf!6B{wizpBX$mjmJL0BfDURCjpm6z`(k0-yBb!rAns@`KPXDTTv;k+ zJKGnI-LfYRa}M44+1tMB6#fY<&p1N<hr&b5b!p_>TLOPi#kYLix;Mx~yN@fVIEF zHp~v!bxTE2C)%~!!t2}ta7MuFLV(Ipo~6YA`omcZ+EL4DHF$i2-_mZ`;(`2k_w#F7 z(-waEf}c@*!%7L*cJyaXp|Rffcc3(7SLv9UzkDw@0sDg@Zim@k!g+_FIN6I@f9iEy zI@i(u^4v30+CI;eGr>M^HEjO3c#EUt;6(QfRG>+8$7CVgxsT?!@A5FlZ==3#K`(nf#}+5X zo-KQc_naM@eNU1Je%1+i{FMk)T9JJK94nuM(r?I6o-Mm_7rhlu)^D(_9aOd3VY0dp zV#D*=HvGKXwk%-=sbBfw+EQ^w^Q_O);IW)@j?q}!6UgR8ja}37UbNpeSXrk$a7JaJ z74yB1>}i*5%DHn21o3Chj#VsT@(*4Qiyev{&831FZp#oCNxk+Z$5F-e=bv#UXAIqU zI`Hh`-tl=|Gy{|gzsoQ#H2zx`QD!BGqWL8Mmr z!G@q7;?AcaRN0_{K-xC_?$fNQV{J`mX1{Rc4)qLo1j3jqZo`_g$ZYF^#w^^-g+c55 za3E^;o0_A8QPn+(TiCemC*}LC2PQGzJW|FwiA-%1D>*a7@959jG>7ajiVDY7z*&hS zoI)$lPkr2KRUqLx!yj@|5yCmeNEY2Pt6p-RO$(wAuT2?!Xvb)@#fw_6Sk3p?(vY@p zi~Ja9Tzf~*X2e~iS3v1G7JI-<#Y0MMevg;xf)QHdBS)ZGR(K;CdwY9EWFt1DHL`NM zW8pc9bbNcXdgU=Qq3BG*Z$6oo96tWlI(?*?vtk9q9oFm7biPHL)*XXJn%8CxeA?~x 
z>xf1vF~jhWP1Lg2n(o3YF1v~?NaZ~~u{BuRx@vFI_rMb-5Z><0r)EcdKWN-lWyZTV^}us%93&2+LO@ z{q&3~LWaWl^uGli4iLi7kdQz$X(dS$*Xv2f9jd_0^PcRAU)1+xNW)#u21n z_Sy;iK}Ag9nOApGm;}%FPv*VEuB^V+PfRTkAfx;74io@dJufwY@vUkaS?s%IHT<_8 z`+V(7o~W7XdeVs&)mrD_v{mQv{>%2uWvS7AXQGpYs+s5f}$Gu-o<$TFEcf&70<}!e_tl2i>sd-A$BS*5GYAR?$RQO=5ZW z^Y=f>#nUH(881htlHcV(7=!slkCCEh3;>uOn#B*gJ#F(R3a>0UJ6JbPx(p+`Q9YLP z)n)RRbg|nju4+Kr>ASMVLwy-Ak~Hw*8sw>QL{KZ9r}tQKaw)T&?~#q_w#6|87wN0V zh{03Us|n|rLQv>XrVopG2mKVpAIu?#-L~e^M>nzSn7p>PECL#d$rDZV^R+kYQS!gu zK={}WR9y1MRV{CTYDsnNitpO5HlvYc;~2UAM0+sIbP5 z>#Jz?wz)-;+-ylB58cnREMd!sFGq8WT90G=ePCp)_|NwnGG5oV5qI5`{8v)5QHJA} z#*cfEO#DJn3{7=YE3R0yw;)dCY+UZj6}j5|W7RKsic?h}t=YCeFsy_sD)v{XYYlvu z7+Q}uJe3DT>eqK`9K&?_A>1?X0bP{6U-lKPWUnm}(w{UrqQ<w407#sEV_Wt1>}Xe&ClWz>7sWN1zCfpdCetZI0u-9ty)DWcy-rJaE9 zww(=^y^B}4$zzveoES4d)@P?|eeKG074qd7dz>>o&oSkC>%A}w4?Vz@l+=sKJTLKF zpyD1rzDUha)~@DPHBaT_CSZ$zT_Jx(pX*@GWMUfk>#JKqrAxOf{S=3Cvq!!NT^B0& z^IZVX231Q2%z5ySHr@2yl#BF5?PNdfytj&#obV z*RgP0QZ8}lL<_QWm%Bxz+M3)U|EibBF^5cs=7MUg81SEqW@NG7kv`4Sq=? 
z-7-i7f(uI*mcltR4=|jGBb1FfL|3lFxKGO^+B*Lc`6CrGMzg^T9F>DDrS&O=V8^D$D(4IJf=%x51VB+ z3mA!I1t$pG@-Jn@e3|jbJEls(*l4$(?|NG7D-$2|3=xJ|u~=wA0F)V&*;Xh%Hvv23 zp+v8zHaicF?kCg{W~9WIhf$+$mlMS&m#-{Kt*?wu9>;9+bhoo@+=OqK);%Q0o9zVJ zMDe-Da0x}PW1I=nOyQ%0g{x?L4Wzi=Yy#WU5BbNdXBPQMSJeopg}jL!yo+qYg>s#b zMa8$JDVGi@^lQ^)Th1q3MSEY#^^R(HRw&G-lI$@#LA#Md1@RFv0(mY>!!u?I>MhoS zuw6n&q&>z`xG@?{TW8eFY+EYcb>szY`p|@ngY~4R3O&hF(62qJ9$jhlQioKq4ZJO7 z3C(I^XjHkQt)G|oitKLO(7_;A8($~J@HS`R@xb8u*G!K->2R7{4ewwkFdT>tUnpDX zBt^_+k{o-Q-R7tD&xRSyTyCyNK86`WU7%gpzm@Z&&^2aAD8;LLG?R6xn2Jj*VwrsCSOpP7fPFbziTcJYrO*6F4_lEVG@>;q0a73MIzb zqjj|T=!zAjV+eeaH`Jc3x{#yVeLl{*ZEB~R^n6z@9n2Kg{kRl0#ZlH~?pE1{UM-q% zoo$=v{ia}tj-)*Uhv@W%Ud-wmwaTI69i#VMPR;jEUk?oNjSW)Ip7KPZSk&2`k2GW0 zh|Yb$>q{;sp(HOST;C7JD=+qh_IM83_UBSch9q1kUXqaYIwmRGVkRxEOaBQZM%-aR zRY(PEqLR!;Tdu3}WMTX+X-=+yxhUS<(^mm5YGKUZwAJU9qrIh}O-HoSvyqz=|D_*7 zBzjpGdmO>ZkzYpOJy!`rsHcJfp!5lY@7(;RZWFT<pD0mnxIqd^e=na(2g!v+8UsoR-mt%Q7I^_G9?CzzX(%bL0DIE)p9QzeW zXN#FIN*HwjF9ehgFL@5mhoG!?9^q!aDDM}j9IFkUS{v9l9hPJYA8BNvb_s85JXjj( zffLi{RR_J!h^3nw{X&k#r-7OAZdFiKtTg@}0IdB`pqNR`)^gn@Kx{t4Yyl&-!j zzOrakiMa`Sg7)A89?R0RH5)_=D;j+05b~}m~2VVdbW8Gk*Gt7!cJu?H&AI{ zy~I>>VWrKO>aiHC3@pTa`?wdBA;sOue7Rq@rfsR4!yiAtqS7%Whc`K&rgD0jm(x?L!;)y;*G7o_R!6;@Z(eT0?f! 
zX1e1TJ8H)zpjD!}Z1lg(~!eruZR7!j1kl{LeG3A zE3Ebw?SWy=2NNXZ4zWp&P#Jtwum*X@&FL; z0af7p3|xVC*BG8+8Z%>L=ps@atemO?H6iY?)y5>tBR_&UbqtI8bbY1+kPuww$J4?; z%w}U^u48o{jds7yx~KT7c(*c-cp-#x#O0v!t&=V(rwQc3IwBRsyvR{Mci?EO4t~1l zi1QefrF*|N;KeVq%Uq}lQ^m(^@BTiR?dSmEDI=nL!N}oC;*+~dg+L~QvtOOl(gF`l{ty_(P^~NolO)ndQQxQuHDhp zk;mwDlkxuCq*Z4}8u$bNWP8em@pd*F%}%m>JlQVkAAUMJt~AK?^W=J?*>R`v6ha5X z`t!(mY=Q(+r2%h7QZo@vfKVpUI6{u(IIm#$`l^RgcqANeIE%dLSO9m(xGTSVmC=i0 zZzAz42(=04&w-Q+u|vZuN$_bn4A3u6r-OCBh#r$F0p#irjUhKt|FP0&98LNW2tjk2 znZ(9NIFRkxHHd!mY62=>pcwS+(Hfs@9|9zPr7#X^uB6?PN1YVcwLqEU@Fx0s|L#D!}W0=hznYX@q2wJs?DO2$>n{z zG%m40Z6I1gVPWZzyV$Xzby$#R@v?k~IaGy%7grc6j3b)~RQov-3>6qgIITXk1-S|Y zfI@`5vCnLRO5IU70IHBh4)SEJ^x2&5c4oKS|`bddP`Oz+!;v}%Kk zOZ%D}@-r&TJtLqrF3=8?db2+!%<@svS@5oYH+1&CL(voD3KEriD;MWa9`Y2!)on(g zsGPTT7+_UdH+z9&I7hy2oiZi?yw7C2QZ?27x;k3v*gWRoU^auOVthd9n*C{gzGAj8 zRDP-P!51!w6l*r8*XN!eWF|<1uVkyV1LX>nauLwP9^xjA|NKr{ zPW(>tQ=Y=*IrKx%GyOp`dA7(RuinA>)jYvldU=te7h_czHtODJDKsxFx~rQbi{?c8 zDx$S90A%e>wx3`q%S~C+X9NXQ$4LYDq1&$-twFv}779*#dh246XTIX8q{jA39~yJf zTVLaxiK33L2izBfd2BP+p!ZsqcJ>v4_vqIu>iw;w6#KW*YPn=re+Kyvw)_5^?(D3R zPA?+lp7cpxSxK9pv2Acp5x5to$puOb&^rMJORD>TYky5wwHW1x9z`OpF#z*`7+q&M z={Z+z#n9C7`pQOI6iotOPy0PEzs4vno_EohINXQbC{7p`RN#Kj;}C8PEeUfpngB9`o&-ft135}Abb-m0rv_wLoa z42>iw)=<7*%U`Lnv;fIjXe~E-b;awXdsUvlOWU)BNnxoW2Q!Jc?pq_2)-KZq*2Ixg z#h!L+C&oAqFZ&{pJ+Ffn9%5py!t>~QkX2%~ut8*&P$;E!?(yOg7 zyGkA0AyA&k)3P~!2^7_f7oYi_(Dc3m6OpT%|L!19PD%3LJGxiWfqxris;-S)xC z#z(^7C*#+YpDM$loDS}Q-rOEHHH_ZS*JP-2SuBgxlP(Xvb-H`CDT(;0Wa{N5rttdQ zmp`*19J}oq7_d-+?qG_CskQ?ugP;Rd*stZgp|=SS;8 z{X(lr{*{f{W9SST{h{}ov(A}Jd+o$q03*ENwcQseLCv}m-Q8LU24~D;y527Ggd4|t z@S0u;-?z}+$V`|Rki$N;AIf)abl0bV(_u>E>(hKl-oFOCNI%dqw{nsOLDUnUyjhYP zda_2E2{?34B8Pv;RG3xI4JqCW$1-{pok6x&s`wBk{WQOYvrazxb1?JkP*f$S*;p;p zILaESg0nux!aljQgmz@0a`c$o-Wp<-+!qx+om-Dmj`OY5acTEFlL(g6MM4nF_b*Rv-<#3gG#>SM3cMTk;|)KauobY1dh+T_rDNU}Mueje!I3)+fc2pfS)yHZ6f|`-e#1{@_^OY0$$Yagb5?NcvkIwE zsXihrh|kYB%*@vPQX`aDb6s&fp&dgz4NW%Q0AB-=a6zEu#JRixO45_w67NnFul;MS 
zGzPm=&r$>Xq$X?s=5D#@fC-5qyx z-1+*`Xn2B)Y`o=5n%d`9xLlgp2OmoAE7Wq7VG%4}uYy;O9ZUR9Kix{Vam!qBl#7kD9HC6bW#H1GF#_XSWy zu%=cnqB{V-RI02vCwJ%V^aa(ei2NRJU#5WNby9L0YH1fW0%m2+G{?J{i>sBp2)H)3 z+7VtUTdXc-(Hpj#;1yBHGsw?O@;rjxeoEPdj*zW8eI7p)NlCTcvE8zy9qg719F^p| z`>2dkr1{D!X$JH?d!3Iju2fa)ell!-D>57>WV1MDy{j zwOHK84@(ZmXXpdO4f2x-<$1dQA5-5Q4}~B9f1FUsOfGwb3L%Neil`(s$X+3P6`5yb zua=#xl(P4B#-Yf{o@ZoboOyQ~cfa>tpYQkk`{UE+@u?o4d*A#08qe4B`Fy|qyY3MK zfY5p9;7!U?N0|8bswJAs*%m~Y5io-nM(uemITQLaj&d`+38#~0X;#7ul&0FrMLJn+3ScDC=mTfxJ))JB zn9f?c<>Fs_T;W-SuM#Hgb!*E)wVL8*S~FQ>S+7s)+~5y18=!!Y25Ix^j%#yI_orP8 zK-rwlg7;Rd=;`jVX88PxlkqR!wG(5z)GTIi&ehX-Ey?chvCQ`$Qrzv;z_8U4?D00}Y%29a8cN6%(}vqYYK_h)L%%Jq4;bGI*hUgpmmg!!{aml=Zs^`rT>)-l18~`f3F&P zjl~`ZrW1Y|M>&rMlT>b#@@Wwuzfb919zE;#Wp~>-_IVWhcOaR-r@_DC1b(~s zsF)4c+=9FCw%C&9jY%;46E;#qJbk3!6$nl7xH(mWKGVN7<>n~EjPb5G7Q<&JaX%_F z)M7nzT*cr+jD1LCv#(Tzk^Jglkhlbd-ABQnQx49-Xpb@ReRU5i8HgD*=$^jo*-06t zT+b~`#XZMyMPQa<^Aejnm@{{Wpwr(nFu*QQZ{4A|Stixr_Q>+gaFx`KN-Q#j^*iV} zV8(@~eVpYK9NJQCnvhVf?udYFjznO?A`RL0bTu*inyA6C*C;u5z7-686G6Zc2P(C_UU{@~}PEhRTd`+wtlgTzBl%IKO zv0Gx=um!(kF-W;6XyoD=1%2geu52-=@P;wNpTRz_2Pk1I!x3$6{dy!-PEU28^wHW4|9TNnjkOCz^A6VaJsUW{rZ zpV61y7v~sFA-;C?)1jF9GRqs%N>r0B@Vq5k6~YQcZ6G3e{>Cn=)M;TMk>c*b<$O*>P8D%LZXdByX}*AiB@a9sY% zRz}?Lmm~jfB4*Bj8t;QPQ%}kY3`S?Cnst|`ji)IJhUksU-*6&+szc*6s&a~b&3TU| zbBUf+GO||15*uM%vVk!b}wux>Bh=((m1sr1BQ~EH~-lCr^(okuhvylg;`EV?05C#olY#k zDwP0h0>&;9Kts+EJ7iNE@y8h^Pw)xJXQPb%u0-B@;aT@!0-mvaFuVT3-jV!-hYolR z0WRhx64GU;Y%xl3gI`;dV||Xm5K97fXyQeV5c513^g*?MwnVyX->V_`3TmsUUW|U0 zL{w^3i;+BOBO$HH@UasSL)SiILC9qj5ezrdW$g=c=` zf<+qx?vBL&nNy{@MZOk;`#E*xhQYEX!KqmBAjE|b(R)PPSFb?#0j<))V+X$?+|W7Q z+tf+XU{*z(fUjhn=rwtNXdNWi5TkTp-fjxCE&Gc{_0scOJVR9ygv+Vt|0d-~jwi&Oq?vPU`)(@lg9zo5DTiwczs7I zi9M#NT!7L!pYdNJHz#mAMW#s>?BiJisfFs`ffs={yRU(r24GtD_nYxGRYstk=2bc5 zZn+#E&l6Tey1_z;e2wDM*KBR^c)|vOeE^TY_+x67#}ptTw$N92o(Rx)lKf(b zLgd651&>TGj65tHs8CiUI8wMak{G^8a!)-=7)w-A9IncLwJsVU5eGW`78>g%d<3_E zFXR$UHWJ*v|ANv!LY9{kqW^nU5wJmz!JBku-==TdMxUK^Bz`*7cvjJ{Y$$Co_mk%) 
z2Cv<)yn!k#Cw6iSh!Mxb04242m2mMu@$e+t4UptIZLNr<*&UCU-Od`WXt!!8^zi0$ z5qe^adw$_Kne)`aY3B``?om0j-$@caDW(8Oz8PpoVkM*j*>1}UBvMYf-D3X$|G7`) z8SD+xYFcOI+vl*)OB*j19Ny&Kx=5GSO3s%>&^?nJ;QeXtr?=ue8nJy-=U3O^SQhN` zfj;|y$V@R5LcD8Hz7kFtH$4?xez-WJ;c`5DXyF>mKfUnDm6s2AJ;+=!{8sFY$C==! z?@FGzfm$uLr)C3UU)$2(FIRdz3M{asngk@#y(b$hd@*MJ3-UbpUrLoNAD$Urx~mkm z(lN!EEJ~S_`fp9(LCGXvVt>8+$$}60^117=(`$&QU7xI{zG+Wk6Q#KzxO@Plm3|+< zeIra6saeoD8S~s;>Hc15x6u?hmar@PM|9ho@wte8ak?L zb$qD*L?)x0@tULf8E;I=C`M6?5bsTo(0I|__HesCkeht1g!c^s7^L7Hm-Tdb16I}!%-BnmoL7)46XIoX={0Hi;0(|`3;$K5{8 z=SUinVD|4$bF#8;7?t+KpBBA*DRjJqZs2*Lo9x+{1)3d@DG*`An~-|eHU-}7?OLD( zwA46pen)e^(8YP#J0dPeHDQXWJB^UOp))6j(?`3H@P7*bFDHaaIkDl?As5|j96o+> zPEW`fk(dvkZ<0~MBmL2`b>ekSclEz2O+264;Ru1ow!vM$7|gvuqI0kyWg*oe4^@nS zSe_XFJ*$s-(+!Ir)^`GUV@L-#Dn-*OwQi{2g6jYazA!*R4d*ocS(vr|lE?p6?CWKU zv9U&i&;~=HLsd%xrQryi*6I}oB{@D?bC|eT-dAK_&;M2LZM)_}j*xfjv~}e71>O{c zZ8dZG{zO0X$#l{3%x=!EPc1?ifWr*m9R<{Gk?@SiGHNIUg31no@Yr3$99I=O*rrPR zX8i^xy+>Oq!9{={^21D@oE_K@;67X)(Q&E{+t=ryk}jwM45Ohww#Mirx*X>!g z2=4%xjh);I9ZxA~mfZuz-5b7lSymcE1iFdHm+wE23;$xaT@9;#g6w|CbX!x8CFI=( zV;$f?fg%wPR?B1i{qa*}zN*~KNdu)X+b3#nwU+vTs*?d;P2&leSMc%=r>7MC`+X}T zhFu)LzMt;f5hB%jYSwf76=TKwRyQD5dk%$Bz7pr6nUnn%mRL`5Q}O8>Fgv9MvtQnb zvx*0RLfVJ*Bl}fARCC_>XT^!xl&$C|HEf?b_~skX(+DTJor>i`0*a?-N}*R*N5evnh9wC z{^{gaw5}C#5L}@+AS?m#a??7#4X|#tlrkwt;7-=4gDd4Qn- z4L9Ujp&9dRW*t zfiwd0S)V8^!-Kj`llUu+m;m3FBKGh8Pb)*JD(dXRyN3sHiau*E(dVJZa-KYm_Iuo> zvVOx_M7~grkJazP@XDr8q6wI;z3i{;JLiRUIG^5VZZ2YH&zP_ebK0oj66G1=QHeDj ze#ME;E%4J_N^srgeEr^c zvm0(nNhBh=J>(_%#Xw)2Q11gn`7iLr9r-uzU#kyA+Oy-?wE+n(Z4lW1m zH9GzGRAI6(a5<69E-a_PMULkJw}?ph#xsd~wqA8w{D=G@SH4b5CtMT(GUBfc^On ztWo1|F2AkdauZ7)?mPYCFNRmbmrN)d*$tV8`mdjBZgX|67Lawa^Q#sH7LP1RYU%k; zRML;}fsTy-P)!4Szfi498n{X)Z`JmPir6YRzTRjqHsS z++5ajw5RY@Xz!yLd4!$3&!W;~9EMm_wmqChS$zozywxT34i8&5P#Q`j?k5&hQ#aPD zL$_Lfe!4L`Qi3N`W`{0hU2;znxu36&77fr*Jp=fFrpZJg3>Yscejeu{RYp)c3z;ez zq_e@@C#=;p|K>hV*^&kc4}z8D0Y4F9(>Hm$=Q5CmR!^KH3{K`YEwW&_&3xx)y!?!; 
z@CjZn8shlEIZEIn(~O>mH#tPLA?k0^ch--LGSINbxePjN5P(72VaJpILqcsxFJ6j6zi zu1A~cml?TM;+KE}v>8*51-B)ZDK2uZaeI9gGg;bH@-Y7zcI9)m)L(!VN7AvtjDGgZ zG4L84a>S^vVLP%kiEZqS%yYYtYZ&NVplvNY((skuV;|D-UrvEeniZoa%n?0vJ*g=> zOk-VN>7IOH!mSb4zMiX0KPHCvi1nOKjkm`L)jzaT-L<*t1>enR%FW0AlWSEeZw|w0 zU-K}*&i1z*PaGZ$&0d{}7ju1}z3&p+cwOWi{}v{m@Eb8X+Tb4Ex-W9brGvw6x>d)E zU#h=+nkH)`+xwMxz20M4&m@r}VX_q|uVOOMmjSUqa=_{@AKeJyhR*|Enc@I079!5KE4%t5)%ZrVPh$)x)`;8D2(T@+3tFD?2^Yl`slb`CSYC*oGJd?j{ddkYr>&^ z{?{-s)E$Z@3!oH6Rxa&P0IPwYC<7jy`=6hxMx-32l@dF>gat!(IQ22 zO;P?mkLYL_w63nDFslzhqy(D!iQgdvf_7oX{);PXloj#K;F*jgkyM!qu)93{AJtc? zuYBx3db6Zei5h6gBkyjrF?a}S*#|52iX;sVyxN%oE!*j!vl!fis-%b~nG1jsq$RxK zQ#u7ox_wURX_0vWd69rXCC#Kj)s~lLM(0#-0iF$lbyx?JeV)a_pd%mr)jhwWAP~WM zRa{6|p4*1BmlEdJU)Q!v#9t}eLGD#bi!Q@wd)mQkXlaJBN^-YZ7Mv~N#0T~SP8vvu zpcQyQEiWIWSCMa`8|lt43++vBmo;#c3_IBbhzA*PJJoBHfodpS9aszKYU=&eWUFGq z)tCR=W*3hfYT>*pK@){pq0X^5lDuj%39ly!tY5Ll&_?Qv)!8uM?5bNRnMif1C|9c6 zQ?i>D6_ziTLk}K!(_C~vI)&0q-SB-LM7-1Mze;-DnCF~(krw&Rwks%~bM=ArygEif zJri*S9nkVE1lKg?eTeE~oL5LbaBj`K-`5DWl85q8Igv&H@H-@GZ*3`db_!ii8cZ_4 zObk4QD+#m?&w-^3{3YOs0v4+4cAS$aSFF`?Koj=Sb@+>NyQ+tU?pryT)43(ndaG>L zY`w$a9|sgN6Rr zbNR|C!rr=AlV2-40S4&P!CxJ%sbu9Jr~$T~*@ORGSZSgbhk z-45`@4%pN2FiHw@XtjC`yrbkWpmkzR@XWJE9s{-q z?{8{J}qM$>&hOuVOK6Myil+qvA?#7o^)Qv?`BbuBz#@Q zkF3GK(^5Ircr@J`Lo#!{roXUv@3r*fiNaD36v8Xe0_(pA4Hfm91avKF0(F%S1`(hl zZ(mRBS3;M+hVsX@RjcWv_If)%`Fa$4trAN=1-wtN+iocG87sK&A~G`osBdXHd{L6O z+_$EQT;gf#%?tjd2k>pcyH0(Ve_$%l^`o;;dHaxZE>0S)vi+Whec??7j6A-|#eqm% zZ#Wu`2{m7>2I)*}K^n=a|) zeLp=c3%L^Bvc|J8IlwEW_%Y+Y$D@a67Ts-BvH1W&b;CRY&eUr!q$ISH`?0|Gx0IXc zQi81dQ5q})?!n8)LM)_JXAf%496~A|O~M{>-|z^;UARO?EpwmlZz^mE@u8ivShH}S z(Dx9n6&KTfH_ksZ{o1W;rRCdt7ewoEeXCmR`sJMj z1kT!$YTLz1tryCx$aHW@RZr&WA}HwuLNzIc^{u4bbUUf@sKIT z@pf=4Z&QSrykd#cr9UO#V`m z_vi5f9pOpx(B5X4^%CUw*SG%)jK>ufub(cy=4q?f&nJ$NYq!_L1^0 z7j2LKNXWm8b!VdP(qklkH!k_v6=%*%(4s@asoLV8M$8$JK{y!(@n_?RNG2pAN_Ac! 
z4Q8${Y3QKsx9zLw_h#qQ7uP!C+fVF;h|>1Mdf9W_Q7cI-yBaqd>9!H@4^UTBIt4hl z5}50pOX<9IuW~}r;$Ka8FP% zT}oO+C(0(O67SQMXwhYA#WTez)hH7?q~lrNxjX69}F z#@*pE_*s69zo{;*HxPu6V-y7&Auao-$PFmrL}s*avn{@!*WttWCTt?dOxaBG;fpPA zMOs%uPwQ2sm!fZZMB+cuFd|D^A(IuMIGOHpYvL-i@5?2Kr72ZSYv!+t+u*ECFg!OH zAlojMj2TY(uX_rJ@ie`@$F7;9)DF+l35t_SZ%hp+ZzFeP1;^0{i3b$kqF^Z!<8(D! z8S4b>icDW%w866dE?tb9R#hE5larfrYHEQSi7@Plhr|(<6h^=MPC*&x$B?x1W)=}R z;WW+l^N>2+ z`#jTSTRDa551AThT|J5w=izL%4`l*DEW8_%?h^5xDz}fN28&~KEGtu$zpc|Y4a z52L9~3W|kVab|=sfjnnHf_0tz>&UR5eg0|P^V171%{SPIip~XumcI2yN=j7X+By>^g63+AhcOFM#!Wgwl{V4(xg3ru`>edDIM{Z{td z0_Y6=M;$(@OvwMp>=;m?e84}F+=p2<@sWVf$CCpk<|_2ckwz_c3LA8_`96j@u#@JL zoR4|9dY?76#+SP4__|Zm#0>p5Lf}_ZH9E(ImvEa?#O=IV(@XBa2%9GlQaBHVTc274 zI$37s3RL}mLd6vG*iVGc734W0_H9CbjUc@x8~)xOmJIWYh<76?=&udSmqZWDR%++w z&VP7xloUpzr2B(Lh<8F*s@ZLt3c1QB2kK0+)SK5S$&w6%Z64G;PeOq~%ZOctcwXc8Ht$YZu)2xdwXLwe>n|vHmtX3L|D8yrw{!W&dm_dx zd<74s?mina28*1y$G?h1bCr>|-4BN6u{0fsSDjSEh(rjNcTINVMSAZTO!z zub-24Q3%Y)Ash&0o6W<1s`0EN-87$lDwsS)jV!?(Hs4{MZXEPFZWsCFk%_mNX$2MF zW6+_*Y%4+$kis#v&QBU8r6aZ9Pkq0?3K~l= za1i{sKxuq>f>G=79|2GvH94aTVb_!NU=W=q8{F#niNA(4lEtsRZN8Q=b1>Q=Yoh2c zRCq}5r?iO&%)F<3(8r^0d=&qAC4Bez#pgdKc0vg*H*Tf3y=%L7<`cbeXd^mDQK!OS z=KhJ$-XfF+88D~Ygb>{MXs{C*QOW~d(v6qKzJJ!Ptz zptq+ySV9hk)M1ReRjpY-FRnQ;iRA5|w5qE-wEw#=c4C`&79whP80O0qhHhSlvJC`e zYB8Dar8k@})s~j}$=giR25D9-SBIY#jJtd9^!u{~oqtI!yA!1H_S&QP=Mr?B z5SzzMXR&E-p^B3zm#8&cCA$hj=CsJ`qJuPS1p25Sff3pTz-Bi0{6@hk{;?|7OBygV zZ+-&h(=-}0i6K1ewm9rwpa7xAeI6=c63ffq@Nvo8!jkI2HFc`A8m~8k^r0$#m(+jD z;+@0;uo@XAo|&V4Ouscsy}1r%OeZ(96jle5X4hJ{C)U$0QPut05vSmlI+FLHy#R%1 zl=^3eUzbm{#Q5^C`kYtQXh=0S&Wq&P3?r9HBg^JcV&d~%_4mDyTUZ#U=mR+hB%H=r zRG#5|XR|K?dn02}xxum)4*obPch%>c;^;Et?7I)L8b{qNo<6k!&kp2Up9autkGFJi zf|~iJh2}IsNTKPG0I*TSgwgt+Mvt`08SGrZcwuRmrNoA-?hzMXz`dN)RX|NeB-(s--{@AnJ-R!<$Dl%EPFkZpb*1X`n z>zvZa^jgdQdkQKv-#ThqJ0^89F{QaZ6~D5%|29ZZ;DDLZxMu`dn$qXi)fXj?#AIy*#P%~PT&|RTh#@v0%czuGoa6PCuxiW_U(uMVg*P=5bmH? 
za`Gxl(a%rb7?vFzZ6sLJGmdNAc6@|k-jPt;ech-34m;P$953*eTJ+H4f%6HhG9g<# z9)I|(decVOtvP4Yf1Ypylwu*Ut%$qX4XIM*l8l%t^m6LHAjEIt@k$NVf}3M3 zh0T!?i(xMfMbQimt%(QXCgqf<#u1vg`yNGQr$U%Yt=(xi$Hb8Ij2oE}jKn3=V|qNO zbVo_Kqc)tbmR4%Ak?o`Q>As$`aC3$T=Y#XL{*akFO}>!k&X!j<45*0w2Y(zUCRpUjw3HVD7v6aw62Qu>d7~~@b>h8?(!b~g#ssa(8{m!f)FiX%gZir1zs{Y+S z86yLweauwf@q6!aa=tqg@flO+h8_L3xbM{PZybzzk-wQWECOB@NkQ69nFZQ;RP*yd zi_RczA?kgnnEOW}bdm8>=QBdq#}!M?6Kq9rvdrP0hP&KXz*38Jw{Y1#OSc~ZF)T+-Zl^k|Ko|%8wB((O9)5OveLxQC{|vGX zhwSbMKDF8H5UX9`spf9{@#+&cj-kn2&Ksv8DNw$V0G7wUQh;r-LtbHTyxg2CNokDf z7BPx|mp8CbkKPBs!AT^FD;BE1uN;iDi1MyAr;B*j9{XhC$86vOjB;=U(X!cGq4o$L zKy2^x33oQmcvrISXZus&hScn{)%~Xmf-_L67q(}D-(j-~QdcgGa=KT?Omo;XqB2ch zEt)mV--r&OetDA|IzBCdeK>6J?dmx?D(Yv~Ik({-lDeO1?!8VGz|LmF|I`%_>1B-8 zzK$I9_-fwIsaqr@je8MtyQe+Emr{<|+zP0vpRFPU3Fk9soFa)2VA+R3y0Ef6)SgUK zdV9+{+?O-Bxd3iye$A69L%Nw)ilBE;;K-U8+wo=>B3e|I!1}H5bLFAcSYhQBPmY0K z-ppgUns0{3#TU?R$A5AOs#;X7rI8W8LqIDGDt*?B#l>4!`{=YiQ%*b%K`1wQB?s)$ z8maX>^7Dqrk8wK=&JQ)AZYzh|=P#!M$QA_FnF8CJI0tCyO*Nn;q2c%^KIb#gTYdX9P? 
z6$9PoX?Y%)<5e( zpffUapb3-GOc~7b3~wC6Q$?v6e+mDji8#Do_UmK(Lm$B=@jG-me=6{DuAwPmwEy!K z0QINeaS_Sqbo1N&9tVD%f0{lMA4^Z!AWsKM1q&1|NCoa}ypmYRth=-$wT+50r+wX# z`3o6wCL$B<^;Rp!#%e$JQWWtU6pCM)RZC+y?Nls|SsaL#mdM@^mfmFAxZ*Vl@L?A~ zGUDZPed$4p5`nKwTh|qLrvKD^6_CBCiDz5e%)`t0Q8NyX&W5rNy&HegWli)J)J2JY zeOT9--Z_{+-9SreY*Dg~s$i9P?B6n*I;PLBM5p&0F7>v!D+DJaNp4vDX5+0qqzTC> zCXrQR?P?;P$d9>sCL?Gq3?00aIjYF@h?YgCxg|hYnD;)z09aMW2QVsHO_Xx$HNxJc zcGf*y`e*K;w3%AjUb%jj; zuv9U}gT00(B?7^c+O@yR@(nylIb5dCvyBLGY;RFKesMcEXuu7Hh=MA5zB!TOul(#& z3Oq9BszkqI;m!1`+HJPVo1=BP{tBMYnTZ*)zpdz}Ka!CM)Eb1B@~UUaU0aB5qv$+4 zxATS3IZdS75o3$E??KY5&7bF2QiCqk#UR=g59w~7w0&I!8v3@l)WSW^?qBe5RU*}G zi`n8xCcVoS=~{$D>X;q1k@Ul2dDpU!fv$Abb`zl%^0FSRMvmnXJ$J} zo9_~7KmYWHcqC3Z-@v)ZOKwlwrZAyjeI=D&iPjbLXYrzoZZX-&q;cqz<6>dNktYZ6^b%i>u)r}JC^})2JR?`)5 zcrUxj;6ZM}mm^aXfoYLt_1k z2Oey#MCA;}dINGtpl)s6Q`X@`g3PG1Xdu3<Z^yRVo*OY?E2x3=;4*vZcke9Y-Y8R+bGmHoFb(atZVd0Aa>xDxkybhCoI zJ5HeZx(Fc60K4!XK_=ZD4oS>ib>jb^LV@}5-Wp?sbhyzYkaxT0a;c{O;JI6)qBOoV z8Mn31y+wWcvwTtuw9Xh_!V|v~ zoHh+J9az)lRG7*U8?2|J_G<;l{%D8=OGw% zHZrY(?IW+N=PF=!%kO(D{I{+TfL=MC#=k0KE3M5Bfh5)4lNI=4-RIAzll$%hvO*ND zzc_@tt>>AS#(mhCQx^sSC)7kNl4wr+EU`6;giL}Cj)*Q;H%~M0qox_=0Do5?U_0Jq%l=Z zEs>K+RCBN}m-tp;^BYdTn^4M^e*(~=Csd;k@*pmJ%|`aaD$TPh(H>w&R?JU~AxV4n zk8eJA0Ar%pqLaNV`E-+5u$MwSPYn%+RBkS+;N59N7$0Cb&Ib9NjnT(&*al*9(e!kT zeOVw_o6oPv19}YqpcMKKS9sDjWQy@){sM?_l8dUOHi=8pDN*3d0)9n`0(|xBG|a8* z_3W>ju?9#4;L9>@@Gdh&K=ffB(!iOd!DdO41t_jOEq6*2w3#&`R0=P=A$;fM57{5>6&Yn$OqpLd9R< z>%I7cGc;~XPsWU01JB-U7uzmYm&FIX_nq2Rrjn=KYPbmhsCqQ%yDtE%D^DEBB9wN@qCJV{%2HZh(g8k7xKsEodJjCFvz+YLC$!A z7Uj!F?SrRjU8SK~@ADv?iN6f>I+MZuoUs})XijbFIWHkT*nhHQOPo?)jYvtzXrSeoAqDTp zpb3yih$@qXIP%8mh*LjZf=hEx8)NQK$rh8;8rNOrCf?>JX~~7W1hKhmnjE02FwL?H zu*?Foij+XtC4&*5_mTp|r6ULzZ?5K@M7U^CvE<@28V>(5WlQh_2h)0+R{V|T@oyQP zu*-+Ld5nFUHxXiR1ihY2~693EWkYBBsz|S&XceBwwUv>=eIXE-eBW_v0o1 zfR&8Y0d=bGxqvTX`J9sJJcpyb2Y!sRV0Z-lL06;;jX}>mG}-FnoKUHUNu=MAba$jL zje&&PO48uXc{8=9R$4sa3>^mthkJl>(10Vj-EjU#2E(VIcR047@!|`@IG&ZHZRZW@ 
zZMeWjjBlNhgMXp8|9m#0_-wr!$gVsnIIy+;)0-jW1IuY?R!KP@AF)6Wf)(E<(2Zgm zoJQxwue)g+0rulo<*z0hikaR9Ksj66Lm-G>(1VLIG%&TJ_X%8%-H9|eK4_ajCrHH7 znGZi^sSU=N=-i{+Hr5Z;yf^KB(0xD>xfDDXWiif3LJRj~<8zM)qIHbu8~lygpaU`C z8$Zx;W#VYm)zBGIrB)9Oy5QGxo)tc={a;1olh`_{o^E6q>r2Xde}Spx5@q?jH$D71 zw_O2bDeeANF>&vAOVB-k?;tUjoRkYf>U2gD%}LO$$CQ9zY$}ojJ&ar!k$3nPW&66q zE(BY2y7gd|y1wm}D;>wNi8q*@K%Y>xm<=0kc*)Zo{m1C1k=E7s0e7xuBSu>H)8O03 z@*%}+ENDINvWb&kG>k{#h0&%Y38A-HmQ=kteTS{w$;4789q$n;^yl%04O3?sm5EJ* z$Vm#vM1iKo)78K1tG^CLm<#pw1ig+Zyy$rr>c=5adc47Cb~a>;#M+;yaZ6i_Ac9a~ zqH|jNyZUG7vBy%mi_d^pfRQ{nTKzXKSSh07=V9fy4oD!E{O7cLoBPjSJOkluPHFUi@#6%X=jY2oQUR4C?NNG@yEZvdVW*(9L@NlJ*)OZ$ zM7jJ3E5WjMyNP6egh}Q-le@rFce(059aUu{)2>X|nWKBgfnMQEZRbGOyZuZVNY7D6 z$+$j=vLLG0#IBHKo0;mp>At2EckN!~LKv+P1heqrk7)sA4V%2?pSE9HJPQNw$@$09 z2Z}``;>1S?IpXK(cn{%W`G_G?sEWfCsjoA3fGj0`y#99a(DTN;d=v#5RWfLKRI1#;7d8cK9j0k-v{yMDX3nU*IdG!v=B5Bj zdGo^`1;2_9Nha%F+1`ap`Nj+K~R(`uHMkIRDV7Qq-K7nE^uZ=1_0_~B(# z1&bNzRsx+6C?($2&^^_!IH32n5*ZJc+yI1uOPp{qxUdQqfs_nfG2>lf2y4F0nTGGv zO>G6p88#UF!CS_GB{fFXzsM`fWx(CFOXwJ#rw+y!hd zv%|1;UnA|tJ^M<-`inaMAuW#1=i@NT?$)O?fV=5ee^&=y+Tr|_l?G$xF&J;b7}JHg zv#1{BrZ1gx+|r8C`>Z6(r($tmu+Z+LkDLMVr1#z*(zhd_Us>q;c5v1||GLDFkQbvR z={o4b9l*qzF=2Y%O|<-1*-*nfOUj!ORw+Tw+PZF>68D*6m`pyMxsVK^zvW5mdAK0G z_38Bolv6-FDUz@>4sAKbB4E<&f~2Sb8-QT$tlif$baA$yo+#rZ_>#-gzwa$cVvJy3 zMP%`_xB0fj)}vUx%jZaJ2VdOnn;l5R<*%EL$>)_!B$3g)&0=&+MO}Ky{`!l-L1Ij} zQ&1JLY<&wViXz7o+r@vs!U~6)*a1>#(E(Iw))cbYEKWIroh}v=j$p|l$WhBb~(Bs?%?7VD5Uj=L1o0^|MS}M?inUkPi zIp?Z)Mq&cx zZA%0Z%O-vlNj_=v|NK4`bk#zMh3Om3>c1FpRRKa|S~&d8y`*nZDVZit7a}7D-0!tZ z3eQK+7SP_O+Tffp);kfQBla%BY7Gf=s>g=8Jxh{L3C`($;#H#SyafFlw}DO%`5GG) z(h`1S{*hY*q`D*oXC~O8WreI^h8MCB!GE1Fv*0DH3UO)7P)POc3;*d8-rqjVfTI;H zlTeX;9t5fI9YeivPGKGaO}TSBy4*?VgIczafBq{;VDS2Q4qv5&oK%gJA5FP=gT z5y8VG<2w*nCSy#c<0}{Y{cNl;)4{`&Zlb2R}ZVl*ug;fm>!&s~F~qQ@0*RqW65V;UcD8 zg(+^AI^$9$liZmR8X3v33NW{9NKI^0guUz65_36vD~n6oB%&@Zo}Wi?!oUv-O6@*l zIiDdX@~bHgh8$%j!H0i%;bJsVxL1qS?~`o(u@M}<#G&*g&Iz~&QjVcWA#d^8(N$S$ zhPT#?v(JiS>;IeIfXJ&4S_+!=y>lmdkxVwT-d29n 
z6jcPbPcJE>->ke14%{ic3{-2f-gT*Jm-`ZnUXiY4U!b)u_YBx zE=iqVpS3#Of`Aw;+gvSKmNE6nco?uJ7-ILSD15cdsQ6t0SesG=A>{STn41>|#6`QR zk|%#^v?Wqr+Wo(S3!})`F4oeo5g>;WT{wjeo=urx9>;Ce8#f}Igpi7Lydu>(W9Rs? zvNJu!jl3iLnnWu%Wa6mp zb=%n89}KzbtZc z(IHXB@JP>3c-;>$b-e3l|RqiARMY9MNkz7^(p{BqQnPOUC1xftnOQQ9AV> z4%+{Hes8l5Ni=x5_=!+dgg!Udxfb!|VWY9hI!p(;+6I)I$%VjWWn)DNC*=iGi7N>LTz#~L0% zlz5s&8f!lDFEct5o7#A4avKLOHmMtTHxH*jTg{LY++OZ$%T!B0ayKIna<0@uZ$O5z zjIDD2qZL~r*>La@M~c;Y9ZDlZH;)ElSong$+tYhN9LUSc{hLuIU6G|Q9oP_>9=e_k zM!m?OM8zZRkVgfloPjhJ`W^o`{A`T#cq+SJ^=GxvDG zO*-zbR2z7;%%I2VuG<3pHzNo}8b~`fqQGz6%`fEoy+HCUxosH4cax8Zp6;gCs)_iV zd)jl$;()dpcwDA5wnpGqkN*;>;mEJ&XotGt3A^5oDWC%l>e1B;s3*BmZ5LBZ&N`p- zW5R8?UCTDMrZgVsSM*@aVNs9CPe!O5nT7ROnw!^+9}1bqKfATpo%v+)*a)vFEdcZc7NY`GRA3o#QzP#S7kGFyM~dcxV%d3`?U(HdIEVd5yk^wreqLl=d=XyOx$xwNWA4+3ao%D;(U*9m+d z)8Ikb_Ni<0*XoELDInkSB}%L}mDr4i^!d$fI^-i3HYKVUX@V!f5lF>d)Hj!c7UP<0 zI1fR=jS&GY)Cgv2cBIfl0rwDBIBIc84A#A?a;@8um?w08OzuACRov1~(m9YCI1C0N zghss*N(OO&=uF;#pEI2@#ZVR6ZJt4IgW{$BnA2-a1Es?075?_**35cNJe}Shzh@i* z!#E#EAnuo}CBIo0fDDc$!GxqIR8o`grO#8b=T^Kf=l`T}W25~bSC_t-B(#w~PXPdi zc+-s9PI7=crU&UQ^jy=US?41N;D0TEuU>IArvL+j0Z^DRr#TP5Vw$W30J9olH7pci z^@N)mR-qkSS_y7msDpHD2JTBLqnd|R5q<0N4`9Ypl8i);$<$byW?j3>21L0hPX{S& zezLi3{FHuKY*g(m^eNPt!`uH~9S37wJ~#)|WsjNOfBnU3w$`i3fhh@5#5BGThl^i7 zlccb)RJL7&{R>o>p{oI}CGji!>oSC{NRmYdS-L8|-{Zc%Omn&lI|WDw`;nW@B2kSY z*GHJ~-dnf-IK;$wX&z~Ee&@vBP{(I#?gb)K)EA;KnTs!_?uwcfepeM!1u|)nu2H%x zUvJ`-$1TO=mO?_ejerV=7bm}vCvVhHGKCuPWleP{QYmdtpVHWd@G}vnR`JDQa9EnE z5XePA_TVnEEn(w$c{`&mdOe3hYT+_{OC0T%EJ&RfV;bTitnaS(J$crDn_A{a7k$+( z1=Gtjjs~a9<}Pg9XzP7ZMyQ-$q5XW@k8@v;uyGA-t-PQNe)c?hgdu_6!xoUj0VRL0 zlOr~d2a3owX5Qu*hsWWPKC21No7ohKWIw#jPtbbHLsgTy_~oZD*s}X{rPdbFzy}HA zqN<544VAUTue4m3%r*|{>4E}d2TSS`I8uV&_)_oJ)P1FKrC%7-WvUnpL(aD3n-~6_ zQV=_djS5Sjzd{bteog96yCWzgwU7UgsP7J^`u*cpgd`&g*}LqOm6?^S?95}Y$SRR> z2ssClWQHS>WK%}rpkuGBIJRSzJrB-&KBw>Rd7kU)x^n4{KIe1p`+dLPuXRVM$`9c; z0R6PTqOEycxvdWu9o~FR^H(k_wJ8aAa-p!J;itA`A`kR+p$M$-)sdC~eu+T6i9Ehx 
z-2n_buNikzccqH#Ga^ccZw(f4N6!WtscorOscr@S`>!6#0&8`uLTirm-;=!P11k81 z@Wvz~#O?68A@>!M^r_0Hf$hM5H%ku(3|YXoReQm>ymoya%!S6=k(8FT4>FPd@iAvv zith?PwRv9w4+VKFJbz^Ac47dv)56q;#en5#)&hh=|nC}w;c=_PcMy0>$l#f}v3=!YNr;;4}o!8fsb%Rw^{(s*QFTeDM*{j!Sy}K{OiTKvMu3coJCpuk17_0)>r*H?aJXrf;I6eCF6J zoS7St?RuoV24L>yR4x*p9beu_S484U-m^OM@Ol_zPyy>DdXZCCsGV6iLD9`U9?H0v zN^Q;7z}&+j{e*^}T|t^krAggZculR$JNLS#%I(14$MV6A|5Z9LsnUVD!{dN=BIOJK z5%8EdS^duZ@{@+~qHHHcFHL!Zg$KQ(@FEC5Y7x9|@p zmn*&`vj&bFpycCn0})vMPRj&YD#+eT)??&N)S|s*dmq8%*7E>|f_fZYIfCufouD+Y zuxf%Mfv5i-W|wgjGxVPKuO!`rQe}3Eg$E&?rS`J&wA!ItXqmDysoFN`Kk8e=TtA( z^9wq_H`RGLYf|H$9?(-p+c6afgCSGOx|CdliV(>}>5S;>W(>rxhD&YK_W7W49)2%~17H%sN&gM$~=lVgkX2 zbt9>5^)d>qpw#tPsM26y%I7%ZgKisbXGUZ!S>S)Vdq9KYM$(|5f9R1q1lR8N`o+T^ zchiF5m7SY4*4(wXJ1wChh_Yr7*Eg@sfjV#<r*E*KwcgE+8{YE%cvqT@%YQIEP9 zDVlEHr9K+G z|CTcVY-a&O4?PM$r%m<^~fU$>CVcKAPaAIQZ{0MmX z$~4K{2Sco(>*a*gd+!bO)G^%t%1Enq$$tlD0Odtsx#6w;?M$aQw$Nd_s8hBkB4}1i z!(!N`NJfY(R?$&My}u)HO#rUP&3-?-9w9LKysHb2%ZzYw>UE00QFSIbFq)xge2Ql; zQlb^8a4~R3P&5rDm!g{dmR2raoa=^A??2*@X>vJ9cN)K(Mt}r#AumI~!1F@I*>v>% z#yofC7xv`Q0=?1%zwE~6XAR>1MfQnsIQCDmRy6E zZ9ZqI2GG=LbMO-^!`gT00AfiR*nY?9&H_CwP;VHvU2!HY%D0(Enw%ObYl3F1}%JZ)oYs)KVMNXqPVd56E+J0ZcdQ(hfT@ z%H$_5P6w~Ko7X;M1cPFuy&aYB;(K9)l`^avzBea56nf_CNBl8GC~uq^Sa}h41d3W& zk;J$6M^+c)4jH@Y&Fpxw$*w^9=l`gA#H9QFR-osf&1!o|Rsr#s*f7|3_YZaVV70qj zf%J}BeX`9eJM>orL30hx8H?WuoxM++th=v}pFiBnye3-uOB7%)XhRqhW>Ta^S_R9g zuj>xpK!KlY<4RexnM&RLqY8p(1!n*U><@=%gW_lh#&t7^xkH{=k?WLx6px%4U;!nh z4KMuhJehFy{l@Ph5PRLL_xs*^C+zl5Pt!e8`~jE7>0~*!?0NCq*R4)P)eL7&}aA=jvawQw^SQj_EsbP zkqs*mHgSocllQKAvZ z!>RdruXrRA4zxIIWDE@PG2-aatqZZN)_ep(I!dzZ)?XRFi_ubQ(*kX^mZ5m4ZuGfjS?U0c3p0TG=fQs*Cl$~XQMF{O&t5zo$<@Ts^G~E@ZW;S)bFQzFM4QLYV!rC0Wf9X&-cF#>7b~xw7X`ktnO5fEbAYH7_SFGYK0UN{Pte8h2aSA&-k zd2K~ANZs?a@#eUL+%b^eS+}tRRN%tjz|>Uz?6fI(xE)}^uK446>(xSVgu80Nm_dbn z|8ET$t^V@Y#5ZR>Fj|o|H>)Tw0Zxz|M%ypRG0`Ku(<#PA3hL^UhEe!x(6b}YfvDue zBhcCF)WyTQFkd?7n0dEf%X1a-M`wYd+3iegJB9$H2--SI}(-~$)$pNg*-}2^` zCkL#xI)3aw(w?^8-=m6;0X02rE8<@O~~TGxvz(bF}(z<$8hrPamJ9jlPwkc 
zUtoSrzR=DiPE#Es?2dy0>+@4ZKYjx26jc|0l$ zfB85ghXB!V%V~U)I&FrcmCl&CuKp=^Xhg+1WSu*AX!R%f7}U0PQmeRWPhd*OTnt-9 zlx^&U&dnT87;e?cgOKb65;N6e_$=jpr$oe)F7;1?fD(}04w$rgc*h!D64^q&^*Hpq zR|77)h%bBNl#Ct2->RB7t#Ho~e`Hd;)Sqpqidat#8b>;RLWbt;-xFIGd1c_4drBBH zQg9G{w)vKUK()U%{QXRpOugXczPd75Ud%n7v(pPr+HEl^5A@Z7wQ-WK-;VE=|LfkQ zfqu3p`ydGDlK|C5^^0-6Q^I7HaqIaCP!pOhJYFd&^|-p9e-hkeBxXMPF}+5tn_%O7 zlppzoUuZya8`*2_1gREeR`hlj;m;T9c`T^HI?tfbsO;aIfN0P5qml zpDz-IB=?AuLjk`=l&ilV6?o#$vyAa$t)*54mxs#+B>V>*LnhC6E_t}EH;A1jepi7e zKA^leeG0OPG5xyrf`?Zm#hQ&wiqvvxTqM8t=~qfu!pLvng3Md3C=t#I2u*-&N;aGp zl5_AZku(7#F!DW7xVh_|PG74(m(jJ=P7qlQ@YUj&HX)Pj6ZKc3;g3hn$P%Wyc1 zEcWZ`r7X>4!NZ|g#14NZ4laZOH>a{bnfccNw2{W}>f`T$o=z%kX9u9DMSpw;%P!a( zvMYx&8aZaK$JA{X$Op}YDqW2@2eajE09w4ynit~6Jl}d89dU4+u?^FlDSBRNDXw$? zITGfo!N-=TxS!;bm>!^VsxJE2i}2LnUl*;man;c&|Nc(BE{}6jyr6i1V51pu^8iLx z;+e5RqL0e%?DIQ+UQEflr+-jFv<%FhOp(NHyg0Te*We?B^f9x}NeWrdX*)PC?TPGN z4%VQs`_!9SSy33}%j0}1DCF0&O zNRJ_*FI-QJm9u?-k(ltbHtQeq{g5=#O~F&=v^)!~XmRD%EgK^Q+{EA3Eu={~a~p5G zScm6sU-j%E`0_=@r!Ny!bu^GnIc2)$fbA93mqYzckL~V zU3dwP*ia3(9g2=SK8%j<%nrum?=Tav2PY1K;GUDe%+$p6VlIv%a15*Vaq8h1;mWh0 z4iF+a%a_&vP|DBjK+SQtc_=CD zUF^2V!P*o=wN9Z$`AXgteQin9b$4X?pKIZY($X#sMF)&js;=Fd5rCubB)=TZc9fTO zE2Nb0M~1HH+sXi?C{5Kma*p^1`6<~kTV+knM6fSsDst+I$f-+|-*hTd%^X-P^PC?; zA5cA~w2gJw05UCt+fVWI5WfVHYovfATNal4&gkicV>{#W!0f4(d17YceVq6jaG=Xx zGmk%rU({cZo<$d!TYqb$yy?}mH~oI#l=;TX`^a}_h_b{tsK-z1a@8V31eUg0nSEYp zvw5M0PMJ1ucst7C@iVO87IW>qDTf|iv=+zvf$oF(>!}l^5ia!jt;F)|v(m?RX(8Ge zewiAkE#ZzOM^WvzIg_v=qnmmXxRRFG{IAIB$kQv28z(>w!Po-KUU?b#}d)y6LG{??eN_<-N!80K@)SkIKSLC zJ$KEZdE$Y?NgH-~vYt*Wn7 zo(eW*&3G|IK1jd&Yv9niWasV(axLWbetgwbm^@cPZlul|YChmR3r_7;&JqMN!h zMM~AyfD{EEOJ^qoDL0-EZL=fwjo1)8R9LZe5w97$QrF4DUNhZ(g#}_mXIbIYF9@9#DJ9U+G1o}Bk!hi< zSv4PtkR-TD@*cJBE)2!c3!w=eG@LM}C1Ct*GK*sT0ZhT>0BmpyJ&s?pxaKF2GUVz8 z=r4hK47{tC@W<{6%7|Z~J+R1Qo3-!H!nn|FNoU+WUlsc`<708%-~8pr?_cExjVb+VWvBsG7Lo8nJEFe1<#WOJ>Wg95-eCdtJn6nuV>Df-G22N zo2I-m1*A?@`s6#NtW*$NX??^dp{q|KH1~Il2{JYdrRjN3;*+}ch1y+7CZ$m)U;y_z 
zP7nV}OPd$dH3j}N^Fuv+aYxX}BmJsrD)m;-IoQT4b?Xq!IQCXv8!wbeJgSd*t(po_ z?@}Y6nWlBN_03$QXrFooasarE)WW1D00KiEuo+5AZI`SdBn-AQs;A$?P%y=Y4Clee6lg7T6$4S|RlrO0#Gfgw4&4_TZ|HgmmV7)+$WZv(o;GQc zk@zf>(F4!2wPZjasW`1=M}8;%7ZAVbeE2bmkas$}>;@)ua{tF4@RQDxY0=$u*@xtm zYA5m7Mf}qFw|wfHe#1x+g6wV68~gKO#76Wg_7LpR`rKhsImfWFUX@R9`kI>zSs}vJ zf+L9`d0TxC0F9FkEgG)NpK3j=R_io-n@H-AeMGNuOMkI>;_KS8o_J=MoK0rZ1x{@$ zB8puoC@3RqW7GSp{ykX>1GRTDymK=(zVtBM67%u?nd_m2#ph8b1d(@6$&5csqC4X* z#Pi&RmbQ6^@63d&j{aFJh=!c++0g+0pZ%2wfJOeJ!V6i?p2LsZW{HO#8U8dZZkOu^ zH?P-Ed51Mvw94G#2`Bz|MDX9~+uJxKGOcNBHQvwz*686gC-0+ixKc z1Xo|4HE!0YE&!LxA!_TG5w$>Fn_m9_uCoU1UJ^Wvt&#IBK|LZt)7_fMLX>Kkt^2Lk zt0m|Ua#(>LkfgYckWKLdyOncUMC7A102O`Z$x}Nh*Vn$?(x)2t&SDo$&x`}eK;n*C za{LDYl8zE7aiy=)oq=PayF_2|Em7E=Iqp9Yz^AVrxP$~`fSSAQ>6FKI_KSu4?73#% zbx@5CE?scDj;xu~Sp<&fkU5G?kje2Z=9$TQF2+Wc!2oaaa8fnkE53Thx(mL zmolAXPlK6Y9G^ZT6yy)Jh>1B%9MrYmGuOc4zMC5fgf*c*@obH}P0|)Pz zC%;n;O&90-;y;#zOH;E|Y82W~noyQDh@z#UpP1=8Sz5k)_^IBH)?jXz^~!5|y-D<4ww z`q1!m-&&ni8L;V)K0FscGxQD;21aNbu!~9K#7RxCps{&Jb3PMJa{il|R`d3jH6M2s zz#C0u9P&{Hs(m~gS=K?r_^AOdr0#T+KRL(rhT>~(WVs}8B%!VtjDO=~>_&b7Z{GZs zc7ZXRN0Z_%Ad!Qesh#!~CQa9&c+{}b+ERbCj-s_^vH^aeJ%t04)nm$t9#^zA{+bbP zyL&e&sT(&q@pBFHEHcF_(`IsZb93<$ZYGteC;6~+IEy9Fc-PJ(kn%=kA2^Q#jv`?i zu0WaDZ+hnEb2dwhC>6q$3ePj#_Z46D# zl1LpiM;u)nUXYfd^GsQIwK%}P9=t5!f>w5HskpT-EKTX=wEA$nh!pe#ur6Xi!%m+R z*7Je5j=_D$y~~aWETbkzqz!smd0-f2+i>bzJ%@!5+@~MZoQw%=2~BQqRzwF1s8zqM9Y-`_(=gy;QXN$2HcnIpbuc-MYCx_&Fg~ zs87>wR#1~)CZUyptTM)AeNY}qlb~2&)|G5VwY_PxWaY%@Rrf}(bk~aO&wx4k zS|wNMQJ{sD7r#DzHYEx}{=|lc>ETt4RIiW1fjpeqxcQyhaJ~`q(!d(9Xk%<{=`ac8=()TljMZ-(u~88G&w|Rfe-fCr1>_X zIQ*nmu+tJi&y8q9We1u~4uLy3IL@wj)cAFwwgjz@0clmKS}IyEE?tF=GGq8>?Jo8g z5&}Ngc@@Y&Y>a=idB$^k_PSUC!&rL-5vav^xvJJg?V;JqaNty4j6R9_5I}$L*`{Ts zog#{DQaZ@P>lsEf>iKpv1n1BV;y-@PuD zSmk#m`n&uEtPR_(nEWO_-C%B23Oy;agX$U}hz3Qj9$aO?F&`udME1Bu`*kyR^#Zc; z9t%Ecnk*r!8LIu?2fyH7&4!2+rjh(lZoo|M1fDzM9<{^Qa?gULpNKgcu3YvDuOGpf zc#VWl14J!}jvP-3Y|M+%lnprM<_gVmlIa^UIV?dH^KH6cBFhp5Rm9%^LQ4Zc+DT;( 
z!fDIVxZ}kqK_!u_UT@ICt0wpHDtJ(wJ#S|~Yjr7cZ)Q@lerJpabM(i|G^gzru2-_7 zcFO05PuLW7WMH2{>c{X+|b7_W}_Ip3i ze>}d{&UwaLMD#MkA?w(k{fSrY&7Biyq6?4+g zB8%^(&<@L`sg*>*#LDRoeC|CRRvf&SG><(i9psc(0xS3ezk%r&U{AJ6^2Xq5Z zPB#ZME*fW(XsBPcfV^3_;VED(OLh8q{WGCmvg22*XqqO=a-#iQb=l;$$y1(~3mi=R z0Yx=;UEKDDbLH5QTvC@2VH-_fE4Zn#$>_O6|KW_#{hS4qUM3;eYNA8 zXs7C{#&1-STkznQsFR04XH(csf<^;3njR0g7Xk>;hqc&2^p0Vj`y%sb+_hevR_Yo# z-fNWKa9}zy##&1lrXB~PyeW`T`=S8{p7kATLYZzg-%KKMMK(PBv8}av{i-_~T!da! zOnaLdpDy2PY=N(_f({;Mnk1YMYaz3$lS=P z;GX=L1?hbSklKl4@M8#AJMv3(lH?%3`tQK?)l(l1Q(Zb-=X-()!~l)LePv+aDGD&Z zN)rck_GMjzqJBf@t=CVHAvyu^-!pQe0wjFSi48g31?Aj7qH^i&8KZPMtM*Ghp2Zeg zuEoqg)oqk|s|qDpsbm3jn~t1lvIR~LI7?FiMF(sJvfJuks1**KDGhkmy_$0JB0z(-~Jk#1L-_*-PiA>V~<{zcukthv;u z6vcdJr)gQ33sP?H`7mp(8{S*A9Mdn%{r`!OHTHlqym(h){w?V}L=ZkqMk+1}MVo&R zM8UJ0wMl?Tm;?BvWNPm)Wl6cVcF5(qi}=vAVSu5eYIl$`ZHe{%vt z&+4r+?Wg~n1(3%S4cOAUN5!pWj!XcR)Gm#ykayls@vv%J^sO5bMitk+>0I}x6kdoHVg(_p~QhP=9Z{jP3Ek`ab+HqozNI z7Grz3ldj7riw~y>D2OhOC<=`yAJXQBeik(=PeL0hU8`(ujnNTbW7@ImcuPO&;z7)9 zeVuMTJN5j=u$)|0(ADr8+(Cck7@;QdMz`Nlyn%!<{>*qM0JSj|wCQ`eEaXq!My(wm ztey2f#m@%jBonDz{UT|B?E4(;dN=_6kxEup0IY1mThvOew$rHfU#Pd@Lh7wxpUaWb6 z-Cu!ty}lj+{==-WJjnITb9E`k4zzl27Wi0w;pu0hRLXyr$Gn)oXM+FQtMvBvNMRH1 zTRnYZYe2w(=dCQk(fi1!-cl~%S{3{nW`U`E;#UG}2$DrDd@g1DMMSwQ8q<+N*WYc* zpQR+f&>UEKkMt44Z#ifZZxnIlJ>|JjQAcKCQdMe&6drWnVO(SJ9XSM3_*mvqRan*_ zepqnGdWfh&yJT+MJwK4Al~cjgzNRWK0@+e$Sz^7t8@54|5)}Mv4*mK$g@#Fv&4ht?i#S7Bz!l&trHnPXonC7oUgaPN*0 z4+Gtz#k?W>YB3+(KKjLdNml}M;D8CqoaQIz_P(lUSi9PV_{f9G5*Aw8C3pDK25p2$ z)xd&S2#i53)4&l!`IBt%D#^OpE+D&~p*I}W`kC%6rD<1nJ#O`CUPyYFS9I$LfMasC0k!%C8V;k#_;bmDu@F99`H6U5Mn0_1HIM83<%>%;AzW%r9$d^s z2&uRvOUG~E-WDyGwHuNt(P=U_sVvP2=uqdy@^A>VgW z2SG!YVr^n63jsPros}9)#A(C2R@R6>)@9EfXj78qg`KIWopl!JPfLIA(W>CDauY5? 
z-j4iC82euN+uRN(LL+L+^{M)Iy5Bf#WnhvJQ$E_x*jKY=F-?*m|Xm zkuOr4?+pSkPyA=)(sFLk!qCTuP(7pE{MjBUy1|tn&eS%T*!~u&o#SViJ}5wW0HUQa z{B=k~*5HT~ZrhjN^*)T=FU}-_8u*p!>qRgn`fZmw7^(mXT73TCuUwS+`(_87iH5yE z6R>DOn>nj9vPqEZ$Yv>#jK2P7`I0EDjY7(qHLHy56R7L%ZJXSxnli}&ESp8Gl+z&V z{q{_;S_R*4(;tzQvXi<^8b1-f z6_f32neeS7ZnGeWvf!XQl`!xdgQ>UM7zWMQ`bS<_lU0dwh;rciOMeH#NB*+%=yBt_ zE!C56+!35>k0*%Ok2w)aIX;A|dzB`UIK4J}RZ#0qHTZ{DYOm`Irj~Jsx2TrdNU^3~ zvP-gr;(73Ht8Bn1ZNy4y%Te3-dK3kYWJmWys=tq#tD^3umAVSC_(%${+`TNe|s zX z71LDEhS){v92e2$OKtJ-Y~xgj_+9c2foIBqFkB-vdu`ebpT~EfTNl+XJ)Zdt45_@& zo%G@@OzB;enZ2oxi_?IZ!U%ouMQG5ZAD993P864j80D`TBec85mdgmI!X9(MjNi6O z6J#F5VKzU2)0J1i_$~G05k=b-s^25k>X|l}QoeoojVEsM@H(HO!2`+!z)cU!52)T{ zI#EiOYwOo&Ulf!zKV3NDUnFQX4=Jn2%u4c|jBG6pu$~97$P%c!Z*kFg0aZjd8JP9? ze>?A1k))Mqyt@Ae);CkkxmV}>VtL!N?M1Kl0peQn+b^H@P{&bi(@D`^zHTHnb6v0L zDzzo31IN`Le@oA-**MGYAd#ktif181+24jG5Lut}r2Z%{#j42<#F*Bf34${q`qj^F z6E2a%SEEF^9kX*26GPsErCk!i9qpM=XQ=dr4M)gy-KFqX&OH|b7MqLcjgs@yg$GUi zj@{+tp)D^Yjo8NX)e!p|>Qblzs89hkwlrO#c`f-(C{)1ekBT05x3(*$6qMo6k zIG;tgV6kWFDkZv6I(R$MmyYHUEPpO|Ggt@$K0)?nPuh<8E@b(_qJH)ecj}``l9hTl z08zmH6K;WZ3a9*gz*Va6nHat%kYuGN&g~{A)h3`O;o){5O=%Ah2z@+x7HOsyp6DLd z8c{25ytiR+_{8+5z-^QCZM<+Jj4)z?Vct3Va(Fdd!DgFzCnsw@ZFtBd_F~w4SU?c2 zCAo6#@9mQeH0ykR)HDB&MQYWvBLIRH2Q4&Y3MZa^&8_S%=3Z2u^!g;7#9IEz7uGD2 z6?yFT%vR+!BcGIIXWvybkal97&NC!=RsV7z3lb4+B5jA@le{MbSVyWvX_Y;I-z_b? 
z0TM0x5KHf(q--fErx~jNxosR1(a%noGzXq19(aKnU=^&UternBxte68l>>l(k0jEB zhrji5=8g315t>`Jay>PaJ@>)53=-_YJ%@HM^x`71?Ay4fT5Hrg?z!wwxCXJ;^S4-I z>0E-a2=9R8ZMW0lQIk%3o3UL2qqD*tu@nzRE z+p_yv9tB}p%!R6HOdTU?b-wnFeAgG50Wc_ooWYPa?}BTBo8(bC6IbYL%)a5fS+=&n z;QRvrI12gFT4^Wh=!%x#^&kES zLYL)pbz0Sm?*?kdIdvG>X5w=$S)(MhySu*~mu|ij-%Jv>T)0fmWkidyj!SLGxNL37 zRWqc>Gm2Phlak)=1LU*sWZx8xi%I8_I(R^k$ln$U?&T$cxH?kXiuWR{ID%VIi3Ic0 z$+@S8skz_3Qp_EXHcd5YTCoRR#6S9I-FjqB1$x?Z*aP30B<@9Sc@EEa(-Ir+J4J9E zC}q)^F>`426(nc&M|QSmqEDeRymRd5o>_DJ-bJv;ypM0VkLv{h+6nu~=pPS#Ya;^Qy!V5Y8Usx1;KNP=jjkk5TRoqB0i z>Vk~vPDHO2J&KfGGpN9EwM^V@%RY3s)^5IeUE^yRdU>EB!c=0$)i2?|{XJmi#9oEf zN&cQhBMajuZYP;D%G*v1R79ZJci4-fTnmeFU5?r?pOL9O)z!^HGV)7jGt)=YtD1m8 zvYM|inq0C!%6&?p!8hOo!_^Or^$}$O;%B#$b$m_Xk7+Rmapj$F2qpH|D|=Q-(~!4M z3-5I?BHnfauiOvnRVTHob}yc0gYp%p-kg;{8y8wH;cPYX8kCAnh2IOvsB}c8xZ?uOMB}RWnvV~DIBw144d?Xogi}(!L1JxFIcvkksXY9`Ukzh zZ=YAYUTTM_G^GpUj~`C&DKD_=wJ+eCDk*Z5A)lXGH0kHD!v%nqJ6ozV-JF(}#1f*yd~0`4`< zGq5ZdQPUKN*bT}h0Z8RU)>=H|b2s9JUB4tJ=Fsi$@UZhK6%dJ)`1MO@{CVf$-G)V^Z!0lnZrvQLBR+Z82tmM66fh@revMRQfe^2)2owG|VEx zm$krRg$4gp0O!R#0p8c5xU1tL8!JEMLO4#8{(&L{EWUZ}B2qu|5tURK5p(roG0t6y z3U$A@8)&SrF@UG}~p$?pg+QM5^4BL#f??n+=d5;UXo$N?R z_V+$SlLlB@isIhNN;-7ZpehtFd@#&2qsY=8a9b>tnmK-K2>0`A&8VoZ#9IWmcAGc` zP6_XxEr3)HV>}6sr@&v5>SwORrl8&T0gZFq;V-dkXYO{dubl)mG_Mh53f+ApJ3kS& zQL+k*Pa9D=8scVe*37-RX!gHUEhdt82l~f@evAzw z^Kg5M`M@O8=I3FSJCaxp>|DW+`T^nwb3RSenwKX+{-JS&}$Sv$k!oj%W)0CboC_BZkE6?|G zu!9~pVbpzR`fVSb307E|+Arv=UEipaMAa>87bC>Gh)^p6wMu{`u+qMn$-COOHi3pUliwHXwK$7;4Um zPsRU|Rd-0N7dBCfd%~uraE!=izbzZUul~2uUSfX(=t!4nX-Ucdu`V-#c`>;%HG9M; z_C;n;BqtJcI8Is$10imrWrGh%*$At}@3s~*Is zRwE7LLT}7y9vnW_+UD< z&G&2hQjmT`G#q&8&9qk8fh4EW?xpb?YB_7uHqK?Xz`YOr9#?>9q2n5Lmjiw9lC?le zW#CCua~adPk=@w<9)Z1m!+(Wf-p3C+$F5#0Q>c%`44dEvgTWdF^fGwVEb(}0QN1Y$ zzrm$;l$ybXC;w$d%D7vQ(uQ$Ay;#eTw;?2;bs}m zs2S+duGst@z8L(09_AhWy4C14mL-wq(OpT0@^MFJ8G}#BQQ_#(k$c^us?< zN+b|g0ft=;IU3fOXX}jz@0JQ~vXMOm`D(R>m`XBbKBzd_vHPaSQ1;?1`m;aQ#o)t< z`|P2tpMS;mc42&1o51GLbeJ1uTJ^S{OR*B5`1;e+&>%ddE0X-+WUcJ>C&2KJc{wVf 
zE0YdY6;09;(lrFc>ZTW-xc&luraIux*X#yiZMD7pbvXEE_*p|ZYR}?CeNS4|A)|=) z>iZn%LT%wH^pMHGd!6kv&C+ek`bC4FonVlc-0oeZ*g~nf`Ot{K>?iPXy821p>4sl8 zKAQ$_E; zabZkIxl^Bf*`~;imHLsd-t>@MSpIQoBKH9Zzn$kpm=p%1UV&58RSl?#KUseO`DODD z`^kzx073fSnv4h5q}GRd&3qssKYE{N*AixiEYAK6L#z?|e8)m3R_q{J#-0joaa0S2 zql0%mu9nMYDvdyhkfr)~(P>lK)jZp0Jc?ic(iF!kdgyy?H8CGIRQOs6q-#~G=$cr<{GrZI1 zy|B?$Awy2a?Qg6=jAJH~ywdO^Ds&Q9WS2{a^N4K;BUO<@(8*v@gvxxd*H7V{qLh95Jr zXC0NaGMK7Jw)r-mxK`K?Ut!=i%jf(^c2KQo;-@R6?8JWyNE6Ro`V;32P5Y`A8F!1( z{Sl+~axPo=1jyAxu1AA5msJ}6nZBWRZ;`=Pi6l}YWkP)$I=s~wa7p_g9xO@Shdxb; zCKW$hrcSu-W*yr zaFg>s?5l2e!|wJxfvMH|eGfy{Qdy55u_)g4+C=08o2+DAv@QMp8!q42S3FmG(X0iw zdNVL@$vIU;QZOkP`JiaVuKv)X&Np}9w8P@jlJ9rbN0><8IfLnUFMKktM-l_-kt*Za zIm!y(A~B|i^0N(oU*>3Lya{0APUvipx;_lUxeIi zni4(zcx>H>f(bcrcS7Kst^Cu!!|(O$v#9VKeDNESbd;HI5h{1YJcgGh(m;M67vQHs z0x}OD4jykXE4PfyHmAp4eiklkg-2c}sQ6-9R1hTC-drk1c=a8p4gm{pR>r)a1HUrn zy6;i?UUt?q#E`M&BRb_aWz`Z$sxK?+6 zP;ekCf^qFo*4G7|w#G`7-iXj9nSG{Cdp;87aak@f)^1;~< z)I7pub!4~G|~ny zQ$_6vv4jRyzZt-lmWv=v*uyY!a&j%GAMb!>>79oSnH!_=kCq1oT(Vf$czo0%* zeJ=q{YU?yfF6CdzcMD_zYj^f-%ypNlv!dI(moD&`TruThI-!om4MZJuWqywc%UEaM z=~!y4GCwej=vz~UTlzt+;@z|2x5X)$C$dh$&O05t8Gl8C2zB-;Wxxa*)!4>&KL zKc+POZ#T2c6=@6H?=Z}Z6%(op1vJ!xTn7W!tHc3+D5g+VCtlP7a^I9y(bqNF=zW#c zA0R$)7>EN-3u5Bm`hNAml48aO2)^KCB8P>f-JuM5c_>b?0L-un(?uyL22vuabX@9o zn-%N7p-f(S-{5VhhPH0AO9?mx?%`Js=C@zZ`2Cxmeeseu&QscIp|s>QY(Xp%c2IPv zW)T16H*yWlXu3G_6E6~YL!})`?Vv9_21jy3e;lf1xHz%4l+)2rEQ@6U0@Up5?C$Rm zbIcf#V~{u#?=KA_3;ry=^eF&?E24InT=-}xXT8tw?go1>1$~sjiLCR7#vlh~k%RJb@1yau=TfF+mDz;tBwj z3h#(l2NLXJc%^C%V0eah&M^R^eKpd)v4D%L?ntsh*qYx3!UO^d@EK$^&6YBgTkwfqbYXs$Qw+17#lE};IKl(ud=r^J+#jE4ca_;d6 zhEd6vHF+M4S}$+!h(I64HOf4Ra`62OAN8y78QTKV0@5GA1~0EGkitr74;c1^ z)`Te606$5Uj9sEA!uUElT#k~AIwV~bP%u#gBaM@R>-~h2n*ZC`+}>lZlF&Sk0@|`(##;IPz+4;*9i-SCGA{w4XJK1XIK-!~rcOsE!akC#Q7Nz2{+V#L>K$lLIbko*hr1EuQ?`L=g| zpsQgApHicYV(L$XB`g|;^C!p7zdkyUAsyZSKOf}8J=%u2g5q>B&V&z)qXv{Ox^ZL? 
zM;NUS;%zQwe%+Gp8$8Bg)~NSU15qJ@cu|87Ye^x}cXL3oGK`mf*0W{m?c=T?Ny#x} zcD@;x^k}$iER|ZJ`}bEE4y=rizA`2kr6wgY(l#YJm6j_C09V8003DKbToEIP;Vv*m z=CD$Sj9VrD3`iyU^PR`-byl{1aU;CC8CUWZd<5H?%Z`P&KC;)Qw}C6e4J7(}+Y-}n z7@@Q+KKTutn!z)P>_v_)4-QQ3^YH3e(fGbN2OGXS4L&D!w|06B9=iwE&q};+tx%dY zR9E=(80uiw9sFCEz51^f0=!mV7ReBc9$h2WANg`l%bx^n&CR@JZSDOUk+#!%PDx(+ zNN2ed97_)-O^aG2r7pw$=l4Aep8#{@JMN>I`w(J8!xx69eD?tKMzTS({YwsEtZwNw zI>M$Ts-$)4&Fn0_`#CXVN{&1GhgiN5GdFM-&Ee-u7H&kPTz!vs;s=szGjyym^gv|C zIjY*vz0*D;X<7h?N@L)?h5Pb&B1_Gx0k}k=M1&q+y-ltAM{6q=%1f|iEh`)vc327$ zgp$2eoG82kyu+L`|1!m~Gd_Fg0R)37ZG+#ytwn9{XCV3w!LZ=x8yB?X(|}a8a~5S+ z5xH`9=#H#<2tAtM|LZ?i`YcYNuEjs;ROv)7@*M2GuJUHIdW|@inrb*StRnYg>uvD( z?3f^)R?f=$4#k1QsV2HD?Nr-4{t^m2FhI;X_iX=kGU(ZIz&}D$`{ZPK--`Xk-LB~1 zvReliNh?hb=u@bMnpTv^@3=*46{%g*_Ts(4S7e{rKfq$gc!^U7e6Mvz(U zpha?kc&bvRl=O$C)EmGN5C+nG?UYFD743W059;r#&lZ_)SQGW-2CqTc{9ef5K1)`*e+ z`^6|Wj->3YcXUNO1U40;D~Fa>>yMjT{12ym;=8P}i2iWj-SWzBC&Zj0{{xfk4-2eh zGew;KnKCuoD*wzzQobbZ;twj0MN-79{-ISMIipDZA$$G9cf5aG!Vlel*NENt*a{%7 ztiI@DaAx7-+L)!kTYri5KXS`w*VmTXkN*!@Zy6Qk`$Y{)w}7P5NGTms0z-;4h=6nm zC?(y(NT+m)lz@nIH$zH^bT`rsG7K=x+|SMLzux!5^Q_?mYgmg9SDd}iKKnX{$cLSA zoegYc^aqFBZYJI8k6IPNN!Pq%liSz=6ssA*ffc7Vd5TeT;4iGzf|9-DJ$YK{#5be8 zoukLi(TCHd8*g0MH)!ccsJzIuI1KLkL^?hJp9wq*KOJv|q-2ny@`on7qWSKl-e$rM zPYgozUF1~zVBG^1gIg)JNT$Fh*JhN=zb$~LmZoJ05$wW?7$Im*;QV>?LH!xi*Lgvx zq0n?nP|D{6r`(uG=%0u4;!_DSu#7EW?;eln(_uj36d1~Ec0%T$vT|?3m-=~SZq(t_ zQ4S8iS?jOe_M zOyRBpiFV%b7GPC-YtsszpLgCO{PO|Rztkat!!`Rt6<0wr(Z6$S)$)`+(tIr>d6P+o z?bD^km5xVIfF+7Yv)~U8anYkK!57@Xm)GuB{;!vs<;zuy7)$PMy@pr4y^(Iy4e8Q5 z+KO>upS5aK72j56V~_J$X}BtV6g0M#@UrG_`<38?Igfl{NoH;>%Oot5$d9ZxgH4V-0+r4%%X^82)vk$69>Q@!$J#Yv;gT_TYYnbh*1>9UU~}o~%f>W{C|y8hq)5q>-oLpZscDi(EL0rUP87K>@M9nPfhI{pP9Rvt@hXmxwZsy`Bl zb8#Hzuq4DLcx#m;{vf3E^WoRkJv|}Q31>Cnm{XT zT5XQL`hGC|p9ObWFa8{o2*)pXF@WJ(EGo}rGCmB00DRW-`22w&To&9?`_i~unuc@k zuOOxi2`Vmh@mu?zoA;_g?o(hHa{N-6!IS(#oAPA8T9k3c{iq-*&4Eyzy6KPOp3v3b ziimy!qQM>Q$bj6FFH+MN5810xheh%neTFjSo^a2=D2u6DG{eagEgv 
zvqGH823A+awKA)ws=WZFN?vJLRSVa=Rtm#>U&+(@Tm40c}syRzf7- zp9kA3cK!rBO+RMbvkYLmmbn)2HO_e-oMIC$J)@U|$MXzA1#r*pWm^VV zSSfnmTWm6=UGvh1ItH_Ek|PK8CuEob`ObKckKqsCwYo7eA; z_2#K_yfD~NDE-q_1(!{W5`2^s7%;W)8b1kt*9tzV z3gOb@8*>)F`Na z=5ZCg@Mhceepvh@zxmp22lQe(muX`u>~frEjK@WgXDkxv`_cbhskSrf&03W6bp!*J z?4!3wj|6j#W%( z;40OXPbOm68^A@i*v|KF?UDR$rzhDdrPVZvf5Vk)!{4XXRblmsUtVW7sIL3_{Q}MN z#>A9)tv#wA?#cAJ&Kww=U|yMZ{)LuyqBrlphGEKJtnwex!jfRi1D#|&cHn&M$8J#I zcgCp*Lo+oPJ1dj9XX~%$$OuyFETL{VihnYlOxtS{Ojs~tKQab{bvmC1hYj~pcEWW z)_o=uhY%n?UHoeN9=seG?nELRkR7VI=v5wR5EeUKHdGw9Au0y ziGn4Hj&a59`W?_hfgy&l_pHk>iee zWb)38K-D_c;7k|aM~AwI)6w^vj)e;^cd6P)fAt`2KwF6v#90x+VKqNtGruVTfxIZo zdeX|bR0a0jJ(G$xKOnmLZ~n#c7&e5O?dwN`E=cu#d5Unsf>=c&za6?N4(s zB?fgeWd|eUH}HLKQxGzQoBO#Ww>job4cn0^YReA%0dCH7Btomns|UI2_pGno{BQkGmL8}=lsegnIR;rcHmO$_sw-6KH$)aLej62#gbkd z7*5D^C@l`$&`{i)Z!br{cHV9Rt_Wiz8gf_Z4|O{n=#?O;nrv|> z!-v&HXK9~x=TW($mwBTX!)DqCAGTh-3mbY7^uf~%PI-W3T<*?Qhov)C?Zw&UUkf&dQxLZIt%==sAT z605(=K_1)3D6%$zvDjqH!8~uFALX05HAvWI-);B0_L)`Y6{F~E^j?aE&))8gnwiWJ zT^z33jmB8qf+US1iB(aTww%eh!DuMW+1wU~GD}1qYd>M%vuG!)gqdD4QSgh41(1O= zFWhB_d1+CN@ZH~zYX$;8oI!HlGHAKa!j9ip|FgWs0FN3jhgw!2bmg20c(ubvNlPX$ zsXsS1rEud)>6UPRUJ7+13kfUl8`lmENIkJ*a-l&f&sB{-Elnz;KbAP3XFnMz?dV5` z$b1>oHi@>p6rPAWv_++ym&7We3v*jzZt_0Iyyk|oZ6!eQ>nX_aYm@X}q8^{1E2An7 zSMVgBg4k_tHvhKI-2Utr2~Dt*{>OmcnxVkL1nFf|f)`e%w^RS#fTsfjNxyAr5|`#3rCNQWB-gOM{I!WO zzV2TM`6CswXiqHy7?J{>-uzvq5K3|loJcg@-5&vE>by}T)Y$#?uKv{B7Eac7=AvhE zn_$pgFP`+QX7qI>Vr+W|1+DY)I4G*5jk-7~h`<#w?9A5R)WPN_YMTCX#`dM}JM__y zs95SCN%AP}i_WYiz7tAmmkSZWJPXkm8v49u4L^5-(o3R(K740PnU3X@9vvsukp7`S z5h9dX1CA0GLzRN&Gad>ou@4j>?C#GQrQ>v&hSe8gj-HkiGJX}?$wbSIUr>Q3$st|2 zOP*}H*p?4eXDv*HbNBjjSxXE94+Jr`lz|Pw>}VvECHeNI-F~WdlYMe=8Um*%$Rmft;5ByYfoq3XIkTAPwyw< zOj>oxVSOB2*YAvsi@g>~b9aDDJY;R<;Tib)f3&D;C@aeK72MUvvMy22sL`lG1xI*l z|2UH-?xH2m*v|&f@|%C-tK1gcgO+Pmh@;D+W1wq9eEOfrGVu5356V8XFeJnpTIHfd z^zB^JrF6F>U?$@pK`By|)u2S#6PSI=n)zGV1Aw-2&ng7^G{R3l4jkrc%*zOIPXL|} zWHs!wC14(>MxS5a1kf#o+%SqCbUauJKwvKc?8hg>;_9@W_29BM!V574y 
zIJQnf5YN985uNIIi+^xvx-&^*Igf!vo`mgNu)~?mBPn$B8Q)v@+6t2iHIZAdg;8m$ zl9XaXP_k02@9VttG8t@>kV^Tq#~3f9unW-i0gaI0!yKHdk@H}uU(e4i4!Qyf;+m*w ztEhF;hSV&1P@5A@>a0!u{EBfz!~-yE!_C!Lyw=MlN$kNK4;=>ZHBhO()Lx}raJfqQ zX<4A-$)xVpZum;g!{SyfNJWs4rb_O_9hf47`WmYKwcW|;_pF;#8*cFtgx{U(-E}W*YzK* z+a;vA(A51K1V`es*n|Un19OGZ^-fh}X%448R|Sijb$-H@!D@^TeoCYv3*JLNYU8dJ zkbhi|iAJ#Ruw2r+l+J`PrhsrcQll^U#>pC*y%{XtvVG zVs)dEhRt0=6k)i7^LHMip30U~TGq_^!Ga168d^)C7eG^g&$^SQKH0d4G;<2=VqX-8 z*;XTltDeVUF)Kii3ViWvZqy0_Z`t$yU5WmkpRn>=T?@n7{uRsc3DP$$x1l0Rv_ALs z_Y}jO1gF1+pY|6Jv$COVF-ZN6#7tI|Vx&o0?yjflcjap%Z;|@ps|d-45f%W7c8R zX3r@93(gmuKfRchsD|8+Nq8u%>Rp(^LTU-(Clk%P_LVq~M?Qt4-ClN_m6>iZc-Qhv zEd0be+NamPBw1&wkem_*-m1h^ih`qcdBhT;N2+wxz3$QBd+n64FFZqBN#6;=0>=XJ zA6ca4pWpf(89HLWzMJdQDGL7~{Z0la3!|tU&o?eij==!F)(q7p^$-*Mxm7N0PAXS; z7p82is=6K08_jn7oc+hWrtT%!SkX7i@Wm^)Hl{19%?8rWJP~%DEL`1&L+$8Ru*EJs+s{vq3hn;Gj<1Gy*ilPp z*Jnp9PD zuqq-dC{vwy+lsxkler>VXYy{gr6Wbp+pGqw*mwJKId3hsVS3WIQ#HR`c{XZk_q9nYi}`GY&oelk|RDY2ab}EoAp(q`)iT z^5`3hyD7yn81X`p(ER{}s6Ki%s*U=;bDxRx=nV3XY9e})$AyvSbm6&y4F2`!kb9{F z-5zGkfS!~X7YudY6`+5!Z1FOYMf>s)wZoEAi_6XWmI8B~{~K(NF^;DX@4=dGYd_uQ zC6~}d|GPN!HYfE0x)-@-0Bk;wi1gY;Hm2t`4s=4ptJIw~H?6 zK-qr5Sctf8yrd|FT6UJb4%1YAlw78IY{egqbMpXwCPNNf?D^h|3F_#ZH;knG{Ws-< zF$7DBF;ZC3r%Ue2MuI8j1)pyU^)jCx0|8=g>{~KAj$Gdf+LPUYRTt)uKib^~{2!mh z6tAHn9d%_L+0q@>>K;;&NJyoU1_zJr8v5iU;K`o<+p6_~92US?`4^H26mbUu+rzU% zyMyy*CpWu|!DsN(Ks1dtg{MPQX$N<>bL}4By6jK6gqpVRF_{eDzN}UJ-m2^Z_0dKE zP?~c9f?xJOKzVxyC|ag4Sd6?1Iu7x24B;81E;#1n3 zy-_?4O~^!ao2@lgO}~4;v{q0YPa)QfeV{(zyzH|zemOC8`oKn}`W>8|Xe?O7moBpD zQep0*vWde#p($aDz?__4epqKfE*#gkM$L6(u^~k-v)5&ginuCFM?KO5>(49F)btCS zOA%sxaE*6&vdh;yY=hbX`1zV0jX>LiwxURZKmNX z=f?8kf%#nNMHlsIBDDKrS1MGYw&gERV@r>m7 z%2FwV%uLY@oK$n#DII*$Zv*_?3Z(V|q9uLD-rYPS|9dQL7`VTv@w93In0mjt^ffHs z-_(|7nq7G+s>N_e8>}KSNK7E-p1!{h4ZG2F(}D}J_skRcc2PV{A?GhUTrru8@u%6! 
zm>XKHW&1}E@YTYHs7A&u_$!i1C!Qojwpi)F&ALxdm=7`X z|2MVZvTJZ)M#I@N{Ch#bNz$Q53JHx-xLytzKn%lht^ra0((L~(NzmOT!Al*B21TY7 zo>Sh{+h6f3Z;Y-&hqmw%ZXV%2xItST1Fkqa+<)kYDDaSvq}&U!2u#xt+(emmOoVpJ zw_!I|YZgZnFR>EITpGPHK9D_N^|T#h+b~Js8+x1caq~SZ3vuKtok>xYuwYe4l$?%+ z74BS}2B04Mg|T~E0KTiPr^zNOYBSlbKa~Q`b9MZfEos=tEeT_+!vP!a zDQDBsCh+3ZXwN)b)K60K!$R(I)Tm2;pYBhs_D?gj*~ME>EpjwfZB8c|g^v{UbR7$4 zC8q{fP_-iH3a3>h-y#6L0q6_K*IWxINzOenyT9k*yN(aQr0nM^^|jsPW-gfxJKJ*k z*+Pxz-emsT=BH^Gl!}4@?7Bym-;IJKM;&H113(xPveP(Ql=F1mpm{a+NBq}MO}>;Y z6TqK`RJX{^ao}YE$qw+GK<7TAViAU&#U4txqD_0%~?P=oaJZiedJj~1F&TEx(m zhgd6)Ncufq{f)MvM(odWNkd!=CfQDknn(+#i?WVjot+9(s+y|&A!AVQjw4|4v|8@3 zi^AS}Ghjp)fAI*$DihgjYbyhl_TxM}A~}}NiwO+B{{j+&b^B&@rxV3=5)$k2wFwFM zxZZ9GWWLAXb)PQIwL$Pl>H^Oqb|z4{4W~cH%rrL+2Z6;zcw%ybn6i+q%8gB$^Y&Q4 zC|u##xLxecF4S?RnCTbFoROz{VGK_$8Kb2*bag?Oxm8&fUEB2Hy3aiys5?d2b}6Z`~=aLohr<6I|?=I#JM%HkPMz8sXa^ zT}hlD6Em>0?7M?ca7w1~2BQrO#T73_I#sdgQYBvLS*R!4Sou89prus_DZ|77LbU)4(zCce?2LoLqx8{w0qI9EsrH|5vvJJl-oHk%~qFup<%4F4B{Iw5?*` zCU#%WI~`$mRMR!U5Sx!K26FS6B#|yB`|WAxic3IZrf+B2bAeS-MYO~OuqCyf=1^fwrg?BRom^dZY?NCu?+3XNmcAi@Z^9yJED@pnk@!1cW>u{k1IlGJ>W&*IMfn7V zKV9P}@7X^V$!0XFrV7VZ1VSj(F{r=b><@FBN z(4%|R>2`lzQ0M?e^vJ0$#l3Et>v+_ZrFWs2CSl|)81Vu*AG-qaLILQP`kr!rZo3YC zCq7flfYuTUD)PvQxW6Duddik!)=mV!;zpAcGM=`uC9(wYW-jQ;9?jR|IC6K3R#ytT zuAf+{x_?`*?L9Jg+9((HIPvyWga%SXbVPLgK>sAW8MkW!h9B=0E&+gEE{Z*|mjQS$FKF9oR-{ONjN082#3wJ;sF&(?emF61$=vd9h5V9E= zdMNbDj3g(!p96$jmTJGf3yo?Ira?ux0il~(=q82KI;*mIx62xXqN$ppk0UdZ2{TPWQb=stneWanw&a@`eP8Us3pFOpm?@*~ng&|8b!^ z3#|=|=8X(whSDbM%?q~;5&3e0lS{&E2C1| zen^i|_Kffjp;CZ=k%(YMSz0+E@dl-{)s?8R#o8Y$D{UwTu1$@>eElBN8a7tXj6%&z z^W84OED*Qh0xw|#^+tTsWl}94KHP9lRLW*iKAh#RH>A-VK$)L&1XP*$QtMX-m%qG7 zr1MW>%F4*WtYb=r95ki?iC#c4c1%H7;{L>ZifB5_vvA5L%(hH&Zp|g%*)ne8}(7yMeRCL?kbLvZ%`=+1li5zC1HAhl!JEf8HaNu6q83w@@Z}-!GwohY#FH zpK~Li>k;TVZvcukUO^epnqn*}-`$YRyC5$t=#S+@ok2>}w!S3>O--`JGD@WfnaMHG zXkOdaUgeP|6vj*%EHH-e`t&WBV?xh*bEeaWc_Q%T*)%YEwAd&^%y3=2e7XI}nfVt# zFAsZjdZaLbOQ!ggC<|=llT5u~wV&^)4vs~OE*oMC7kD~4?jKvzMkOtE-FT5^{B!>s 
z^V$li&XM#@^&~?ndXgvTvF;6IJK9*xHOR+-5NmDYXYy+}Ok16j>s3(cM7tkDR$tB0 z5reeWLL_(6P-07o+GaruP&qP&g0v-C^iZ`SecmI0u<0Tj#rN+0zefy1V|?bz4^E1Z zFi}9)#i_H(L2w)+oI^sSYi)tzLx$9&&BCUebz`4G=G~c9)M}EUJ8-h!meu5w%Dwj_ z(8v2~7UK-98!F*6UNi5VAESShEA3PjecQ;Db6GgM{WE)x;uQsHvkNKVF_3?h@XWa< zusE>(-K?k-F63{3x|2-1nA8s>lvTLFE)_|~-u6HGQhi5X-Xg*mcm!xbP?(V3(XyNk z2FbO| zDCYO`y?N;Uc@cj9*<-)!zxP&Cn}_2`;OwL3ZQf=CH>DfV|1QU8DPhd5s-Hg3O1w6& zKBhPc^U3{2`u-#)cFOx2oH$CkZ2X2c%J(@#=cwp7#r4k1Bc0ui!Iq)5g}4*Bir zVITfbq3o$+pc~)oDFdqkRercITpB52sEZ+=$XG3M968bA^iDTD1an&Xg{# z05F@K@juL=rheZG@RY3xWZ`W0Vge)H9oNaT)W4u#DGh+z$*xcOGx1w!tw0ib*1d`xd7Z=IA-X?I*e`r2m`JrCK&%oWN6eHqXn8UV;=XL1Y~NS8aa&W9JjzvO((HK70XCpMA) z^1`xvNML)ME(P6_s3U9ILK&7Zih0#{+oSm3lL@YOjxZ`#5&sVS4tG9^sYv@t|3JbR znRMC}LVAXUKC_eV)?D&!?Ih1 z*ejkDu1^)`UXcjvdg-{AH*L#olNv%lq+Ngf(VCt%|73T%c}=jflK3lk@`Neh1ez)2 zkP1-jR-AsTaXHUZQP$PPB^|n+-InX&z*_;+J+WM4{dN4O#&U=Dp<0`{ z3L*J{30Z~may7RWKRSI9qp&G@NLL7nJ;dM^Uu_>4?Dbn(`ScNji5%#Tx!)R$(O}1o zn|8-@Gr}>V1O;g?zRL2F zdd=o$Z-(6><+E=mwh*7}oT3*hoBz1_W4_Zz(H7t&)_~d>@3{}%0N*;=pzK?W%k@RJ zbT#|TXKa$%jtnGpT~Y%z(v1R&UOA>=JV5dEdk0_J$1&xyUt(EBogO?3FxrByVr^$C z2{{Fpb{4*ImXD6|UMM-nIyO`A|IJybih%w-dhSb?6NCh2C7c%iLz3Tj4EMLj%02KN z4Bur72TZ4<+4dHR4?cTVs?bs%?6@ugOc{<#fg?VyB2ubRu#n48{v;}M#%#ru3Abjo z97pOilPTP=3Em`@^dM4PBUTd}TyP>&PcpzNEhMs*bUPJ(>%pc*tJExo!N0*EAvzRF zJipqz_9aSZ5|rlre5`bLF&d&UuEm_T4*tHOO<<)6X|S-$Yuva?$C7r|2kEfj@2s>r z=t2xEul0_*?+Tbb!$bSG3*D#jRxC;Up8l{Tqr`?r_ydE1ECoDs4K=LDSG|_DuUP*d&l&JJ-|9>UIF{9G?mG$iZghnkCJLZm(aA5@_U zYe?U-A={yEA8n+C^HWT3EI~r==Od3L}NpRi}jHL3uRwpf2JpWH_}nmMx*OR zT)A?&j6JQ$QwqA6jFSrQhJF3c&w~ZgWlxTxB}EQG+%l8-5+qvNsOVJV)rbnF(|Hs= zBdsU6DKvEtkCL=G#C8KOue)lQQ0)Z_tg{Mc&|}giQxVBa70g0mx=!ysRr!giXaDE| zRx>`z%6>fg$XSLwOm^-2X0D_8jrTksUDH*s4jlk$hYbuWNe#~oWHxQA8Gow>uHtre z43(`7LRCWvO>eB*`6tKR>FCi`t&7%}%vl$^T2C)LHQ=AF)cW0NGO&Gw!wCD$CG#-@ zA4r@K_4{%nOa7RnCYjLpoc`+d_K6N-%c_2Z!-O4j3#Z#n;B+UR9U9-0p9VZ^4_sgP zxDei$ac{u-a*@YJI@4F8xx&9-;nbf6{bl#RJpTXMC4N8)x{rb(y3<5MqOnLThefeQ 
zJ(wT8{*Auoeqc30_qwT&*n0T(1DU(i*@7A)q)oYFSnj>lY}A8f;vVs_sJ&Kx?Fv|;Tyx9cnQi}~L&66yx%;vdKM=I|2cpkgu4VUN4V#(`H ziTgDgETMFZ2CEfowg}A!J23y`2NNWjI%0tb2Kx8su{SDF#D4-TSl2u$&k62lS0aUr z?S0a?nYQtFNEI&N0eo1!KsM#A6Fp8Qg7IFp^teH--*;Xcm|Z-og9`yTp8wzv`IS8Q z>GmCLuo5sZq~qXg6}wmzm<8@_08jt$irfMG*d4%+&5`^xv@qj6Z2->S0ka6yy721H zbA`AY;9|?`3yETTj-@T+{`=BvBvwq{`r4%f^)WvWy*-=Rn#48k-f2@7@duIJ5f{`m z-L#_Q35B`Zu9)=v7Wk=Be={=~Dp({6RE zN@>@!Xi*q8uU5VU>JcwQMd^@L-g{+dpzEY(XRsyPj8#&<3|2a31Ih`NrMZUJo;eUD zKZtNvK)R+3+V~5}&q6MrD~A-H=+>x=SwA!;r%WaYE9aHA<5W#gvgGt#SpsMb(vJDc zcvkXGTcFm8_zpG*u|(Sm4G#Pv^t5^APRD#}`o(dqnSW@fEK4ZQw$KB4l`e^Kz#r8V{`a@S|Vq_QGfagQ>1vdqj z6-iaS-xs{24@*b3WmGXDWg7DrS~U|wEgzpIQ)y++F)S}#Xti>yi6!RnR52PS+x$X) z{!(XevJ1+5A)gWIhI2i!$`CX7=72>IMg<7`ZwW6`ZmI+`j7m~xW0he~agTbZK-3@J zUr3#%-f!3!MRD(cl@t;DxX3GDlI_qp|HIuQ*3ZP0_KC*BIgx~c8m+68DbJKdTN!cM zrGJGGUJ5#S+mnm?h9VZ}71bjzweE+#$66RvIYjek+GWmUMB!hx%;3yR%nO<)E<}zx zM}u8HE|mf&;O5fb=cNbfV`*qauJiMd%4sx)^QE^*8o$3Xw2`EUrR2|L@F&jC*A^2g zy*W`|927Xb$!i7FI=E@heYm5E=#~jivMQWHSA|#SSJMl!<18^mCqI%Zml-T)Nl=KhWMGTC zGL2{wPS9JR9PPReYEX((oQU>Gglff^qBld9$CvYdc9I3vTH9IJ{_>`v52K&hpAP!Y z2!ad9$u3S6fAz{jESuyW?TBR1K9dd|v9!9n;kp~*{a*=5pXnK~)+MnaXaYZ@m4#d1 ziI2*JM24(=15Y=eBzt}07tB!nB)b4#cfvW-LYf$vXOy20Ddi5;ATBjOIu#9lf$QF= z0naRw3MF+PPIi=vnR3ZwQED5nYeyzyl2##M!+E5~bS>VBZUy3r(9vJIx6k$=YuuSc zIkE4qry0d5CvF4I1!?2LwMhDV`dbQ3jg}$e6K#E^x1GdWWEGL|j%3648aRk4={wRh z7i!0F6cgcZ!~$A5lJ5t#Avhn-RK4X&$5PJWpqj#M4J4532`3a=dWRH0p}w*q9I$?4 zR$2G?P%kh604EGWXYr=BBq-&Pq^&?iLyr^NkcU%7U5i++s5+Wv5e20>uZ$Y_M1CXZ z*i-Zy)b(LK_5T%PZQp~n!gucvai@6%MyZ=>1keHaE;plJ<*=}ZK3cF)8PNJvxW$DK zRW{0rp5OLNpRL{0W0h}sV!h5C&lNe<7kHUsyw3BICgjo4g?TSUu@6Phfv!bftH)Do zK>;B_+LYx@P!|aM@ro2gm?RrZg$`#;mNgFCn`!+R4X{RQNg7%i>|{mebk~Zp(<-Ww z^MU=HQ29B7)}Dy|JL9WhZIjZ4pA!{R)Z&B^0~HOX3oUlsIbO}p;?bcaR8?2IL*#f% zsDl9i)z*_Ld7*V`)D$zOYGp^M01KR<j~-cksCYcfKI#eFSea2Ii+tUhtyiVu)?5HXbSY&;x`#Mi`+ z(E6hi!cG!SNPHnEIA#jgpKulRh;bnt;f-%K^vUDDJ_^Tgit1_t`^-~xV`44;1n=5^ zG|q|LG45q&CozV^hs`&o@m3RkZz<2}$bBO-XzTFSsc3su;^osC^~g{3m$)fj*Wra> 
z_B1ZbQ#&yjGuWJ^(}V}5L{(*;oAC6e)KI=LrJfZlXg^u8Dfo?dIv^M+OGblxZ12qH zBcFFt9Y3xgcYx;iRROn66|K?)yu((JSIUd0ra6wBClkGp+trHx4JPKEdkcK*l1>O< zZEG#%1Jlkd=0%D5W@PW8*7v@^p87S{zYA4hP430E-9|njsT7dIl^nQ=xY6+oJB0-U z!^biYZwS)QRd5FYFnz2HID%ZyQDFX8pt2CXW6YvnAGko1vvUxY>uQ|w# zmTWiasAIMH^jB9{diC+BAW(0>&k)-m@yI2j(>*%w^$U42KH0pwr!@b(r$RwMS#}DAlk=)Q>9)^fWlCD z*Nu^&*S#_?{0Xet_O}dpkk>@%-h*-VvSSZm=zb53X_cA&LpyLO#nmenQ5OwkxJr6kN##`XqD8noY#-F^Xkn-@R z=9t7aDX1eb39MfQqBT}k=XhLP|9eqUGW=bowzI@&?PlTJS->Jt2#4HtYd0}$DPXlY zcOMODsc!Q2J(A734#V&c4Z3x(NG=cGXSEr(D2(*BuBB_^HhiQH9&Z;)+TKjsgnM9A za!M=%D_KEZpMLkAZo8NJL9I8_Pqq`Rs&*0)@9jisf>e}7P(;w)&*$9SLcNld zuBVcfs>U8JNCGbN{yJVG4z6F8r%a!Ig1&NLjHCerYh~w;dee!6%**mE!6X0O1)_OQ zIqU@Uu(phE+!4MgwEl|L6X8+c)~HXiR_xzl6Gw`4in3=39B4cb}hY&IkEvI}RER zg;PN~A=l3t3=v{Rf1iV8M&!M2x0?+B@#;3*DUB z3Yn*)DCcE}QQ_g2F#^S!VazCYEg>K8@q^_)f)bkRoiR35`YWW!!zcRmk}+gly($@K~l^Nvt~5P-_AOQD6B{VI;moou`Gu zSBgc&%dp2kswWc57k!KX#%#L-Hg%SPT=5-S=RODOCha@PRbKC%?svmA58kQ<0dy=G zGuf6figXVR%#~6flfFpS=vR#V6d970m&gf#aN+aEt#P`un9Y9r-4eq(w?G1mBSpcB zE9OI1-M&z{)KfWZl*MO9!wk_x3oGV+wxFC(&%Y(Gyl01Lf=BTj9C=*F8+;otNm(@V7znx&M^*RnfFctp6N$Tap^^q^47B9L91y@hwr=r9 zq?8u5&xu$&T6s?wAwkIj|KdOYwf7hx-9W=MA9}31A)XCgR2>7koHqXhtj~9V^*NhP zwYD>?s`D*(j21`q+~@^UKMJVs8=rXKOml*RyZXq4ruS=5nb+8iMS)hNh=n=34Ey8k z*OB4u!{xgVX$z-2SG&hTBck%D{E0%k5*}(9k(8+uLlU)jb&}aJKUjip)DDCbPOg_9 z<)sqr3r`=7&AH#p4v#@hFp~`eb-VRy4Prvax34dLSdP8;={LrxPV6B7o<{p2+a%Bn zw!T`}gp+vWu50+IshSdzcA@IZT@X;Oq^ki=lmtoW@8=SQb z|5ZQrn!t-oo@>Gos1Kx9af|j^}ZDPd}BFRJ1Y?hJN|q=7afo+ z1V&K6ePH!Jdv@$yxQr!Uw)ld>7mUMbY<5M0yY5=WlFdxDP56zoO0P!HWX;sZJI?2e zuS;sd8+MyhCm2A_`pZHeYu5@#56er9+J>6ti2!IP;IS&g_#73~0n)QCw1LdlRswH? 
z?}|Dv(~2UPE;4D!Tqp`YP|#tSqz-nI%_p=+-`rc|IiYma*K#5@`tn()I;uYs>($Us zFsJ9lWA?HZj#($mH_4DFV}*Z$~%S4lY)@ zIj}#Q7!|nBm6Mu}|Gtv`J^1|m%`xyaSH3QO01~la@Ko1sO20-0W0ll{ce{8Q{99ji zkx=uEx|zy%E_{jWIN&kJt>!mZ`8eu5)3(=mxAhgJMvG_1_+zvoT@ba&6r=$?zE*ai_<2K85#%fz#W zEtaHE5IpE#E2Ud|dLPqx83im*#ANlXj|Wc(`bdCN=7sa7EgINVjqDm!ibPSmf>zD& z9=w0o?nzm+yZ%S1*4sba(3Wr*=Kn$d@oma&S?;Sq_w%^((vBVrnN9we?fBIv3W-z6 zZ}y^9Fe+Oo`l8Z9V|#CZMHiiFd4QGAu(19Bl#16_Eie<3>;mu)NGVFy7BtFzwayE$ z|0joIaAKcMUm_iY6j)weKtYDoIT(T~w7kfU+;e#D83Lklg=T+q9#xpxsi6q!t%N>{ z6jvhwNw2B)F^!kwvrGdhz+nY0h@hR2nwH~ssO@Ys0s+mpyjlO)Hb}VY-Y8S+yITei z<)0VN*x9xTS=mIvjK?y-?w!rGY{;C*lvoW#l+a5WEW_m2ppB@hOPR3KnzMjfLmx6Y43J@`ZM)c38cP3aU8gbV5^XZa%KP&R^Xrl zzzm4~N&%5o+uEtnqL`v5kxH57<@!ui6Ves&o0^cgNeM z?Y$0|N-fTzB9i_Ocu*Wi`2K2R9ceqUBiT$I^ zbJ|YkNk+Q-7)BohVd?@QNFcioy@&uU3H%=?=~k3gpr)npubjy?4C8UVSxDE0+;G9n z2GvK)L>s`V;&%>ekth7KDDgqro5RnTb^UJg`&~;wfx5*9qSF7U!Q`E(t=Rc*I4HPS z@|@01Pv^ebY@f}PU+xrsOcY2~ZtW`D<=uz`-n(98F2*y&J(PX_X#FEm^_I- z1RuAPRkTDZU1<~8eeZ+Vb-A5FuMBabs;g_=J#GAVhK|uI3zuE!Ax=+v2EM@k-S@Zs zWOA0YrHyf?RJ4UuUOF5bBI7I!9Ewp)gb7}jlr>W8+#A)(RNxcs@J2l_0XZ3jmEehj zsmbUR*7EP0jPH0?9O0GmHEr1%(LSJ6PWmok{87(+=b8fxU?8K&ddYQh>eOXPY$sUy zcm3_I?GSFVnjR@GwAoQ1#SWo6${_g3n{V3?!amtDoYANY9(BW@HFV{97+D*|w`^io zvI*Rz2mcCSFT>=2Ya#b4_4oa%tx8@xlY#{qNCpy;oZ+opFiy2^&NB6R#U+tEU(tEC zUz$8VgLT77Ig})T>?Y^_%rWpV6X99VW@^0an75j564w9e@Nm-mGBm-GG<943*TzHAvQrsoVzx8feRNAQXjL9^5(KEr=-==cC`fQaAPtS^6gPbBp1ADQ!IO=Y|h_ z@ky6v)#Ny?mT#P6Oo{_rMLnF|T@WF#0GFN}H_n&N^WM+9xuk0^o%XE`Sp50jtdH~q zJ~#azvd$_j$}W8OG!lX|(%q6u!_XxNf`CXPA&qo*BhmsALn9&~-7SrDcX!7C!_4d# zzyH3j9S5G_$y)QSr+)YSw8dhJjfIaq)T}cgKAFsoD`B4Oj5{^V_RabQ??U5dxGRiy zK8$$~ZCBUqWiOUk2~Aj8pxVZWH$5+e1aUpR>33CB2$x>IIu67;YnEg2;(zX!t^iuN zIX2BZ9NWK_ZNH~R%s}Xx!$fL2bRVW7;F13MJ_^5PyQNoF8nbIF;0UyP%^AsrS~ZIA z?~_HOEWZ^Y5aCzP!%r{wTcr>})&Ap2=La4EFfza3m;c>v75YLnf}OG<%8|6;6Cg-v zy3(JBn%37zUc#_+2pia+FB9D_#VDfhA?f*Zy}g`##fhA(60&8s9Yuoi^i$$3Jtt&H z-QLl#xy}o5Uz?Sst%P6@rcifXuVjR=*I{!^B=pm(aMwj_TDg17mY+Bn`}q1?_L>Ah 
zE@sx(yOI)n+*+Re%z(2;VespnafUtrO2`|2us?2*}rYdY@7a^zAFEsJ1)O)VW zS5#7ONUmiGo;=<7cvd%h0o8UYB%psHy`7S!nOp(|hPHo{>TJX{BAn5?b?ht6 zGz^r*mQjlRDGXqpgdTGP$5%6hi(F25;_ceY;u;*)j1CFUdOZ>7E@k3wkUf4-OlM1CW+1e^A0CtFZ8sZ$ z_x=o9-d~-=^~D?PvG6T;Mf}0N(g`9z>^BShOFraAkFKmE$#mAJZ8$h(k?340G~NRk zZ?AhDEA4cuDI1FnOsMtz%xaX3`IZg^of~(IcRhSqTPB8y+^g0_WqVAg>g2YIe}M)l z?5Iz|z!w244?5DEUez7GdO(hF_p?H00mu>RJHU-?H>|i_nEn&q>_CuDWt2oz>*~-V=e1QJvp=?aXci}AGkCvy zn*b~Abv_;N6-xdj&(!!{z<=D<>j`Kb#K+-_Gb;hn4fW zrR#0REMmo!0wNoBs!PeLVhS!VO&g-bAF&VxG)H~IjDuO#d; z!?_jKS)7LJ>^W^Glgxh061u-u`PzS*K2J>$8mL(1zV!*aSpCT09Od%swQ=1KN9CQY z*{ChMJyeT(Z!#KG*Q2G6IlKEvMJ#emFOu^;zk!>xg;0K@rh-hp1dpZ|>=b!qYg!1`ggX1}Qqcjlvm~f7cY5lSN zqL%{;8Rh;0ErEu{mkrK$Ybuo?#wqwA%g`9 zvbmN)o#FZ_-O8<)*agvcBAyUdQ}}d7u=yaR*WR5Y=jA|6^R+Gw>HR4SQty|Fkxu3H z&pE@yVaToBN)`u%TOIpEQ85f8FWEND=X<{P8Pe#%oL^5-RGC+!$c6~xgm?>VQhNb% zYkwAu3pG8h%8DSzgMT1;RM7Uw?IWT4>|DU!!ZSVojr7*wgzRxA@LUSb_IB@nGW1TC zmcAxRQ=7sQVx2pv=74~y5~tEuT~{P&3JyU=$YqwhpseO~xi zisC+&qPSm{=zvl*5&b6a>j|2lc+l7=@7@?>4Nkl1u=IRsHtZ$PA2*k?LYP>8bh%tu zSUGWy<4tv#!8O93we3f6kk22ml1Jl&(2+=&21RE)LrZD&{i<($hD2`v=x^gAmYUkB zdbw8>{KTv%BF2qB1`FE*tE-$O{#RyGpzi?3Mr#)EO++I0!?h2=GGdze zlMLYNgK20E&HRY-5Iy*}Ra;KqZ?)DG#nB{bZx_huFjzgQcHgN?OxKQ>&@Bm*8&P~p(F zT*+VFF==gf<3`_~Y(&1_!q29lEC)V`us$S}{*QEnOyG>n^`j%q9cv)OM@rYP0R?CP<5>D{5DMi#tjHs=g&JwR$^7mzdBbsOLlp_M7bIeMIe{OU=-8c$0HKODMAyGqb4jaZjU$K!)W)) zP$Hl)9I_R5cptWM!MfQU--e~&0)Cmqk(}PvPT%I`W-Fr}K{USjd{4mH3L6~*&43(e zG{#cB2B}+Sc5KNy6JrCjHlXhX$brO;@+5@kuXp!2y>IV@NijmzbH_q%%oYc29G@uJ zyZLx!61YT+6|yflHq2yk3Svj?5&8IpKr@;{7MC9sXIOOU{%~9>D3uFbCzK7dpQN22 zwx!y3F?iC^3SVQ@$NYL0tlj$nPj9bM?~` zZM?C#N_PPO*X9rFb;$WOG4O~m z8;9zFF%efO_Wpn!`IX1p62$@0ngY5pjm;CLpXCutz$I*nq z(>{M4S`n6T+FLzxQwI7;xJ%2>iNeUcB2BJCYbj&%)O>nx| zQYGkR?iWIpe&frr36xFsn5@|xNVjqHqMZ5~Q{z`8ve?!mzuvyQ!F}6W-OY9If~g5g z)hMg6IHa4d{s%_e1LyG8Me%V{x7VgW2&ArE$V!~&mX9OmoPgXZxf3h;cvmhX`ts#P z3l5xR?}*>IzxmDOYrLzm+yF)27JUi{D1&o`x-Wu>^^(*YXojX)FP8)wAtLnLYTgL9 
z#M*_0+Ql4Oj!SI-ULC1Nmy|l6cD$(L2OJ-gp(nl>?I~?~CjO6a{=<|ur*(BlVThgyNk7l@$#CenpSk1wlk;D2a5yv|fW&aPsU63kEqEd9-aYg7 z!cTp6QnJGJC*){l4XXH^+mpvU2_*`OxD?-9o#r$*`U2{|I;My3#2m0R*WXpKx0hyd zI%V)@KLgte-SasBMgYoi8%m(R%(vd zwNkB}ikZd4IjOAMQTFH~!>l}1w)%nn6(3W~5PG=IYVk9tC@x;!^eE{ML?8e0%5oWx zo_@UQ8^=+h<5U#sUsOe|1`BoPjA!(}MiD-Z-;Cv9FDbHS}q!>(2L# zm(kZ!_si-a4{`%w!nI$#yyGyKI@tUK&}e6=LIMb>O=xSgqTo%du0HX?l+K27;)z}p zmJmUIa4odGRQ>xnV>2TEHI9oz5XAqI61>so0%m-pl$*F4UkONbG3{uft;8+vfLq6~AS$pzS|p-awj{-YDkyT?#o^1U7lsNFqD zZ*3i6dB!&%qKJ1<_fER*H->cFzmYCQ=f?z@$FNDbdA@9ecELDx=jeEFiCca+laO2( z$8!?M@n7{@?^}+nb*-bXi&j_ZsIq8qKXrBX!oZSMC#VoN6hc0mg2N{cPEHEtBZIGP zxVE(-3w}Ydv;|n$T%UG1?CoKg3XR*!tOe5zu4B5QV^sZeww1Cn>u;lC=_N*SFv|f) zN&hBs{EzVu+yH zh`Qa?Lu>xk3kvU01(++NGM9j_P7|Vf0i5WQ&NPP}_r$9Ej55{JBS482b5^IdF_VdI_%b|6`n4Gf zqV#L>(cUBb?u5rL#=q+Iv@i)5;GTdxnNY^?0$bR7o|LT;156UpW5vx%G95k7<&@3E zOM~Yj>qY)hCAx!=zT9E*S-8O;R_UoW1z-Pk4&T|P^v(tc-2V}{>nDmDdXoM~2$Yk% zvKfk!?-ny&KW8VzAmgvXyW@$>11I-S=$YeFrwDbg)iZu%KtP*QU(S%;)$8!+M4hIj zYkD+YSzufk7bu2fm?nbfqgE-tVCA!vMk;b{~#`wPq5M$vc zxkd(QZ3+rjPWy6j(Y5Qs38E?%hrBEd`Xiys_H6r! 
zg|T9cXQOZeq8qkNRv8kh#L~_Qru3E1xCNN3*G2sSb5}e}Pj@T#jf3c&W%E!_^(=u& z-Kr!F(^0902{8NDn;Qa9zB4L6OBcS4cB$8SPs8LL~JJ&zc&asJReRZ$=NSr!%a zGNnF#`!GKe9FFV-xQXgO$%Rt;C`~?f5B)N{XvT3fTyndZX?Y$Ts1JS^&b?=aDkPWM zTrrnnlp3+#K9jl9Qo5D|`umdXx;~@*KV(y~8l;pKgD0Nfx$qxes2iMr^k1!W6PdP- z)xA45BfHmO(T`i*;*2*;&&z6%oQ8p$P8?O!61+&zp2L*GRUM7X-&>w&;a5T1n@jnW?>*n7v?q6of#AlcCVRmH^h1*zlA+7I%F2`bjmj92o#yxpmq#J5W8EIfBI+-KKnkL45%<@la&zL(29coy%o`z{ zr5Ie#mF2Rf1DB;(Ut@-2fY=>wY^39UW8a+;i6!ucC7WC4EXQey2zpJgZg-AHrk13{ zk+7jkH(9zn|E3n1nI-Ns46ZCaN|$U}JJ#tL>IH&J@}o3qRyTMx{_r4pAElVlBYhtdL0*T{)othquGxaPIk*5^pqCC{p|~$q^yNzyo>HF zHr@g)XAkQ%f*N=HjaO1-Xs=V>TU{0FH6bvo2$Wl|AcF2_-PT2t@W6_z?xN=dH{S@p z%KSw1zygGuLVar!o~B0QA0?*5@?Y!j&{{$esx0K+1C{%Ir&>7`OFVY?Iu6=JTaF6v zaqRiy#jV?uSgoJ^r4)PLr8K_VB8{Dxq6z*=;S)fUnsU~G0qt|y8yMS)}yVqC)( zl_kygT&XNkD5cMKEu6DiOn1qp+CW!NVA;=fi3xeN8)-$+cDy<*$7#04Y&4a4=#LL@ zx+Y`Fu@=CH}tHl;uk201>pL-_|uNL03z{1a-#i)qEB zoI9mV!=6LzH}}~yy3Fm9BBQHnR4PWp>c)+|(E;@!EIFlK)&@b2i_wH{BOlUJZT*>E{1p32 z%s5riEHr_DW@9~1ClkJdCgFCCG3z=QbHh#sy_`=F(mT3)uW>FNk<|{IloFg)r(t{< zXzwzZxQ{t)s((Lqsd$=ASN^ng)TscrVY-C49@H?|Xk`P&?;dWs|1S|`K#n8s69B17 z4stDX=^+W@^a#ggdD(p0Kesb8Q^@J?BDMzJ2r7(4^#OJCE0rj5DUvGE5U@Y3`*^km z!rOC`z7P&Wj#C%=Tzy0E_d50T`FO+w4r5Hn1KDS`vIg9cpm#w&p2BS{?H>E+ruUrz zMa>&n6sp!H-}ZBHzAR&>M@Lju7`OPF#KR=Cm*(PD#pP7`2(BNusT4SZF& z%V;u{8Wv7{z0#$_JOJtMj_nb z6mU7XFy5&|nO4Z8;s8uB5md@D2lK7ypimvorL{hi9>2(`-O*4e zR(K7SDphjnW%fhy)|0mm&r;L~Jukq2j!$10i-;Te&ZNr5VJJb2=9jeRUksJLR~L-& zByh)j{q+bqaH2=K9}237ckg76lukQd7GVu4QFoQ@7(P(1muCQ+>mx}1&&x;+mkV&Y zE{(G$kAD^MyN;KF>+6zD0QzLQ2InyLH?<~rtHTZc1jr7oEMKiw9`BEuQUObv9V9>v zNp7N4^{OQ12)?S;D;+ATrAT=_;Cle)sr~`F(cKYN-GB{PwNX2GGxuF#plb7oRWK*y{h1EgLT(+O~`c;Od7f z#m^Rb`Ktwo5$J_)y@lw*O5rcQ3J-K#1N1k@1%bG8^j#js`Y%BIA3PxN>o+jgZ|XCl z!=ovfo4(Hfle6~srtpPQ0A!LbGd|!c6My0kCYK%+>XWBtT}W~la^H!`FxaC*jwI*I zgVTuUnY9;klu*Bsv(^UynOX>iMc~6`k+5>UupCLo9AD&w%hJ4g7yNJG;jc->I|JQ2 z0f{W}p=K%GWfdou$zpKbJ~%2U_BB36s5E@kdlFPaY`=9qS@MZB`qw`4-E21MH!9mW zjHpCE;6l>sE*>R*{mX-|b$Y)#iH=1Z!M_hqIc}~e{?#^gJx?#jw{|qlDcessN^zU; 
ze`!6PFc};F)#aS3p?tie#^y&0k)PNpvf&%(cJ~n+d}B(0b$$G<=qR25H;6F3v9r0s zp>U#~R$2XMC_ndbHlfA@yX9M^e^xATkDZ}aBZDwdz_<2dA$yKR-guvx{39Uo-5}+T zL~j7u6dHBb>Lmnxz0KfhYBx8}hRU^949L~ z?ap}zR+trB7p!umjnninA7c@e2WF{nxdty$VqaYc@d8V{>-xIs<66z7y)Ep8TMv{S zuf5e$L7s#AZ3+E}#ma*H9tExHIHc4{4??q{hA;r;;W z1LnBb&)3hlKrxJJ(wXFQ#Gm`M45)C=votn>->R}SXxI4bHBBeBa4oNi|1lXmebPAT z5fI=NW(5ZyRW_@alQlsPIU=6u85CZf@8F#PTej5nRvfGwGuiAkAC2Z1DD))QCyDh~ z1h=5yjr{;wOF?Z%;8t2ypwMRV9&%_ICfS#pX9EGIoR#z=jhK-~xogULxr))q>$H_g z0jaqUFNX*d>Dxbzu74MUxXv%oh__aIFp*uGLHuil@GLUo`~Ujnq!FK)^AloNu z848%4dK}O0yFM%u^K{$Gd-K;MI?o z)78~(vNog*&qy2UYCNCIFVVR?ByKN<@o`2ghGb=pypG04hODn(S7Bf-GopN!;<*@I z$Jn6H?-x}BCKoap@a-fl>bud%R8~Z}U6eRMU*<0lbCDP#1rT+}9)rW63*tGq9OoOB zqhrH1qCuj+*m2Gda-b;~SPC@Gff+^;zK&O?Am5{sNb20~OEeAg=FP>ez?2J9MshK&jd!DKvSJ3D z?#^;g=Xj^5lT)RyQ0q?n z5Q)BV^~Jqym6ddjvXQC+n?&6|zrdlcZ+;Jfn(@|^2SbJtff?Q=JVbzp_xqb?rdn*} zZvn^3IhOG5aue6-w`Lu@_P}`qxYo#PobE*qUbkOWyT|fmO&Zff#ubtW>*XFd3$Q7z z2=C)>c0{b$_vT|m&|hu{+PDb#tBgAqZ@^KO{alhAf_S&{YY8C`hrf{BLI~)g4%L7D zkZ9?uk9GfUjI*@bYG zA)Q+R`O0)S)CRcLw6wRoLXgZ#JEZd8tG&{Vf0~DJM}|BY$&H#KY03{CU^Ny=AB%O@ zO8k_dg`v=Bg(8ZELc&TU&ZlHS?K9clnpI2rW#~j}Mis9em2cQpSG<#{{D+P{mgK$~ zf+6ZCdQ-Bjp1T33tqohkC^RsG+zrJ6REl3`&)E;UWl<1%Q+Lqan^R>4|CbKC7?SLdpN594G5US%hMK;9F?8nOh4SIm}*|pUCTCI>T?JQ{2I>O2E6Q zX!#r_qxrnxQ{eYMLAm@_SeCyTh6V$qiym@VQ~C;|_tTMpA^#f2n8@co{m!uD;h|FL zBn)6EU|)X&;qI2EE*WmYW~JCD>qNjnVL zG2Z5~-o)hpROdj-HpqnAZB%YBr%JBVsH*P;%qJiGvxIStX#vs&kkudr!z&<*oSVC3 zUM=mYX}%4cC;YTvd^@Ha7kY-2-%9LBVHK7`1~^!C`;Ql(_Rc$pf^CQ zHRo%o|K5X17c#V>ePl*ptF<7wZuWnie1*>(Z7U?h2q92z@?Wl8^q4Cb?KqeK>U{g4 z_RH0_U9rN>jiGOj++V)x)Ys3vAEB^e7GrMwaL1E90NDcJI*ZAYZ((0lAUi31C7&Yx z+_hJ{=|RvJSVZS!WTax#|3Q)nPeHnd4xJ%i?~V@P7u;m|>_L0Q(`Kbbo@#*;!i90!94!r=sqNz3wu`I zQZp^Z=S(lV*p>qHP>rwOnP~88=}_y}y<(M{Ayo{SE{$QV%`mHy+XkA*Y(qIQh8lX_ z_5uc)@TyqQI><)=8G2{2L?7G7$C>;>Ah?nAG3aOycn4m55pswXF>HopiVW_yj?9pnKAmix8p0 zE+4JBGj_T3*!e^|g7VMyvAW(%){ZnyALrq3@d+HA1U^xl4{L2L^zn9I#=M+%f2f{C zVxWJDTLcYNE+|zLuDuu~xWozx3KfJ6Jt4Y~KVU@G5S5X>dX+{miRYfI6hidod5~4S 
zIZ(RD;mBy1xrQ=zOYuC7}8g=U+x*KR0@pPCp1+x*y!uHuimyC z^}g+2PrMcmty20HwSGu9IbL{rFw_$m$f@jAmKP!K&KyVwQxVYuWc*Vh4C!@*f)P>D z_EF@Pvqnu#KLjg#-Wo7+qv@n`(mXN5t{qWz=>Jh&A7KmeU4`~cX02km71Z0X4TMYj zdcK1Mc$xn=F68eSjsK)m81kQ<39fd=o9FRzVc`H@X21RWCy*U2J{Xz=l^^vUsUJoq9gVXRY z!@xD)xzwP4teX)+8?+pySsCb-c8|?SyEBa0M_H<;J7leQk9kP{=&N64Ypd^bL+38t zCd-V`E!j6O`<0YiUVS#Pzc1b;svba#v^rOntist~0(uGw?B*nn&-VHQ+87Q8?3oyU z$`YWw0qS%Alq4((nG)EuuU|>ClW`RVeUhGYx9JB>&;Dvzr`TOOk@Mt#-}CYC;yOAj z=Od_;M~}4bs5@cCDuOpFFh-Sh+m8U4nRr$L{&^LTgZJw+BU-$|j*bh0&EMPSj}{?Iar?y*W8DBsvX5)wL} zsW$@ia<`-Wc(W`(j-cR-egLg#D}rh4Yj))w9^lCPGdRBZ67Db@;ver*{3U5yxuyzZ zQPl;ZE1GUbdV)B_8(z%h2Lt=5X1#v7e^q{6OF&?1Vle($k?(&5-yJ&I5@X{%Nc}xO_5uBl#6M4SzE7M&y1gM*j$Cna z!qCo|k4jAZeGq39CZ->I+dLJLE|5LK?Fo+BuJE{7EKN}R31Uh=fMlYCg};kzI^%#X zly{!+kjRJl1fdwR&i_0ww&ghg7p5zM%<@w;FrrHJ|K9T%LNT}uZsfCQKypdNz19JUL1uPK7?WO&E~vhcy8fNM0QVo;?q=LqK1*q#MA$f-+&1C-SGiG)u^FT+xyCU)Hq#cSF8Uf+xns1FE1V2uqTh4Gl|6$HVSbd#p06jZ^_9_rlx@s!%| z-9LK*VH_Z*3n0~TOncl;6y&q$}tM z9p2rMQw=!IJ6AcV@@jky=Joo*#Uqo#4cx)Ic=j!lBrM#o(F16_FOf!h=}@Cz^Ucs9 zqH#h)Du{yH>+30=L~b;js2h9=G~U~HVUw))@sI}CY2w++`rQ0d%6Y~~Wt7UOB@m8? z!9;WwM);psM<30UNcVAYf$o9KSKd#Gfch$(e2b5-Xdk(bcM1FXuJR9)z%0`x0}2rO zUAuHke@HlM+3h zij!G587+f?U~E}y2@?5|eiJBf$)a?g)E!sk3FwhW)xVBp-&uGJnOAM~!tx7!;vd#( zi9gtjzERo_po(yfvQLBNz*)mqmmn8{!>!PU69kVaJ-QLw>}cC=Mf>_DhsM8x&+4D? 
zc<;&6T0N5!uCv_ZkqT-&QXPE24I!L3TV*UN!TirJVh}g%-8#wcf1Rs#6Io zZMqR88#1Pn7+yNq8)X9xY9rzip-mHaCpMg*wjC` zA?Map8wuH3?UCXEWe{DS#jAm zPc2>0;O~j7UseAqYnzJ7UCkZ1t*A~EFp2L>IMz<(&atY)N@tLVi)iMJGiS%sZLfY# zILN=_9GdYnE5>hn480%P9u>nVEHmwn_?q>l&Ay4btrMd{p(0|fiEX7R-eF^`Zupu{ z+O4+X#u@SVbfqwRAw>C`1uMYf^l|nkAmE4UK$=F2^ALLqpGWuwR8~5B3O7iX2*`BP zwa~qeM+86X7Qp7;ic;XNVD#6c@`B1?33)3%)8c2@m}FID5Wr@iPvr{Z!kZXljrwDb zl8i}xL87;HIN$&7TR(^h^UfUQb@jp(Pa+S=nu%jVA4_JU8_18xVdWpdk^Bi&I)Tz9OaB@L5TNh4zv8JmNN3y8p>U56(bbC zf5LGqpAPsFvFht)&u|dpFfTClY!6Tlj%IHwTE0)m-)1}f$!Rr})RY<__2Kl8L%egq zGc5|oF4wuWglJN7cL3#~;_gDj2!UX$Gu?0lN7*y))2_pF5H3v-VXIfwdsrJi43|Jr z7SMxb3ncL)C{{L)OA)3Xp0?Ni{rT_()*x zH`r9au^5ke?aXEd_MTLjV{GI@BTk;Kd+gaJcupD@m*-8MK3Zl@rE`MYG3A04nnj*M z5M&t*CPnJfMq=FZT|sSXOxC632DFG*Y^oY!07snbgcW7->hd%|KwbcD#X?PK9vakb+0d| z8GDdU9*s4ID2j=ja@gkroFHu)&mE(scmJ7&F?BX!iwkcVJ2A4wj34>nMTU8Ue`U(R zN2wy$5(m>skX33(E3;wha!p2qjHN8u#|^OyAo5PAd-YPw}{wrouV;INCo)`?KM~SOh*9Aoqz+`=oWi=P9acCu3X$QfEQ` z`t<~(dyLfO`7?S@6w!(9p=4P#tov8qB!qpkhcRSfzymSc%JPCXDQ*c>rjbhAo;5(L z+dQvpf56SF8SsD2yQ%O|vBUIF)Y#8zH8S7Wpj4ji^3_|r)HYb4Nb+;&ny58hxRq^0 zFmlw9oOC_VX4jaSherv_w(WVqq8=6c2#2=ec@;o(^!brO`%|`9r1x)2DIn6MJwAEy zN$NmHlqlERQUjzlVP=og$5BrytU6&mw#+C?5$U2jLH8a=S&M3XSs(*ZA$x7fplLJFo zosM+BQ2%@1=5sOwS;-#f`Z6Uz@f>aD$uMCQdHhSlPhE)*2}qi2SifUK-hV+s;rf;a zI_hIlr(!jlL+N=#{oQcYByrHijM#k=r}xqi)Ln`9^r2Bx zH56N(CWrBuqU@lpAVV3Nu|ygLE6G%4np7UBbGqa^Kf;JY5m1K8Mi$kC`h_UX7Vvk- zj?9jwZ{l9Hy`o+E;C5|7aggytCE`yr!8>{T6mdg-WQPT3he65v^JQ&wU>OG*xh+}4 zyZQs=R+MO-pKspyzZOBiS}eFlh)$xx2yH$_YpCifKIp0e^oCqHcRxAWlh2 z-hr^Yx*T(Uzox@=CQ-;I?SNj&9GBCHOcRW;J>3xkSr@nq3^L)Tf14eG5&XF$l3$HR zPhxEfE#!a=R7llPb3CYHoTO-hER)1~Hmcp3^h8GeS`_KXgN1D@FB>@^ojMla9_j}=u z^aF+@N)R={xZxB{j$~{InbDlj$^CVS#m#5GPmB9xf<2`pVkgIl7ZrUBUTa;mf--Uu~G#V3x0fpO$1XU+Q7;Inw@OBP^iGt@0xh**ne(4N+b%xKxH7}u5H|b$d6mnM_>^#6l&U5kMqF#-Qw3FfshM$9L6>j z-;2JQiwFxPw|;N(e$D*72@)b{9A7rtG>FJLX!K#l*cZ3+Z+#cR8aA395+xE13KAw= zt;%dY^H(R6ND~?9x^?8pcScykoKJz4ir9xStnvm#qSnPv|n#- 
zIUrJvN9`rRKa{*R-Afk{?J5eC`^4`}9QV~3x0fPz8O(=3d-X7;!(O94~ ziYvY^3a{L=a_{g6Bx^9{o{Pydn#V&X-qP1A67LQX{W*J}j|a2u%1h$IJkr}`MmlzoT4aI88+>P3ngp6%-PDQ}b9=pecI)e5 z)hBB6cNM*=^{?7rq>kJz-M`oW1<9p;T?l%Z`)fF-)!v`;YbKx;nuoSucz?VEr*&8z zEgg2r;TUl619lpAB>$vFhc~|D*3bBaLN{;jVFe*-Qr{kEnK+TjOI#eQ5Nd|CziHyN zbnLqc$fC~mnf}Aoj^VNoMZ0I4MUzg6@mA2imlq5B;Ip4e5F1rd`m!%0$stJS`lIqH zt3jQyMVOvr-b+Uu$9Z9VK2)~xUCb9x=M z^|_X&C~$MkDobbV;a|%x$Pw8Q;xayy7|mfV>`md1Z$9v2&LdeA!t%ABfvV4-W`v55 zIVQBkTqMyPK7mAE+Ug?9|AiC3sIelnB0bEIZ;e)krEz@Nn;zfcgoZ)jJC}#{%qzy7 zqA9PSDdi&p*a2`xAVuR8ng1jD{~t~Os;+K2_FCndO3IE-?N+xHcEV4 zyj&VwygCmt%)N(|9hHH(^=|ATql`EGCP zNZ->WH6F7@h9>>nX2S8|HAX?zJ?%wDoD9t#*_C#pXD&_Y?NPUWsk#N!>UxK#|Ao(Y z|E$yt2T|1S{glIGS;WHUaX}A1x9h=`Nw2oH!h!V+x7zx;)QVmIADh8^pLgvM;1kMY zY^$mucce3gHm%;t%X#4eFuCoo6P#ULaBAze7+Fe5a^G=g?|L1g)OsIW6>TKBNodV% z*@dK+a z(MrSofIx#Yu)LG|G+%bD%j#nCdqx)16Yv1$ky0G#OZ)gjse&E$+uey>svNd6<-i{^ z1O%>bqgzQYq~1$Uz8gig8PESUof3!j;324drAi~6=2_5^b?jzWv`XPY#AWv4tioid zMjf3fAhkj&d+0H3;>8iRBybgDa&XF6nuC+}ddbl!C+-1{)p>1+r4AJ02nEZ~PES7kp+obM=)KeYw8 z$Qjwph>Oj-3~KAU5?lD;jfPOWrLp8mfdu@i(VU*l=&DG0~O=cc~mz3(b17jv+sI^=rMQD z9;9y?qK)s$QCg}68V&ONU1FCDeS4>hc+U$yrfX(e6m-w(5WjAu)iBuJ+U07F$`BbT zpnbWWQQL0K?F1^$y2byf?FWLoTVe0%Z zn%+Dd>i>HmA6v33A(f@oB)d__z9dH0882ne*mp)T z_H~SH#&~|m`}6x5 zkIXQL{p0nlAyLtluq@N)9GdwzQ17T2a}17P_=2AB@NhMva>T;Q&%3$70FS3BPNKK=@Gp<7Qms;W}5n=q+h|y>4d&`d|a<*rmC_>`o zg@0jyoKs)wCZLxl&{qWS-!FoLGdBT(N_Ud$OKnRs-?{)5D@@*T`&fw(OZ@;5@27Pj zm_BSCIjO+Xy|K&UEcxw=K5dmhTS5xY8(v_|YyE`dN!#!Dryybnm&>v|WPLfslUS=mM zcx!5?{n012(j(ek_^AzH>msGi1G2#Bn@`@Y$EOl#p3ol4A~o@`nxJ#nIZpD&#do#O zz?yZJ@J`(w>BWz3x3v_HIQFTDMr#1OJMauHH~WOR1?Td`!)_XZ-(!_%@+>;U4jISGhz8bh@x&lwL%P14d!?1Ks-jH?kGPUK5x%w{4X70pw{#%||N?pb3b0 zBD;5Vlndog3tv-%K1-5L9M~^~(Y(9BglXlQ<|%Xd&ObtT%C#C7rk#+iWK_x_Jc+qF z`2K^o8{F48Jk4@PgEeU#9%vqX;U)~A5S6Bh*CgssvqSP63SVob3C!3~==tYKqL2F# z^iv`00XGAaY=s7n3?vs)J0HzVxW7xIFHe1saA<9IB3++0-?zE_aP0t~6)Jr=vC~x# zEPo%0c9~@C>T8zu@RyhvoKJk({mIKb2{^-T>5XEh5AD{&V?<%8yjFf$qJO z>w{Fp70D!P+n)22rWMHfZS 
z#Tcacm$dI+14Gi^3C$`{pP1Z#Bz6ASWFE-`%6EDBt(6HD`Qnbig!+JjR~WAlGmM^+ zv%k~&jdtagrZcV;eerrY!w!x-Vo|HjL42SoKwX*b=k$#!;%jjs%c=}ow*p;e&A}Fo7wt-H-IFCOht*JfYIieeJwuN_Cqhf%U@@4Kp{B&9!-&=AYbVkqxw5!VlOj)k7sC4QL)FJJH^9 z_YmW9osO|FmM=D7F;%%RxZL1EC=ucxsjPhJ5;-u$EY9_IsJbWur4{qbfDtYNa|_;g z$^gsl(Q`n#-nsoHNz`3D%BKDA_QzHJvYv;BX5x!F`$@4UF}*7}Om4AnlcnQHw%o!F z=B*p;_!ZMj`)QeZK{ge2r%K$fLaSR4!TvA4)xa__pN_`hxx5d9IuAbDm0VF|xqAiS zH68QojMj>%LD5kxritgi zxct3+9-p%Bbn5iEJxZYWdB14{FnPUn>C#p7%gv$;j_({hOvLVDpf>pG8hUJ*aJ=^2 zUNyKF!sa@3^J^;1U~)>1w#X=8B8O?{I^zqgT$fS7rhUb?m9$_e7onsod|J?5p5Ybq ziA{zX%{*jL=UJC-tB&=rm`;sya2yR{KyvXTxv9?AJ-LlaRoQ$I&_RgWfEGkXja zqiqKiqnwW|YGi5zM-Ko)rCBcV{7YF~JXg5zdyhN;9A-ntcs4$vvXR68aYJ_Hu!vQY zQ(iWHr@c#kh^ZRG3oLRS4#h(6T{|J$2OjAB?2gGL z*tF75mOL%1vh1!8svlPv-;hEyZ*fM!(@i3bFQCXq#2uMtP{;(LIbtSQB$17w(+Afj ziYn;#uL#?@BOW3Ah6+B@?e9!&6M@gbrX~dtfDf5G?|mwR(fY6nWJ=Uh{K8OxL;8}v zpJ13gf=UA72qg5L2Qrx)*4+G)=ji3cx3R;(dNSTq-OZHbIB~)9H zhCYQcF(GHOuBe+gk9EpuUQl36%pC;!qw{Q9-@Kdt9;+O;^UgLw^5S+9W9uL}>i+er zasA_!OKaD&d|p$tD_4o~-(T@5YA899aoNI$&MCssQg6NG>(qjHM=y!|9t(bW)tCs~!>nR=%q}q*IUBTT48)ZNepEoX#_~vY5 z_sQ~!S_yHhFI{V2P{kqd>Q^%$Ms8$yC6{rf!*pxUV-D$-N-;HU#_1bOxvn#1Ked2Ggqo~kDf9FK~_QXyk^6UYr;56p8M?-Z#nC6M0nid&VW;8mT< zKNXI9yR>a>4Y6RgvMfudRj1ni>QI#19_i$-FrD~IW*ormt_pUHZHXVdhFzNm>cZ{^ zRYl&VeSTGo=TTGwp?bN(8<4{s6Z*Hd{EtGg^%K60YOZ~%n5!DItJ45}eRXac@Tvv&JjD7Fof6EWq zW736_wi$6`HV0zZ4aFfU3x!#&roND(eB_%??X>mnv<~L!oOf?k8)~m$jOe%Sm?}l# zE;@>+-ZoX3f9v==>b`a??KNAu5HUf{f>4>aOvrBiC3vchZE>ni9}#HG1RbO>qi}8YMvY=AtBWyVjS9| zzdRkd;-Ha{@?=kZ>Uwg|*HcTKMUS2|wC`)pyuy(wxfh_*6cZYfT!jpF$Pgh_jYt={ z-(6?Yz;)|=Z?RC5zn)*=QrAYu6DbKqW)1^&@FKWk11@^&G4~OY-iv&y>;;V9jj?dU zLn=3cL!TNTP@(;EB&^*EW6)~481lUyC?bOtYZ{@8BU%`c@;kN_x+H~1N;n1W^%xYO zuS^~W96k)5mU^lU#XD1tVB1c`BL-Sq$FzqR#g1>neSb^?S2JTZC`hJATsU_6>7lS( zvgo<#vvMq}mimp=+qYx^mE7H5k1fbEHNnm?LABp3aLysVy&Mn9Ou8>Io!6 z-febD&L$%`g-QEjK>D`P&(@Z)Aa}gBJHk(&*7I$&;R;4Nk?F&C3gqus z7_hmuk*Rxe*WasbT&v=}Qo{AZ)m&BFDZ;nsX`}r#z?P2n8$117)g${xgm+Tx(B&iU{`OlTjpjHOATT9+yKU 
z8X1yVk$AaCJgFYxlYAzrH@tz5LedVlrT)3rE)5HqDj%zZ5JSR@>~o%wX?lL9_^ zEf7--l1bDt$lgMhpX{Z_?xwV7pW=pZHoDjUN{m0cUr$~BF<DSS|9ki)GJot*o9AasI{FZgsVvktX;2AMD6^5VKmxg0dPm4oVZTxq8|h`fW4mIH zciEDWp~c|~t&zcO?)Ov|ebj}jhEs!ReQ9S@ba43VvAr?WzqbofPi5u4#v0_#s3Tx7 zSVziSj?qQ-g}WyfBGdb-k6NdQD3jxlb5iMlnzzhj7y`_)ezJze5@f5hChk&lCiGjs zMeEf%JAifWo7iH}R)%=C*}?3@DoQ$s;hdAY84 z6`y`dv}k{CLi8oReDI@M3+21XhvWSM(^33%=|fscqH@q?h;8QaL6lK#6SLen_}}Y< zYsOO!?WmLbLk7=(M15@0R)dE*qbRefw0+c%K`mF8)sV*VB=j2X^ZS2NbyP@Ow+q;`c+_ZF0wWMB(E9si;J2$MPd& zEBNX~4wh>r3SM;>5EbSSsdZ)w0BX zDgu2gaD8Dhy+BTVP6#&{S|weD96FI}Q7(;7y+ZGJDX5o%gS!3Yx#z=LM^3*zM{ z&u!(~18YerXo^c9o8|lb)MF#;1ec~iq=>wIqkg&$B*+jIpDUUuAY;l^=Ns{fnPf<1_JBM2`3;qQSkZ%;;Nn=~!vBitkh^E6h_Z3Wodt#xxV472 zl!O3nn*l2hA`2EyZTUQ}-;mxN_KOu>Ync5xJ@+*9%78%4+C~#8Q6&xf_90%*u=yN-W&JszpH;T6cU489)8+-c)q?$Qoj07}zZg%g|9O zTe+-vh;nxdr{rw{>L(_OmtbU>T`6hh;g*gH6!q>T3#=4`rI-y$t5a|TeSYuY$^2*u z+kGy!yPDMG1Uya-IBBrwISR5|-HM(ZsXF4Rym;)hn^U{Tf-G|?+D%xLCY}CGJ{gVW zW6b)q>`GnZTS5|3`M(+?z=m353{OIJ*Pcb?Q)eb$w77+7<83Ce-oGB=gLysChNofw zWEI27l(m|n8SO&N%N%MM+L8YV=jXW#E+2!g^mxEr*MAV)O|fBK6$dFy79x)$zl559 z?w*c}%`nnt2Zm!N*ac){20t4c352c=pqWc8m_kd(dzy&h}rynDl4?JCP37q?Qmb9>99nyG#Jw~aLlz9Jalttn%zC7>*-guBFRTd`Bzjl6|E5V5pC7QGU9mSvVpo;`XYqkFH>GRQ}9 zN!_MmbQ*>9e^k?Cx#@Ee3i^TBN8UcD_*@1q?6WS zKffe+Y&@TG;t=D$2$<}UYR^k5(3ozdbzHfTX@AkHsYI_7cMxNn!LgWDHF`P_vUsuK zM*T_6sIF69S7GRt`%CX>{O+1d9t-&$q=3D7>2I?9De!1rwrWUPx%UtN^do^V@e3U| zr&Xw)R|!wZzQf7u@#|T+!7@ysEZSE*)puhP8@OLG zq}oxLuA4u-h+NVR!CJjbn# zTdfM8%;-IcWfpPrV{kavbsjcwFK8{0`y5Xa3$^BF$g8|bC#WyI2AyKLtnOeAcH8^1 z=OI@vThyBCM{ppX$5*7%0>v+yyJ_lEzK`8@bfF1K+v?c%n69;{55Q+guigciKAA~S z8l;|>N36`mO#}Ixdxc18p^v9WJ#1b8uC;Z~vlUyYAhCnEN)>jiShQ0nuA~Ij?T4U% z^*nGP^%zX*^Ag+jhFi+#!gf3=M6h1vzISxZO6jyVB{(lrF1+{V&9Fg;~{ zgjKCt$OSw3$?SKZg+}BQe3UXB35!7Zjl%yZ>w)(@WXG2kOY$5d27^bT0ExOxCr9f4 zQd@xSS!|p|=#rjA+16o@e|sGqHqJG3ni4SulbS30 zFnYe(oO)hR-U$36b-x0c#?STl_oQ44hfBImoah9N2TS!wxG(nnnaO8(ea(w>n*NSw 
zr|XQB)SVoku=rI2y*w@9aIo|n??Rzc^P-C%BR07>Jj?CCXw=Qdp$9azs~X_6THGc;+Uya?j&e^3#P9@vqp=M(0uswHft1=}ypJhc-7om0W>j+A#e-o(Dse48z zASWjI4I&OF1N~c->`F4K{Ucv?9 z2a^I4|AJ??cYMJ{=v1cY)vzs}Y2bI|`*`K+4BLXhJ`_%YmN^!jm6*d&n&)Im_gj=w zR<<{1bjvTQ&OR;|uF9B8*+a1f(?zUirM`gQyC12C`6dP*>%7o|l|?Y9vF(aMy83HwJ#}ax)%@WePUPa-F1ihA$&1Y&{XD$h56L8y z-O`hAs?0~a#ycDSZkctM$kp2W1-0iUs8QHBx+hMJvZfs3;K@}*CGS`Q0w5k~Yv&fY z!5ev=YhJRpF+O63Q~M0*wV8w10IWD01i~ax;x8!CuMd z>hBnVIgN$dceHsJ>cpzbg;E^MPN>*>Z>_@qtNr=Gz2oG3hnBIP=2hh8roX!KpH;Ch zNw(8b#<&;H>JlJ!n{QgD+rFrdP#NMXJ!zaLB?!`#K1sH}A=TCTY@nK3KphL<(^(K9~GDHq#6(q+jFw( zqzP7}8NzDXIm`+!?n*{{RGO{Y3qc;11*ICn%}96m@$tW{pfAaY-IhD%XxHmhaHf*< zdpX{EA1G`75D?BkMV(F zJ6z`Z50En=Iru%pxgE4qqo0K0O`mq2koP>yW88N~@@!z?f2AjzC5t`x0=}YX%9yo( zT8J|g-MFI^TzwVO@0B0>Im;;TI~Aq1YP&JL{C&gg_2TbHm1t2lUrwzQj-7xPzl_$h4~KgHY3bU@mX+KYI$MVP(oRbqiL&|atT$T z%}RN|Fc3_VMw0YlD-K=m9`6Ok4RxT!YA9)Pd?%rP;nSy z=iv+Ubirrxl+jBIBXf+?ej*$%$^%apZq~h33p~*Zdtu2jCz3ALMhW-ar_oCI-cGeW z%9-BKuyW{IuqXAk{T)Hp#PLCD12_2~gR6k&i!G4&FU2b6q2HI#gILxzuioCAQ*?~* z$Dt<+6>v%2l3HKeB3@6Rs&PSS--_;HA=CRh(5A#gAdRC|dG;5>z1ts#fz={qzBCe_ zIn`eQ_+xO*;R^O;GRX5bt?E(Ctt$!E7N%_W(V)C|$Z!#UnLXLq&JI*kzGk zT7frmftPlyL;pyU!kWqoTQNx)Az$nb8%zvalBt#(K|A<4z)SG6D(#o+9K@}N>vi3E zle>AQq*%?Jge1lTc37Unt&i2MnoR(HdAxG>_qU*rSW&(`!TqY;G?ZzmPaSxMhgt%V zN-83oVim{Kxsn_vo-S}u_WA9-#CEWwwCKB)j7&X!+B!iw^+>TLiEb1n3#L3L;7L!o9D0Sf!A!TqC2S&- zz-qj>B`!N7qN}U4nu#x`#H0DCUVDl&$Ms)|lvMz7OhO-*Av0bHAgGjo;%U9$&$KVQ zCJj$@+83oKtTe(t^@R9yIUe2HO&zH^Zt|V0%HeRyB$MQo5wruqR^0xP#@VTgpDAS; zC2FyAcPtf(4i#OwKf-kS$z@7vXJgNG-BRoCb?ui^=jLUz zjX)>TPu!xO*z<{^MJkfkBUo0hX`eIY#7t9z4u$UfZ9IrGKUn3^OYK7M>KF6p`FmN&<{&3J88u=(+h2 zk*vVeFRHZM&9~+jyG`y0OsY}M68uAExLQr(U1E%F8FK!CG2CkSMq@*?XOS~0q{k3J zCAdYlE%X!vP6ulXyLu)*7ap<%*?tPeIv^{)7BOu+{-%&l<5p+IzVdixD#8;$iX~3P zy-`-J+}>D&-?`|cm?G)Ttu<}y1U_b(N7^$<8Xq9H_?f#Y+l zauf}_)pp|ZXHKbIk-&fXkfh?o{|kQEXTfiwF+f83rjGq&Wk-yWY;Kw&l5Z zJmrj$qI3$jav<=Ie57%>(oZEXrFjL|`4us)Boi7`iM~)l^F{f1J^Caynq_ZA9oK{r z+BR46$vH)7&a83sYSkzVbM}`iuiia%Wg*Am&&Da@)4my+r9BT{M(fNYf?XcR%4X1O 
zG0b$FD+T>=HM<^#*cjV0=ZcD-6YH_cqIR3aY7*W(@1pa&&JixWc;PtKIT>D)r42}L z*@U?V!o;lYZ0^>Vtb`ZK=%$&@+3AX6{kdLCQpJ%bK~|O`s`E8g^|-;TzL8 z5$8tnmst;%zT%#tfFFOr`+;BMaM=Z$K;S>xoSs_-jW=8XYGV>^D5CDbNV*wGIC7o23Bkge~ci zLwvtjaUoIpb2V@1BWdHmDaV10;<36i$#QN;$OHKCW>CAM_Y%uv`NE7 zEHm?Hx;lA^{dMnRX(Y=abN%)(qxefQB&eAK!m(JJB00G<`f`F?OKo;YTx}gYFsDiG z?%eU!`X)cVzf=z(Xv$8}BKnuz5G9#u-%l9oTYL_j9$@A%b+9k*c zLAgP2puREf+C=X6Mc1K+tyR9ywj?je9ofyy*rCI`URXzpi&9%XtXpDynIPZHufS() zE9}*0zrT>Huc#tRr{-CEadtr$owGBswcLNk)Ml_e|K&rSz`}Rr(mI+4xf_i5J)Z^W z#yOk}93qa#g?jJjP{krDD+5B1C2P>7m!8eEa`Y{0E*YVpFKiW^+)S8L3l-Rr1)p#T z0Au<%YiGWy{~^3@XF*Ph9O$ZYvwDCQgs6}`xkyt=Cqz$OjW^$qXK15Ct8LKg(HK+d zQd`Ep`N3rx?Jh4`_qkXqN2;XLy4IGD9p1h)uAQk)ig4ztmEw?pMkn*)94X(*kqg>X z?S*NlV~_I2(z#v4;qkMD2^B6$U2M-q6slLeuON|PBsV+5G?d-`acr{LcOnQOX3j3j&53et0YEfeLK;<6zk5FvmxpWBl4ae)R&3Mz{6lL}<| zS}M%=R~q4FIwM6rxjnsG!XdVji)`C8K)MDd@+4cv@Ex|xn=*mK2Sch^pb5;%J8n;u}fVTs)3{p z3w2&AMgdltx2zHJ4sz&@;ExZUyOz(btO9B^6v(mCVH{P;pf>N3v(Thr1I(_=DjVcr zy{Zs7mnbRl={DgoiBxlh+$#I#&@g^&`2cvZb1K#Sf8K!{Xw>(Ah~>lG0iP$BCz0Gh zV$!a)s8ZEK$${cH?yC&fX~k8(zY%tnnz!cqTZ3f@Vk1gB-_V|Z8{6x&kKK*T?UgPo zc>I$=la!h$@}OFY##Kh>5orFd>O@B5&0lWca#7MO1`_Gg2Ga1)JlYHbvWeVz;KW(x zpwvrW)5g^*?d|O&zzm7c??6If-Lbj#scUjwMG{jAIzcv+L>N6h>B>fX%;WN>*pXL| z>xpB)|5yghlDj)@iI;Y^Y_9}$GB(W!S9I}t4d13XJ$+YsNw%haC=4uN4q|T{RmbZs z#Dcjwy{sG_tIQ2nNBW%Yd{H zt(4V4LGph6^D2M_Kl~-9xh9@1UZu&Aq%e`_ueNoP9;^`-D)`1_I?;g~Rgd1acd5~R zGt^G&GC=j!Aln`aN+Io8Gzt8WV7V#a(cC+0I&%W(%YPSDK%YNO*sNnFqg@Cx@`X7} zr+43j8rW1niN0a~bWeNZ90u{L9e?5IRx7=}ECftWXo)Ri1uqI2lBtgu<2JhxG2EL<5S}g(TI(H&^eS?7 zLIp2}M|CDK;;ETG;m;_ZET}2x=`o~(-E0DT*y@AFKkuDJ9z-E)>D8$_*H@|2J6vOF zPt0@Nj9{kQI?px7q7|6z887}SV`P?;n@^bv*0v6O)HTVH76cn%rKWm8UdbjQGg1EG z17H~o&CyV^Mb-RWpip|fa_$+J7opKAk!mGLZHQ?^eKSQ+!{E*5Z$YdxR88m`;xc*3 zUns5h0lg0*aa|@7NF;=QyO;-5yzn6NtTNQ)WxHPHciwjO@D z70tQ=xOXf6&r>j24Z`XDpULt%i{_ikliD0eLhcK>iqOK%>`4JnUh$c9ZG--Q0fbMf zk%ryFRO)R0QiwrZ(AGFD;b6i>pWAI2E(0PR`q%lH%4tRCX@3)2Y3RH-bTGlBJQlj+ zGu=DszKR?nacGaVSQ0=O;TjO321&Q%Bv_iDq?J^AZtvQ 
z(Na2ck9QM3TYk+5R66MG3!(q-!2SYl)4pHfS5FPB(-hl9f;MzP@HM0D6J3E%VNEG8 z?-xNpFKX;P)Y)e096zB8PoQxs%SJw3^ly%HmK)9HNTqH0sxJYp;5yQmF()&iz7_Y- zb7(ys@Md}_vLb$*yJi4%T?dtQVNt7maLc{UME(Xv#`% z+1a(7JVgd{%yqU>1ku06ha#GYB`5huF(lGY6p&T1d^At}2gv*T=-x4)Q)JruKJ*AW zcW797s2GIijTqLtk2A2lQuP~0yp`JL`v}3_Kyg50gdDQAJk;PM0ARvGInn=@6+Tc? zIYc8uOPQZ1iJ`SQT}M0ekpDbeZZ{H>QKJ8Thz><)4raJAF!n1d%Lw#IqE6^0waZT6 zTo+Bj@uULTF7Vtf8JREg6xqgZ;jSTiU54uywLzbI7xh5aSc-WI{g}H{!BV79&G{2P z4LlP|ioPx5MPUKv)6mcz`4pzGCh3Y6(TslY9YQiP#$d9L_U@>-myQ)>)m2hk6u0*9 zToB7%Nfq)93oMUn0-0ASV7Qus#qP^!1;Fh!E`=VKWSYUeajPGDe>-#^b$k-7HxQCa zZn1cT=0iv_gwEaiS4V|iPq9?y~Xt}J(Q%iAmxPNim2P7V%F#Ha6` zcc+-VlC!jcAhqE!_JFIkRs6VKYMJx!M*WZ z1%@uWiB|k?0r=rw;1$NDdTf0#b*q(}G)XM17u!%;S-aWKKA<&X)Zu!QFXKl`p$9=D z!9=#O8}5&>3~;&F+qH?&><_w*TS#X6&IKME$i|=*-)X51ws+}S7kQ{le_hLj)hb({ z_Z3K;Lw&7}0yptJr~%bOB+Ur|`4-pBgaNDuwj=DIOMaAhErx<3N(1^!@87(%6LEMH zAzL_9rDe>vbVun}-n;G9ba=BW_{2UDD1y32AiipzvQzbjnL(a;ux5nq%%BZB{tPE1 zGfb=j&*n}_#;$p`U6SW6E{WsQVc?;Cs`3bn|848Xb)-VPBhWS$x^?dY2ZI*#OZVr~ zRnZR^YE)>>b3ftrQNxof76%Q8q5WoRcNpqK-B@Z0_2vb%Qm{dlNK zrk-M@aiRq8-|kKn_vH0P$0nYX&aUM=8F1*wt=JYREewsuV0^s*^!ZH|1I)>CItpl# z`SZMUf|=;h__jh7)(?nI6s2KDIh*y=znHpZ} zz&e5$f&3G^0J&Qak--VC!pp+$QTo^F*A`-1&>7g7y-){V7bsDgynUHo0(>$nc!E-+ zyrsYTKg+Y{8IkjYy7OUL$veubWnuZP+z{4g6m(ZC4uqlIanLbP5T*Ih8xx0RiL^g| zuMu=V{JDUD#2xHz;diFIyBx=cpn^on3lkdEyflj|8R4|$0_PiD2+zLi0-@C*6S@Gs zq7m}TIvzhcrIe}nXuuUnsfx9=0)i5c-06riuV z$Ds{SBferDnt|;)r}v9@;ooGITlV5nC!Cu(9Fqs%js5udn>wCrOpd6U#GcZeRMDL< zYrjYI>8d$ko+h(dy{5P}`;+$EL=r+_luo01;#$tPN zJ>Zmm9?*wrjZ(oIVJ__pCfJvPFSs8ZL=PLwU2U2;kUmhPyhHN=Yn=@H#>lq(LvFTe zR6i7a>IDt8PllfGoi%0npQ&_xfybstQOxd|i<8p~2GetuX85ILm~`So;22 z-`Fw+nmDn`B#bzHW^G{Jr^rG4g`Fl%-g3jgk(Ai9i-IgBf>2v1EDonv%{Ycf#BxLw&#p0vTeiMBfRt&k>4)O=!S;j`c{FKYeq|a14X{>Wa^;Y zJVe6Gd5?7`-Rv>Ywj=B1+o6sbZm8C%N}l)U4$XDBF7&y$9uxuV6^9dBT3k0ssc=O~<~B_r>%l_y+8g~=T+dP> z=%kYe!Ky&T9M)$pQkU=JvHdPXv>-rGcX!)0l_UC2ls}_o{$r=LeMgmCqx9&!pchJ+ zw+2clUP@ox=DwMk+G0BAcr}9aC(G~KN#`Qc>Om)mQJy~o)90n~miN9g#h+)TF(rXa 
z9cPJHQpf0nWlJQwYT^Uj*UKU(e;z3|So!*w*m)!KQ??j@-r`>>ly}P?=@ig-IjV3_mcj)vDKRoz%+n5Ne04$48jrs6=NJ_LDkdNB;-! zDG~@KQKNF}ymQK|D~`7QuI$A@GxF>ews2j$QRLs_o!x+{NVj-j`jzI*jo4QG>(vq>xMdfA;3-9oTt&rE(mYuoB(V{BBZCmknALbo>{rn1pY))mQ zf=kL(5b$A+c4s?8=)oUb>q+aWsqlM^qnGxP%=zHH1fI%lC*AR2cU`03+y8xMYGo=U+my$e||#JXfv?(Y|x&T%+)4O)E`PWj<9#49O5p` znxiR#w)Pt6x{sD8bhc}K>~`ZsG|f*f*Ebz{YgU};;*~Z{$dOH8r4f?e$#c*;8Wl#| zYFqi^4U9v30gOR5D5RaEMZcAxy2XS`r8@LIoA`NeeW)9!nFe-$&%xRIhV?tpq%c9v z1jLPe6aa8(1GX^8|FAV5>g=Zf+>2dv>80NE#IL(W7cSl6^rM@{om1dsrMUxgzR`L? zJ20v+~M5CO`*I-1sn_4rHd*nedEko@GJB)KP4&^?O<#GsCs(#pF* zUO?Klo?sz_a;eLv)a{1y?ELeFuauJHH!lp;C7y86cGgjFpfm$hIj-Y+FEY@v)Zk*7 zmbU${W~b;-bYr;prh{ACiLWPs{?9pQ@-^m`g5LmoNl9@Zyn&H`&x|Xa4$$=){TQxG z>pd>`5|_QCq|EVy0ysR{w)z=x4x?_M$_H&R z;{tXX|HOu zVdyb0SJO{QH(Mfd*~L68F! zhs=6k)r=5o0nfaX;V<2L2*fqLR^SmMbxnP#h&$=0w#Rh2O!>{>+{2c2Wg@@qH(jX0xnjI!rXlaf8}p z1RnG4BVSk+_Wfw?x07Eh0w51z-b0N&K8<~FAhkn@&G?ejfBpbJU6)%xJL8|_n^tvf z%BwDZX`fGBDv>W~=LPpdr-!b>cgCMG{ixD=Rli3ned7!9jasvZ@E5OpO;dfvk*5ys z&VtahzM1~wvsxyfA2I^ArMk)LfenQA4$3$_so#a4#T$1V4$KCfr4 zXq;TOsm{E=*d5KI{iS|6-iZh@-GowUrDC~j)v2bA_H-9h>oHdQu5V~MO1$Nxw8?k< zQOzf@Sy}6D+`L&=GH7jc7pcO_icNS&6f7GvA-{t^&5cOk1m3*F(!oLn{i$B|FW+Hu zyoc3hvNp89w#&u&q7W?t{RD#bEA%f(z0$Z+{R8`L-@y<1;e1}1Or=k)UN**iTRi9v z{|1Aisb*TY;rH=^5hwf(C;fLqn$PNqY_GFEN)#F4inwbDKCCby1aeZ)Rl4}{WdcL=Kr3V$2`L8W5ZbTP@862+J4Ao%k(ce#xfhcQdH7g zy%Z*ib)5*%2{i#c<+6(lH;g=|&vE`A;fD2%47c@wnAlb1BtaH(@3i(vpG(m|*3)CE z!02=!_MvL(7u$4`kGUpWF_bd7p{hDf1s??UR8#_1 zC_gT(K8EwHeUyIPc!`#=7lqznO`!z#HlhzHR`hesNe?y#r3rCekT6Wrl#9@#&2l5g zdtLcRe!&_O@&RH?(EG8?#0H_`*nfGT>fzHRgH-n-c(W5SegutqYzBpT0WNR+pT**pRR*5fF9xD>J%mY{Xmlt%AL2?x`pbw1sypE%^_rH2kx( zT2xF_cVrP}tWUswovreXc)N~_SUHE_p5I51XC2;VI~Y#?Kce0{p6WOJAGd{2WJE?~ z%gB~pvRBrzDP-?Gj$>3x*-3~)LiTp-L&?nEhvS@Vj$?0*^L=@LKA+#Ohd=uJ+^_4t zuj_d|hyL36qYCNY<|=j`%54w$psQ2JJWV0ggtw zr;QM2T;x6ao3U9soT8_*{#(mqZ1KD&huhmXbf9Gi~oqcP~6*d8zNhuj7e5) zAusL!qH+1-QQbV(1C)(oA&?V>U*pANzPT}CkfdMR18Hki@)Hy-mOckk3DLT+_}-?uwy+mIv|fkf{BBqTSK$ 
z45R~*{5?+6%af2%hhHx^&H;%ZZ@62`_;QF!?%z*p1UF*Zoq3t3q$;mQ5t|3Jyx)zW zG8d(fHZSIRJgSFx2C1T)z2L?PTaOcS^#v0>qo1GAqUw+THcg&&x;A-&KU@|v7D`98 zWm5KX^qZI5(?MOByOd7%u4Vep(tyed^7*yA0u}?QH!prET`g~I&_b4*()lbuC_DH>;vCAb#KhOvroLm5QHWf zEN%hCifn^r@zE!3IX@9a#k{zjUDpC!Jce=T=zLlO5?7twf_JxrKbgkohmQDh z{v}nQr|pp@0*1H4SL{;W>FgtZ=ijTYndHRnWhIUBL}!X3;XHs{fd&gFvj;RWQ1H~2 zsu%79Z*6X?tIp7iOn2p%HAp(xV5&SZ4s?jU%0s_mH;RN_?){oVq^?bWsUPEDd-s)F zPlUXzLS%wpve4@rw65!+lGi{8q!b&5a_*}gWTLoawhmowTG7T|;P-goBkKGSCmdmnFWDpB^dD~q~aV5qDB6QmMRL?Mgve47tLLH1?(uwh*^&Lrf+0ZS z%Pk^UsIx}Q8KGbAqw|#iE@QU~pu^&a!E~S_3uHrJ5p>qgql#i{`gm+ABCGei!+zlz zVJb|;a@=VI9*f!BK6b^{s(w&9jIGVO^q;d_4xeputwdM2kIYaz`ws#OXV_-T_B&V&>R}h ztodCRq&QKBTC8qa2r}H8n%cH*Espqt6~GsVl)U%ccRaJ1@RbGB|$%V?<->)U1JaW(QgcT(Z2SxmkldUbn z#HY&3GT!NSLOto2F87jKXTU`=E;1kmRnlSp}GNYQv-Mn$+f*Is2>+k&;FVf`)WoYhu7Ddpq$B< zdM^8xXN76CMH>Ox%Vo`D_iS|}m^S0KxH-3Y>mpC}-*l=38R*2NLOK(t{sPvG{**pclVzsQ7cDP3V#LF7~ zf80MJgUpzkCRS z=Rc@gdEo)rTe`sj!^`R*(bZ76ETCHloD{9?>lZM4zribu7rkHfQB>t9XdxCq)eT5; z@ncJwkLdqa@AcjiW$6&k&e(YHX!mWo7Kt7ajxxdZ6`4Wj7Xo!%RZGeS9rlAa zlu1BIrcI9tmUdHGXd!e;nXvtA1U$oXlt0qyiun>94w|zmK6_OW6+Us~*3zgb6P*Se zVUU#CTc%%4-~Hj`c18{td`Jj+1tR9KVC(ulo4_El5+JqLa?6bWP_TQP=G|1_qun13 zfQ1hmEjkWU%VZ-1d)hu}Kjxgupi(eZ!T<0Cnrurtak8I6duUbX%Z|198mwbizdtg3 z+;&+}uU#y9EJ*2{KV2L;5v)Y&_H^VtM~c8CqL_4yu%y!+jh^A3ePqos*^R*EnKh%l|-g^5ul{w zbrHui5@$~b=jLXG>>WP(fZWCRFs7r`MYAvNTQ`1=OK8?F9u@#EFl83Q8zIhk{kUyv$2%FalY(sYZR~< ze_lEKkp?%eqSO3BAovmgd{ptfLt;orSb$U*njxst*2Y{9_tU91EK7IdNZKqu zhpAApycu?OaI(O=<~SDO*&+pRO}uOctn1dobenR9P0HXJ@P(7i!ZD9+%rwjiq`qwcRxyN9+V5{ad5i7>?1&HkVJfXy>% zx~9ZeDLqn6mqg~okD_s+ODC6y=nnIw>YJtguOj}XUo2emk|WJ@fh%|Q<)(_h{GEG~ zB&heX(ND&uDkC!F5uV3K0KFtZ-6GHRXp*vUvp#TC>yt9b4esqkBxqwSeiY0oT|P3= z5VB$dWth>^Jl-05p-h8)`V(uM%%Fiab#@$oWru~fA^o*MS^KTwrIw2YlgbOsaz8mw zpaVsUf`2p5ZWP z=Z_n#vqPtPmNZy{#KHug0LJ?1+TMNcY4WDF{?AX58K0+1%K9tH%C|j`0VvsL>MNi5 zD~~^R-%K|w|CsPk9Jl}5%4>lgJbni>t~fAS@ik7d^k2YRBdC!f-0y?UZ>4PBy!|Oy z!}`%LrTR&Z;HTMSrEc$!m`0x8sg9 
zrT4IA7t|0=Y)G*AyX1Xnwjp~b(XL**bbxZ^&irJTKpv5O?Ze0%gDR)3SWj?Hj#41F zFe55{P!MWm-E_q(klGOOJD2S}7nwKD7Wttq2bNzM{Yxi6Q1yVse%A zTU%e53K?n_*mbM7*1~JPsDq8DLShP@;WD3GcXJP&m0FJXc++7@`z^!sZ_5%uRp0r3 z3PRjKj;R{S6{mUm8JHvN5r+7+BSsd1IRivBI) z~88`Dy^JQO`I*ciZYmGRvl9w=M&fODmTzyMgv zkZmVXol#?f43_x=MThr3h!mp5{Q@Ca6!BzACDakr@!$W0 zN=Uy>UxfS9J7@6Xzg3x{q>6JR3xDipgg+PLiW*7zzLqy>D#+*279TPB^+}m(V9Tw- zwA6;|?<%HE)I#NY7!WDRQ@2-p`4!>mI*29)OZV^c_Vlj;Vf%odI<&0gXHIlg<^y(% zru=A^ED&CYz4Q%{C=cyvZo6nm@AuX(mDfk8Hdl_tN{VYf%^9b**ti>U-Zf)z#&ra> z(N*G$Ik&Od9~D|vQLN#85>#10f5{ZF=IqF5Sf21oR)3tbyn3M3d*fCqrdscNUi-h4 zZ{q@X&&<3Y#|ARWgh@;ur&(i*TQmZbWpi;0-NYx`vgfGMy&16;2PY4>!=!P@-tEx7oGi`0(OI_7&vCq3@U;>sjs|y@Qxw zXK_V$$mV{tMa9-LK;!{yx!|-nlhRuvw(c)J?`!|0;GXrKN%qiyQnhQH>E%?tA}dl`|Et-Cgv%Ns5S|(8N>w|27w zxzw8$qlj|ESPd2~;Qel>+%*9AVj7(Ka=kvlaY4IT(+vq5Lm@xr@Gw}=;hV?|hY=j9 zVvT)`$kNVh7#`H(V&&H-61US!``HlULYJ%!{a67;`RUiEJ|4lq@R+JUvbx(xpX>`B z^Z?6-X@H)F%0OL-z%_9QohiU+^xRVj3dnR&3a?Z0Sjl$`V-v1DD@7kwK><`6HT)zW zaI`tJM5h@>ebw)DpxFOxLQqC%{^lK#DNkbL=o%?nWKqSEyz7Px&f%AYrrqgN{iyup z3}y8YhK}8K9nrc-ts)z@=sc3KR>x=+NJ}d}qtwx1*aF#&XWD}4-%UD=iSS5I(sjUy z3lQ9vyWdxyiW5AHuZW}}CrN&|PBo`ACpSNJ@|q*PhKHi=FjR-(yX{L2?n7w$N4j&- zzr&GZGp_LpgS4o&90TF&@>IM#V>M{7&bk;~nu}#M1@sjmW}ANzl|U4 zB0cn4J%l_HA211+Y>U_STyfJt>&l`!OKfJ+H(BWacgf&Bj$#&z7fv zEhKoFn1g0>9OawD$(#@LbAs3G(NXkHT#1_eY zdhMk~qx^yV@@6D&qp$KOyYZEkQ0_*}S5Bcki2yooHvBtEGFqL~s$oIG$8Npz*^yR~ zK)WL=n3R4wt&b!%7HbTzcaiAKg*F*>m}4kZeK!0k{hxEi#8{|@ko-FwZq2T_E?3zb z8mWMzl!$v}wb;FACqhnAXd(DJ>x{`6Wm3rFz`sYNEM=_%BV5|6)xTnjt|37#Fb^xt zCGTF-m`dywz%FSu@@rlZjlGi1j*INwxjuh}^C3VLw;+dd>#z6Y+={U(n(5!@xesBb zg=~IlxU_X&=O!3-I)WS(kr9C4BQyV8J9QsCglx^7*qJbxh@5!Hz4HVxR;h6n>gwmJ+d*54(H=0i9qHUM@w)`WgrlO_PTly(28UsZ;r?xWAUGy&PQ~%z5&X| zOacRF;hq|;FSCQzc;jt=dwWoCdw?rDw`=G4+hYq8(+{b`+3Ha0!YoPI5tCx+ZkYzf z^h~`!D44F@Wxq@RZ-aa9+69=lW`vJ}QRWXLr}s$14x_T_Tv~m!T_nGU)D8ZL3Zf79 zL>ub*Dc|=!WZauuC&bG(mQ4Lc!=vdK4A#6;DL(Sj-26(f@y?y1g3gQ77;Hl`uVWc~ z>MYi|^0GyDJ-~y0*X^ukaf1Wg%UMGDPo5?@x@oGAs87oy$Ml9S)DfNc1QggX&A!Ue 
zbWdpXMZ+X35Itx!Pmo|ZS)tt}18v9Zos5iojw5EONbh~N_S2~3-Q`da;&|UrF)QL4OfM>amvQ z3a(h`WsUNkudT2c>e(j|m}o}?z6iS<)ul9m5$qxqD`{9WPQ6Qjm=gD$vXwovtw&HJ zc-rvBqZs!d2ixv{@t*jgmb?2uo|1w`rWLgLXS2fkvfFXC4S$=bD878`BSJiuLTV4J z0m(z@>d)wOkvmPFwUjoiAIHO<8U^WLPUuG}eD@T5uTAzc2OX@FW7wQGT7HBlG~m7D zuZDWMrSKEkOT*VwqslekhLb9OAJA`mM2YlA#Srq5C{X%pjXb12JCi1_F$)L~OeHC% z(NQa%&$Kg89w&N=*HP?b_$eA?LXk%Ttm*2ow}gNMOS3#ns!V$2x2mKMzZY?4rkhSn zyi~FAq9#_)(Qcq-7}m$g6>1J;xopT`tvcSFiU_y z8gETxArYN3t~(1{x4K~~g?Em?0=ey}C)<8c{(7x&A7*B_ zYzM;C<{!|G3_;y6H@)LUrk*Z1z(2X0lweC+5Jo=9Ti^d7D$BP#z}?VqIY&KG=fzL< zD#f;m`*B(jT2Ib9^-QO0``9%(QZ6^GyRF8e`8wUdis!H3eqh9Q;x?j0XG!uw5z49Q_{Ea$S5Bmz~nhXQE4)^>Q_Gah_%gBy7!ttBEH#vFi{5U_NdkDu{pS&WVlD{YVR-IO7 z$!Wic;vJ=pXfTObK9bVn_G4g`U07cBz2Nfm4VHQ#Sb>P>VoqOTq!k%nsF8-3-mq#e zs?j~jp0@)|92O}wtwcbbPCj$%mIU~nO~r6hh<%XL%91(QhR8zT=IN@`Hi+4`od+~h_pA8~&?r1(YZ#KP&w0EdNzg}HX@WhqOGpCQ!B9dV|ee*;Ph2ydBFzAXP?ZD=@{3YbMzhKXSWwvVsM5Hf|iSbWeY#X9dO8VP|tRt4A;CW zG``HW%}B32Q4EPXXJIW?YLX_}9=8*e2+a&#?%!(EI6=NHRjl(*{5VW@~D<}-*sSN?|r0f9!0fNpKTVxaC6s& zp4>vxl`uV8jXM=sVGk3^M%Lw^Oj;Xrbl(paPM|m1@!9GO1z0dH{a=J($mzKlKJjc4(dm1sGp>{#1 zJO6ipX&`x%z!Okz&h;ELgfgA^8WF>Nq+H{kcg!^x6Lfq|yxSuEhlsX~R_af&4%8A= zx>7eeWxr%w?;ZMZwsc#twyl2nqV-@n6}0h^mCd*aF&PUyUWiHaw539Z4pYd~%%FYV z`O5cWYJwbC&(%a_Wa(~SZfU>j70~O!(6rxorG^JSLV&Ce@DPQP&I#0%=PWVgoozt% zT3ZVM%Z!+61txD>om(N&vG_FdrBMQjZ8o4kgd?)T`mr-tJ3~S@!ZnwE_P?#oh&ijN zgeeyL*q@7I9zaB{Je*g>nox-zB>W4azROfEg(F!6)UX%0A~aC zz`=*~0q}=;|E)ZLa=KQ-D}72+%LZ;PmniMKI>sA*oL14rpY_>JbRx*A7@U7T$gAj3 zCnfDCCJ^8hj7q7YfyXO;wQ~l=(?1@i3g*+u?~N4o_e-UyGcj=4ye-izj}WMw%qM6v zMb*XUgqBLb}wuV<;-Y@5qa_8vI41Dwx0ZUu|shQJWGW>KLm|!HYmTX ze!(p|%PpLr5}uvjpzk)mp<8BDtcv;x#_$&KwLMj;Gcj_pMY^E$O(k{du_YG@Ye7Q_ zhj9nnxk_6HuRQTi>WQRQh6E!8CeyOaTq4BG46x(Y!2$0t(l2rLbw?pp=&MM3gRHy& zmx`Ro0GKsyr1Gy{R++@W$zReYYM}RwvQ)b9vTgdllD>M) z-ZyuNLe|Lpo^gV-sjI~O_q11^CUc!?_GgF2_RO5hI3LP0+2vxc__f+ zBPc~pibz~2F6kW_u#~+LA?qs<4b(YA+1$H#@OSF=#x@0#wmO_V-2g#con#2fPCsU& zN#31l&%nTSeT5Ube0!@!k@yxLkP(sh$&XVbBZcVSgwxzyss6rApk$zSy?pt%>_d-` 
zAAqxRymb-T)@wV-AUDV3%QjX)ZoVSES-uiz=JUY8!@~3!)9DzBuN7dg(grj7S2Zu} z(v7J(tAm{g!ApU!>$++^sRD{B4nF3#VPXpP_U|9(Rzj}U+^x(NU=IugG6Y0lSS!2D zUfDY4>CxKIn|CBRm)Nkyej=}BvtVGV-|2L%5X=xj8|IvPZ+MiV0O#Flg}!Jd=8idn z*N;8?n|}_=Hl?9I>xX>`4!C&hvCZTcNoKuK@OeuB*>u>nzT6p3?rjUuk)G>yD`SKD zDlfjvs)`WXFgRL|N0pshIm=iR+cqnM5Nb7aFP)(-)fks3h|C#XVlc6?^0PU+fBY_GxfM#tQlx6vbm zi3?Aovc8JL(Nq0m0xWSLBNvC+PdV&K8k=ID?NIrSCwR&!TSSaNCq<9x{Vo14A+UcOQ$D<#@~IYwdFzp}*(wl3zaI8VRp8VvgMH^YDUL!=}ncP3QtjlCjAZ z{_5j(NTfDTK=*&?YcHc>t_Ap=QY?}{Q&T`nP?*v}BBvR9L+92qV4$o({`4UM75Stf zq4A=M!7_n1&5xU$?*M91ZaMKQ0^OA(`SVt>jFHXK@;0LTLXB}eG>zY! zcdo=sX#D^+d;y35~_J}u`@e+IX^vbjk%vX zcO?S6UNQT1(X>KmFSGC1ao3@0gdkYhGv3>wyQMnhk$pxA!Ua>NpXMm}jPkBJwB@J; zMhtDp21DHt(vilF{GF>mVR$chuN`@z#ZNE%-rG!-)?>;6wyTUwq^&=_%*+IV*J&p5 z=FA%4ImViK`s8H$tcz4)P@vg;aB{`nou04e zd}z!ex|&MLG}mp5yiIYZf9xiOhr)TRD1>PI55VSfY9Brhhgw_OWZsok)-8lc?S!r_ z?z@Ac{Iq(Px3WLpD3P;hBFJoGrx)4^-DyfIzmNu{-gI6)|C#jFrcJ+YrB1pA=D2|) zlJ#vX74Ot`>-o|pd05rps^w)8c>dZJnFsIPejW!7C<4!rH&yq45msocIgx|o>LSgv zD(+`(y$Ywdl#NKHX)N6=_qp4AOWru_a(psoiXqr~t)3MWF|Qsz!LAgDy*tjtU)nwt zru#q@=gV-#65GvzUYSb|8{OYpa%07;Igj_3`M9ntPp^Nx`hx$vubjBh-fYX7sp0B; zC=nCULWXP@CiRYQOPX8XbZLxu267YMLTz+1njSm=w;{Q%1sTEo~gXhEfW*eM;G(#p=0_6fRs0Rli`s858WKo#?a6@ zlUy$V(CWp+x;@5$SM!n1W%9-$3mJ)jEhQ(C1j20mA{$&tufZW|zt*OpN$un|{?4Bd+AAA}KC5X*||qsKeTjpdiFfsT@{%C#-LXQV>4|KWa*? 
z!r7kJi?K}~Lp(Z7IZQdw?Zz}OTlnt#j+u=ydpgn)S%;tFw-@>sDk7%D3&yr(0E@;5 z564t5$u;R^HU>IXwBfk5BPL?h=OHSr-$@tI3pWJU_m!?rno`GGsS7S|gyoM;B5qnT zWp3Eyhe@O_=u4QD=V<5zPHodv1olLRH;+rsUnv#ILhXc`^GatQ-&%Ja7DB59j2uJS zPj2d-y&IK8iIiB)EdHlikN;*e9nQfAfYwSSivKrDa#DF$YDpi*Y+mCgFQbfJ6PjKI z(A9US>6UR(e9e@H^(&KZQ&P^~S)WiA>SS1eqb8CcI^p-28+@I8Za>4(4}o-+3)c|@~(aHG8YRotS5*{U#@k14(675mpJ{nPFw>P+(Y8pv+ z(U-(I^F^nRWh}|TQcBq(n%a_<#o{o=PGeQ~%}t*{ZVH>wpup{Mfjzg+B+ zO|~_$eO=Nl{^AUL_PnbJ`d*&*ftcHgP|l8YM9SQG_JfCX*1>F(lgT!iQ28EAt!u}x zALHgGLc@OTFQ%WtkEWY5pNw%~iVAZk$ETEfLcR6(^?qwE=o8?l=5{vONc>gwqi*TD`j5P*EAKja z@WQh0k0g##hi9Rpuy>WqkQU#{+wXFGy4SnzKpkG3JlUVH1CyE558$|qo$Y^`om{?> zdBf5D!8R0kW2j9EuC^%ElszNkh0xKSA*-`lFfyPJ%M+t{ac$#2RRctw~3%dQvGGs0V}Z{n8h8ZrNfsD0vZUE8>hvQv0$-mG0KaOfc@(ADA#5`;OMT<4BRBojl!0Z3goMCK{M6ys@P~lO;c& zPzt>8i=|wl#C3L9ck|Ox#}mk1!GPNvwXV0gYfX+qPwOepzvsiED8ZFS!L|y^vUr-AmAB|0EmE7Tk3YTa-s9OL@m0z zi1tOsiw?@N#8U(x@aakD9c@aY9jVCsQT2k%u1wtYs(Qkm?-7LaWUu1pt?u^K{|+)_xm`vj zklG4hP)_L@D%0GrN1O)^ANgdp0%;)6mXt%w64sHFK%6j=^mwoNVbW1E6VBOpOmVO-<h1@>+edB9?8rCAy&lG~S%~G+RkcQNDC0|v`p^v<`Y@>s5<*g`GkSbf4xcGh zf7h~Q4hHvvt>3rNOqD(u#Dc8uX8t4 zeYrgBLB#XqUJ>GombRsZyd|C9Aqm~kNdCucHm7kIdB{89BLcUw$r)-w&VYlxX$p< zvz)~1(s)hH3={|aMbD-ER7Ywa5L@1bI!`4DgiyQaJbkviO80~Fu}RH%gWR6@839c5 z3lXN_FM!3MC&-)@^Qm(*48y+%#4V$i+@h63ls|Lk>Zf;f)(XZrff&msn(P!q7Jhkg z#HzVb+2MeL*m6@q@emS(FL_r7)cFLTdOmnOvKML+l63P9b1<_v_;PW`?P88f{JuE4 zF3Owm5h~e63%QF++ST=S@F`NA+NgH@eNgN9s;opW@zX?eIi@jZm2FN2`t!DGgw3=c zu4)$+l0Ce5(%#;5d4vkyiFCqyMIWf?KGfJU3|JtvgKp5CJL>gg9QZrcaV!=U9AteZ z=;7sKe0f_otf4^)^kh%dPVP&+vnC-twAyCklWU3kIw zM{Bs5~Neis_6g@%Wi=n zxzU%zdiM#^DZU$%sid1lq0-)--?X!5kuqRF-r%_Tr1>@ktMw}7p+*G7RVrB6Ds*5| zX_xpPdPEz@GA)dA3!8&gu({{GE?qO04d-*Fe(9GLAcFI&0W~_d^o1J6^o6F(^o63^ zL_jx{QJYmIODWARvznQIRjsJoO&dCf*-0XrtmbiT(~LdvtI3mNAT$zuf1N}2tTJ^- zl_dnBdV@7YsRuH*thW2o5XDpK%6$Ft&ub~webYGwh?fn`o})$&V=9$nHWv-6teAfiRXnYQ!^=`D%O_jX+lnNC{*$L`v8o;*sJ^2!QzA46Wjo0^(@UOM?L1y=_X zXWJlAbLZGUtBdUTw~Irx;LTb1K?ajB?kfADhY3Gj*%tAU%@VXN!sInlDizY#!_7mo 
z!!9HpVeeM$6YSM=*@MS%Tr6HDL{_g2Gu4vi$OF}dPjwnM|87pq%DtB^aAVV|W}K|X zm5C(j<@e_XOS^A1wUavN&c535j(S9|L@_}lRep0@ljwV7p7zE* ze6qWkV$!EFmkhy8-5$+rgyB$J&e2}*5z>nNE+#>cu2G_NJr|i)~SVpe%g@+&$ zu71)w>+i=3m)|q59NkjR2%PG!3x!s#A=0b9(gV*4NL#3PWr1H#y2fJmv9@4rw^(ZM zL+!clT_3uZ=x}35l*f{}++m1)Ou=w3wT|3Cy!rnei215J?*KdpaCvrJKAP*S=o+R%aNm1Ub zwb@Ydom#RdES!7z;o|c|?EqG;l2&RdBl9GHMAxVqm)ns>suKTzg8s$_GL<(m+^5Hm zDlJLrkz{=k)(QTOpwcF^cpo;G7{s`(qg?c}Q;&xv%Jg$@weJ#ax=X--)%jmjrO?ALFS zN6EQHrAdGmdzoUX1>KBfYvpo9NB2N4ZBM`d<9KMhG=JD~Izz-p!BJa@#atq*ZHZf! zgPut%F&bM5!3FQNz{4)aMy=bnATsD$-7Q1XbE@-nUAjq8UiW}kU(qkGRCiTs1Wvt+ zl(YPcU#iBw!a6_Jv~!&+LzH|{^Sq>!4>fo9@Isnzxhv%dKIxd{$tROSujqK<)f1*x zIVO(rY7kRslt?j$nVi%K(RoxgA$D-*+vW7t-CozT8{01BB1(tHcuYF*1ka!rn(E*O zQ$q_=I3yCwx?jEWYL*rDJqUh770k)5F1K{hHB*>Pn0fOQvdu1n2X2+$wx=9)QD?xL zRTkw}5GA{(1X|6^$d>dLqYlUDUk<|6e9w5Dt%pwq(cCo7DJJkm*0#f8Zh95YQimZD z(+_i;9Wu}NcXppkd^eFxfiQ7Br@#3kH^x0dxXL54$PUHr{UVn%Uh4GK^OQ0Anb_C$ z)CFVC+`N>RX1W&Kky-AcGL5H07wgYUTaL6z-WuJnOV$6=dy1$>mVcb4f#s_rHq1nq zMoAX!L)zc_d(EAG$Zu90akB7(VjV{&ZieIy`cGq}SdROY9VI0Wcv5 zd0dWO`X{)SmBn0rDr-vIwuQICea^hsklIXvi9vy|<0!`%lc4=1A8^w?sjRMImAVWM z+HcQHT!nbyDNB-ogeOvN;WRcVAp!pnaseEoJ|*qJ$#s+NWJ&C|2x-8I`$@Z@qa>6U z@dA-b8U*WfM$whNOND*jx2n#>;EOEW^jc5P#6LLHv9Ky!c%W|5rtmd+5w?Uqsp6n>j zWbcUk8JY~)HYYyJ4X1GZ@+%W#|E@-@fe7ym3)O~4xxa&?2^8;!i^>`;@kRwSmv0{N zb^U~wv|2S&R2x1HpX=(GD&QL4Xz`>!)3F{tdU7fy>(Y@;J9#uVFog?ag>56P7a(iM zr9XSPm2Jx%cI&avzsfCpSiRuCm2MttzW&_G)7n_H#JlYz+;^u{+tlPeq$PskzX$P$ za@HprnP)9cSp1xvWsp>0!||Ld>$fjOD%(#s{8~qXsC5KEqGzK#2P|@*BK{3uA`HXQb2-Dno5(l;}IsmJv<$z66jorU@c<6CODN0T8LR2u$pkpMNG;~U^5T>gsE zE9nl~*SsCWX=?v_fcn=5c6%9{B3Gbpy5UC z>3gMEksCH?_qZ>%woxG-PHVWje$A1cU2ddQ``!nt);|!ZgHY$;w8wSN8|s%zOiNWG z)_$3BoK2Wz2tR3ERqK#bU_MpjG3rU=3z77AU~41!l`?5EE&?6s@o8kHP*ysIB?Xll6XV z*g$+1YJ}{w41Z$x4^DG7^c>z#&jq+M?=NZm@PZpwHt6p21q0*W&S3m>$P_-aU$*XJ z4;NVFdt)>yCwhfz4zK%^>e;Ha1h=h1UoM-~W>zV~d?CCpm9l(+_*>_fV7bBunGvhf zAXr>NyDlq_XS{oPnaZoOFujMRD^$A7tRoATLrYHOaAi}fm{4jOZx-28@Y>?l&|;Gl 
z&9-e@4b1LW?@lA-wq(Z!T5%1Cu#H}Z7o_&erg1{q;0!v^4GDJOfUcX(v5FeZ2+dW57$#0 zVcAvQFHhbke`!ySGJcsOO{UIBZMF9-R^}7JH&X5tH=(0BM|63V0ka0>e4&rfK7F4+ z@eFk8Xs}owP38+%)?BS&S(yqLPM|92NrFG!26MO`GOM!cjDp$wArFr{hob(GyD8D@`k-amEN>{f$#&c zDYahGrChctTC4mU?evl8$hnKPN8sxThO^MYp=g&`eLI*YGC5eMKf#mNu?Q06*n>Vv z*!F6LV76CrCkt~k&1c=C1=5}+XTje#u=uPCIWJF7;ch3~#sFZ{`r~OTx&i$4%8kzE zyuy2`Z`~YdcmfxrBkaQCeJYYAH!71&VDGQGo{M(cI};3SzyB=H;ZQl|ke%@(Z+`bS z*KoNtKy#JQ$$A#PwD<=ZpJ*dDkA5DWpQv+Kno>dS(*a-E}(3 z%Cx;;>v`E9c9IC$O5?ct<@LJx0F+NihvO(%*ZyX^e5o=N5POMXlC@pAH0L4mSKt87 z?7xRHt$DQ@II~zX#e*XJs?y?XoPs1wMY+VGo;s=S$(HBpi&re9#2y5K=XHJ(eEt&e zx2*0`G>A|ko=f~%mY>N`mSZxAsuUWZvLc0Ydw+KRH$>uZ0lSMZZ!{*0Z^#bY;Aqt0 z{Ho!j@q^;0@Noi#c zT7O$m?YSCNLn{5&BX32? zlt`H7tcy%vSdvwIN7c1BmS=(s`VjK^-^i;7{aZx zUo`0|ql9oU-hZs8dWkCBE628FJ6YnyUS}SAma(ODJ?R7k*{C6xJv@!Oj&xQ(q~S4p zXcb@=^ldg!&2++(ZpT!qyxklpecr`|&8In}{g^8CjyC-7rKg8lSo}pd2Qz^sTs&OH zHV=2?MAMK1Sw+WFF$F3Tw%fqRvvNCWoZ^?;*oz*>8vN`b>Wo+S%;+-4!B+E1-}j2I$eb^jp#G9av~t~n4(%2`3}9UK zEMMvI^#Yl!e_3Q8K(`8EsDMMW>Z89B5BWOJ@t%a&mXt6QH^(CcX50p>Ua^qq@!G5; z5Z(M5ph8ZVD{`R1xo&0m(+=6Gxa^1bxW&gJysH8g{z*wx&X+7#4Y)Ve>(`|$5y4}q zcEI-40r;lHTW6Mn@4H15eP3Q2eQdFOos!$3+AJJ!Hn)MC=+T^*A-NpijW7v#GfBFu zhc}5i8pi5xJ<3(j=z#yq64vjy;F8CA8V2slakvM6{md3@zh>0&9hCJ~s!A)PFogEJ zyz}-UXp-Xq-i-?JI+&7%hg|-hj=(4s!8y$t%o>e2QwZP6eCxPK3#HuHkv?ngG*6I| zBX5ykgI?-2Jf;v{!a78MG^F;}tvj}EiA%WrQY4jeq6kPwPTrXq&YVih2-$kzBFB3v z+tsFA?m0h7k~~|JX~^>$UoG>*E6pQAmzG4VY~=kyyl4j#F7xTx)ZXRsfn969x2dPQ z=Hm^S&nh=V=dOz57H}ye0hcxj^Lfa0;x~Esb}t4m23E3m7UZtPO7ZZd8%l3ExGrs< zZdH0;ajq@NM6mp@&EO?qk9CDU`sTNYcbN72@_I^n3;-pwtbWWo{Gw#?zjc`X$oz=n zbB?`-*X}4ImX9Ld)V`k7k0Jb*M6Uu|h4bmiV#-Y!KgS}CE9)v^MG)gCwTo&i2}Sqs z8FFM37wr{7jQ$k6-G-k!UdZ+?h_X}av~g&{KV%A;W2kQkdVX9sCnGnNw!7_aGOgZR ziP`%MjDE1@ZCoo)WnYX`BM0W;|D)+E{F-{azv(VPrAtaAm97DTgp`1kqyoa|9L)#? 
zkrYIl(OuF#KqRF>xhN$^E}`GV6UBf?mg#SCnGL-?_Ii2B6P<`O!gXhCnBVM zXEBaQvnJ4MBWoXtz1OqDu9~nazP;6{_UhB<{LP=0O{?ZiK^IL&E*+@8yG`rIc@x~_ zPln4!D&eZ-KZYcoaRf5I3)C;FZdxUJI@;Z3L?cG*Ug)SEw8f-3TN<}%F6^-HpulKboXfNz~z(S z$lLqx#gdLnNi0k;18lzuU%q8CE$tCXVUM-ic^(EvRj@smudFD1(z=?``YP_%@j9@@ zV(Q?xs-1u31!7?luGDC|guf3)S#LtNP=7fwmNy~xBQ~SC05*?dVsL)%_q$7)JmLD_ zk35TRo`Uc`j$4?S(L*kFV4tv><)RT@rzToxuthG4z0AlWzF&_W6NV6<|IAId^mn|g zC4_TMx@<<4_OKVg;P3BGtw-ZjrmB1zo97`J&spCdk)iP|$Mm5;`iTCRTwuQ;9h^E? z_3*p$4e)%5EU&ZLu=nB@#0mI4vuq_H;XxxWCfuBb-oI5%Kn4UE;>`(fo*NtZc&9FE z;tx24xn#=6@8$N)8_CqPWrR2FKP^t{denL7lGRy$R+r2$aNFyfQIZOxKtl#ZXx*IjKwj(?7h3LVo%H*CeF}`R&0=mj zIPwGZrl&4)c%*U!q8IhYpWSj6z8s{7bu!Nw?Z3TDESZQ&An2so=;`nA7{s=IEB~T1 zfk2qO>d|T?@Gd+6&)>pe>rnhGWzh3eYnfo;UzXXc*e7%60af?LqA)GutG|#Hp~kqJ z=+%Ngr>Q?G2$un8wkJ-`HHFW@hKYVB{}h$wM`+e?F-Yx-NvP!s(3(*pTRwz$6$(T* zP24qc)V=(JQ|}groxO<*ND8426v2`O zSEjd{(4i|l$QF86yG0-0g|=p<&6PVK2xTtcx8Eo(%O=ZvvCL^CmQ%&ikuDxe0hH`y z3I2DTL7u6x)6{4)dh~eTUt-X}$OCa}Zw@zPk&cZ?T_=b{addoSoi$U z8ITMuarSqO64lOWqp0q9&9qm5(9=)Yo z%{!}a;rxT{AKT0>Pr4wo!ROg`LRTSS&9`yf6OsEAu~DZSUny$3@Zakfg}B1rcwP|>RJxoXPpujG5zH?`brqoSI^#m}tFpFLmO#o+@w>(mD4 zv7)J~6a^2(I5?wMIrN*xR^IXhL0!5Jv0fWP_pcF$tDSXqo>%!e4SoB&);O`9O=1;OU^f%nCkgfvedWhE@FKW2ck7P0KzrE+rf# zpW1At_p>5TY4+q^j80|ethU)WuW9BVA8j$L+K4H6U3QMn$e2l*>lfY;21sN%NlEFew&Uxy4 zZ^`oo-i^O?M%$Pna)tW_Gs7cx!?Op)H-WYf+_6J3rBXMT1Od0c0iciQMFxJ2R)nhN zhGt%q&hTar`jk6PXkd=0w$gTd!m-3PxrrW|g-{WYD~3#{qs?DU5%lRPD08Gs0_+uX zVsruvMw*^;5SD4Rq0q*=kTb+7xRH`C(!EB#H~^a1pK5+4owGVOw<6Wr06kfG(lXY) z$RmX4Y=Jl2Ht83lu9*iwzuoWcwN~vc>`hG*HdY^W%agOVx8;SpURWAA2DFlvl@kCh zAlzv=!^`P|)}O7OjfMRxTi^3J0LF=m^_J|f%xG>~!cIP!sD4gYM{k;QxgU27NN0=& z4c4mh5~dh)Ec2ph$_W!6sF~#TvNeWccT9q}?TcSWM^Lj*%7OoEVz|=Sd!)UOxtQck z>2#;I3bQt~)DMx)cfs<27*jx-gHYDN=kvt9+InaHI2ui}0^Pz%8ALNYg-uIY`1D#LkcYQ!q)iWfkzv{%|bv?VL7wbPzhxiyWa=_op1Fi2BFg zWOI_gj)GE`CuO!Nhe8CU*v1`(>&*?ZEr`orgj-GH-vc83!+tYp*SI7DO(K$xJ~ zXKLS*HqH-Sy~!_X6vwX><*tr#1bl;E-90wE6lbJP(Y>M-f(tCi_ZK2YuKaQ>Z)bzX 
zUDo>{W{(w*@CzMi=Q|>NAQgP;%kyPbFrsIkbqy!lvtq$E8DE)aVPBcAb^8f~FmbLw_o&?^%gm-49niEc*; z+sX4Y_`qcc0waOl@&)TJq39!2tSe@9iA#QU!7E;)!EM(haC)$v+kWJUf5zH|Yy>2l zCeYDnA`PqN*?bh~>GHnnW~=(2r(eNS+E401>Fx|B>kVHTzEr9oolfIrPGUR` z>~%|mXcIL?QEs!{P=b|F;FqBNqCQjtLDdBX=IGeY_YY=l`mH9yE00 z(-jrULinUjzj5aXq+P;t$scZ$XKi6VwEpd87G21h;UdUm#D7C+&FV!vXD0@X)E9Tl zm6iE#+vLyXgKUp0D}TPrbc(W%9y=xC=I)ageAH%iQHd%q^J+ONgpRU2486K&MdQ_K zZSdU0PdG$J^;{DpuF@Mgg*5Ae=79=GpFf|PJe|4VKV)$O(Q&pzHzIpGw z@OxZz31w+E6d%wwq4z6EDFIPW6215-^KL4qp6Dc9i!qby(ebyL^))Vk=QA)5HJ$t- zC?F9C$fKo}`YRJZ<-LI_aK*6p3;)XVX*RwSOCm}2VS@R|V6x&@n!VybtUB$$PWGCG zv^dv%^#d#u?{+LL@D9g|*d7r);40|IA?%(H^IUfM9%ruz(iZyyvMJS~bE)0Ez?n_jdUbfFK$Y#8t zyS{G$-Wie}#HfSu5VGdJU|dVv1i=ZmL{DroVuR#v?PCxu=V%+oWGy@3X3}V!L_=*^jj< zTJ@K`i5IAEEGwJk8efTYTvMy|mfH(E!1b0Jgikq}!yg9wdxXq*OP(hgeeB)Rz-Z@O zsjq|rO~w7YOXq-Pxjv|2pwP9$`kfx+_0;9;J(kE*Yodk)G{zBgXkT1`j@G`bXicc+ z`~9RmNeML?x#&@Swj91Z8&4a3`m-!1LqMLQs=;Ux-u*VwkB(Y9=@&iyM~ul8-{K+d6P0PZmU^8<$Kw`#aI z&!s|0hvRuM8khy27z~AHFH~Dl?G}z#4#hCll%sE`2Qe8tA+u>e`wOu9e%z#r2|KQ- zPj{VqY%&z%grBkz=qr?ULuGvzR56WO-Lt;q+P1Cg@r2E0^x(9UG)P(mZ+OIln5eAz z0=S{C%Y{FMbInKe;5~7?3N$Zz)Rx}O(cj8jSX14ppNBb{z{G0Z@row*qBYS zs`pDmeYEorTC=@u^)9!^Q(`EyNrLx+n!{9&w#MYpCqJj9zcRN}xs|z`50f+rZqv#w zO~%?tpt*AP-GU8fu#Zu7RjN%@7{iOm7APaEdVibtmVSukLSAg;4Zlz4-?|LO)l)>u z%9Lx|kImS^K1L28QmE&C>ss?$H~#g2T}JcF{*zz4j68A;9_)AC+GSG(VVzfX=AUCCH?q!jDNnq@ z^{)w5zre_9$piDoV(LyBD9z_v*QO;6+#aretv09?{36D#B^d(mDRs{QGK*VzzOI4~ zab39_8C;DdCBzfNm_NpI9(bl)ts=9>%rJe!u-3-n0!x8s@cdu6seRF+-{M3yq*jks zz8&Oz@iN$AQl!$6b{F+;eugONK!rtuoZeRIgwOSCe(faB*88STV$c%(_(%~a$=nt93!>q8#nRtxv6h+w=N zqF$nERpvc;%3a-p`duz3*yU_}U+?iZhK{^-F&bvn;U;S%*$=7U#494p+AkTtTvQ;< zYF4IT=94N}?-~41(8i9f_Td~G?bzS{Cd;(_W(hso#!Eh`-aq%)a)2c>A`U!7d8Su4 zo=p(ilwV509lY;<(j;D1UaCQIbXpz7I3@33rBLmDX;r#UFpQh7DA8B!fHaXPf=ypY zb?^1;a266;B5V7h5@0IIlRtfU!=|;S%aod8QIQ9WjotOj-_sA89)h%`Js7!W*9#^* zXXAvB-K@>Nb^)$@Vg_fcoaIl7Ve!NcD!f_zu5K@3n+3#PyODd%rDOSbs^E8_?me8% z=bqH3TaM~>56o&=Zw*%b74$tkh?xtw1o{?KY#0j&OS&86XCX$a`pmxu~ zMq- 
zry5>cv#P*|so9is_Ew8e3^-SRu)lk%amG5SurnuJ6bg?JO{667V6#-iZRBj4n)z7y zlNEUiyW1X%I`)JhIg!CeNn{ZQPyj~jpz{F4AI-VdvHWnH*-HI^YLt8XA=0CFrYag?El~b~Q#X*fa7V|1Hwa zjHqbQPSSp@AU2UUs>5D1UL#2HPquLWj^x-kJ)1v8XTtk4M< zqH4p)nR#a#9Xl3$I25pY(Xw+{WwvAhSM*kQnU!CgT4wbKxbYHwXiqk=ejqJc#_^yI5R5J)}8 zGHgoT2h=QP25b)_^4U-R$wBmOk4)x@U*M9|B+TH6NUe$5a_$_djK;pf)?C!K<)TYs z*c|-8exiY$%HaMXYLCh&H3J=G*rFe@h-d!c@vPJV*7HGm#aQxDqBq*2-B_6#1s-Rv z-qd(9Bwh*gjf`Q1zuDS|e4(-c1{&JOGQ^m2oC?2d=Tt&EY@v$|#plHUZd=y!&h>{%AC3b91GfWf_L`BLNe?X{g`VB|x!3?B z{6KH)`6z=gi6~X2F_lwmhWH?yZ&L*kW?Q+I-$-4;P5vZgy9bY9+?)7*%^w0GEBlg) zJ|n}H12#*(5of)7a+?|($#K$ZB(T9TY(EQumw{iM&v6Mro;enq3v5cAec&PcVqyZC zSGK^sVJ|kmr!C56>qWcRzj%26(p^+93~SlWr%C*>vB@u2uWf%5_4YLx?f9jn7B1tj z83VIwOLJDNm~2%hC6D~piC_Sjnj|g$u4=ABY8TlGI+&R&sM1j{yO=LpxbvG?6V_p& zTP@k&K9GJlKeZ2r+J)W*@TjY+b<6R}#4brDVEyeUUNeVRuYC`g)G~s2<#T+^dIB*h zXdOSIvrtzh;w#X_XlzGsA6*6A{VsI9*~0IRYX=vm>uc?wsvjF9#CX%&pe`|+`T2MC zQ=B1S9hk(REULrgg-#oRO6zVQfc5TW*ql!T8XlG-mcZ4WY$)nyyxGdmN1yZ%&nt0W zv%#m9`III6to}No$k7|4OYkNoJ$JzuWW5ruX{2b^>gI#`st;4x5o@|l?LNlhn&Xt$@0SYDuJ;P26IF5--rAOakuL4 zzKxv6lQh|6WL^)9p%~jlPe$Pf{<}KEAe5Slg!(;lx9WM5HB`mAvmRt2ovkxsMl7no z;BXhB2WhombakYj_^@WodI8tFnT%#6gxzl8Pl!W+EwB;e*wz}CvFCX`PI8jEmA_+W z;-M`={A~GqMX zBzP2vPBOB@H0qcK1o-%91h+^_2N|QI5P9w|j#b>y>>s*5!QTx(h_$MfFesxWOM1Q8 z!H%1(uDcoDB3STL(G;cWfhW509}pbDZQ8ZD^`m-O!%3uC)2&Zb6s_F+m~mJgiTN0_ zQvnUSsSs(7fFHj*+lY2t_%)-G^AD;g7TxEctiTq1wR&RyOrfi&vWQ#wmEIUQjKyX8 zh+dBCB$N9oeu{>s*d7)uw-r@vBR?T7chs+_Dqb3)uJmU(WUAq*OA`Zp&IAt`1aCe> zgvl(r5WS0Gsz5d=TBTW)TVnxK}YRhcEH8I3CfrCyir7l+x4{Rr0H;gD=k z#>+WUwGK+3v*)%mOzt^EwO8q!(9|kH-(0TU%OZhFs1G^=kARx`*T5%Vp4S56Ps2^ z;KuFGD>&w_#5W_MlknZBjr##z(_l7h?!-mH7O(wxuJo%OfQT$xn?oTnC%n{X)x?! 
zbJ#Aco95(-bHEMf=?-@TP*C{qzCbz0wZRF6jy%rNJR1d}P)FB=r2+GC8ZHvqBnP9G zZeT8N9^6^;od(W!XIe?li`;NxNH{g$#o-AyvyDTD2^FLHID~c6x{KL51UPPdx@iz* zdN?5Yv;=A$RiJ%yrW*N4>L1-HLzsAIvf(v8KB5B?D}tEz_TW*!ijC*QwSk-@x6>~E z-sXxHbY`UYe}z}|Z`nTASaqP)8#wzl<9j;W}3MnX@KM ztvnRWCVwzQfdJ1(s(Sgirx?7V?WJYu0?-vikxhJHRnG4w-{TuH$T7%McFmYW2DNyx zh%F3-B*slf1Pje)xpW>c`W#s_H}WilmnM7r?O{_c7&sI|^mGa@ap0j$DJD(^_jPwF zJL`ii(TV3USHlUNe=5pflA`p&a|)ZZzig!UCJ3*`-94}UbDA*at_Nz;L!0nmquMHn zVoBJgq2Vo&Kl4S*=xhhd65?oM?u^-SMlr75&F?f?A_ofP&?)T;MbT0<9A4L`N&)oCC3K^iicy%hB{LO<6TD4h1HME~iNf6R8ryDOs zkSX?Xih>r{$v9byf8ZY3)S)UzST*mD?W(T)!I;~9?U<^$D4Y__UIfwqdnNPyh^xC_ zNAQTe*(N#XC)aa4CqWY~DIaWN35Vq59PRV-Lx<_m$=QYaVM=(446~3#^7Sm@r8D{x{YuOj z-g<P)Ca+Woupqof9ZM%P45s*LC$`bFD7bQ4@t+A`+!wkbui9vGB@!ed>)ONjMcd6ymlUQJv?@HhW|)TvNno?+b@xVUkRdGgYn_}o)omUM`$Jb zf~k%tdEM47(Fp_~=E+aDN%l53&ly^DB&8tY5+In>*inDVsGwu=!|0z?yFI2LFZAC8COoRVtm&Vc4U(XZ8zkd9fc^!*xC zsZ+Hd!TAy7ima=$boG%J7d=U;oiePw@``riT}PbGntypfS@IL5N|_uVd8;QfA@SQ? z5$*#TTSQZK?zBw~qxJN;FqzU0cR+}WcN+EcIuz91i!AD%HjT~XjAWzk4Pb=?0^?(t zbpXk%-9O+BS7_DuE?+{>L6a}?yaJu~G#^x>xF4NQ-Zx|wXvmD%=I9%JG8XJE%@7Wk zGNU2}4_E@rNzM2Q&rib?%Q9)vbBk~{oVcj!^GW$)K4%mmEQ#Za$B06|IHPlb+QKSLSfY8j~_pdOk|-krm1gMe$12dr^uWq z7T@IS<8ke4O*dBVa#Zj1_y~4wx9Y2PLssxk5()f~yV}wUfjXWTFIL5c9yi@S9^cE> zyc9Gd37OG*ANl5|h+>9UzsaW2SuRw>emH_SB3%L%@Wig}@i=wt?}tA@tnzbyjHz3U zCyll9sO9onp&H7D9{%fZjVE!cJk5d^T{4U*(w)gnO^}MRQe5oF30ts&fzYQ>t%CSq z9P&WA!-q(l+@xCAS__7Eo@=50@MXA6t$x*2Ae|-C%^#d4$HQzJk)ZmIJ5754FgrUr zg4YgJ?(3?~5M^M9gcN^PeRgQAE_PIcF_)ACT`vu%kb%x=f!HEL@7y+%b*5GH7z87+ zXZObheHiIraefL^%~3TOSvu~9S)s@ZOkKnn-M-O^DB$c^)(u^016ZPqp6w<$VruNj z`Pd2Z`c^1qUugXULzIiG$%B}z=S5qrZS>FqWt)Sk2_o(c>Z)!Qc5?ej(Gb$@fuskz z&}q8gx%YIs{!x7%XUF!t*}e3W3SV-W_GwOR-M;T*gt$5~N8)+*3Vnh4*-e>_(is5~ z%|5bcbs~;yy7nQQ)}*o*BSwl=NvTE2{0;O*FVqAt0>R{}@AfSp_Qx)8k+dw|0`8NX z5bwb<`>qUPg8Kzcwk^waYkJjcBV6DQJHRLQvCrT`(fh&-t>drKaD z5TUVUlFm~rTjp}SdVF-xJtyGcVyfxpOFsj|0>OlyD z(4=ohkoqi`EEFBtfnqhB6eOx64aF1kkIGJe*ulismS)pssPa3|veZUP2xGeb>Mqyv 
zVWzF>Psz?dQM7wkmp_*gz|rxg*pYZy`KmLWn<~c57tIH*N~|m>1?NcXvqETb{?ny? z9EK%%H!=z%Rd==U#<%b&jaATbbP&j!owKB{+R zemlDIQ^PL4knqp0dg69y@`)(+XcT7324V8MSRMD6U@@8Ulz9x{?@|KLqfPhtSqIjH(M9nIUwENH2 zEaz>S3i0z9{(>*zez+QE$6-7REzf+v36Sx785m9p$j~_WZC*Y8G}!lkoNgH8UUT>G zdR2>SCemiDy+f8e-Gcc5ws?gWey-%>@7=*@M)~-8K-}Ea$GZPPnlb^?7shP}9`B3S ziMbH}J=h+um@q2QN9RNL*|-x?h$CxB_jcreb)F{L4M?gDhhKCvn0ZTUt}=g~+&Ld+ zxg*xW867l696WFkoE^x%^D**y?5cO!hzZ)+fvgm265piOe`fchbH-c|3p@L@!4+Kg zIj+QFe|+p;xx{>3drwzq%@$u=_C)fW<^DX^#4CJp(I^S)GxbIcY5kg48&!SJu*s>d zkHS{6|2p0Er^K5FmJY>FS(mHa-dW!DD6nwgR;NGgytHf$c0XN)1sr89cL)qR#SyO> zz<4E9Qi-8c9NdTwfo^P$Rtj2>yFWv?qjXzbRg+Eme{U79il8b7@ z`%ydciWw~82%S$ql3ihBR8~*Ao=P}})r;>&Q7dav*O!-_+C2YEtJ%diC%&flEH~#} zrY2jCIt?o##4Plax@69}7a^<5Gxi@0a`*Pzy11J}u=8=4chhS% zXV>(5#usqznmM^i>H!NH)x`*3q)gkuasc0dtlf9j$ z39%Ew)Cwv!pT0PQtUMdLazLy*s^Tq21d#SC7x5$(wOT{ktFQkWcd}!DKER?r&J3EL zJ>(xy|9txWv(Gwgn%TM7M#%bkynOdQ(Kd(fo}8zw!k`OH?QlQX{&pHGRbED26|8@C zhsxUiIYwcM_WQDzs+m%hj%D12g8Bs_Wnu^qiPX-xv&Las&*D49bc^y&5Q#Uk1okm6 zH`!2_dRNTsT-nq7@m*&OEn4*9OWI6n7YfVxZD5q*|E6=FLGrrfK+zEC=jiBu*9Jlz zGQzHRw^6Y_H>jQQtoxi!Q-gfI>e}8Aq2BeZwhA5LarbgTL%!w9sax-AXj9<1Ul9z3 z2ELUtsI9!6_JwUpGmlexzp(ujefd@jG5_(~%Js4RKTzrfMrY(U#@x(|)+ejZzi~p# zK}P^@(g~3m;7y8ul_pXK*d_qwsFCj=;5XVjc<(fnXA`nExm-^?Y37#SHM35Np7oVX z6UKA%uDOqH!m_WA`f6DC+K`88!ojIN$O9&6+C5BE-jzOur!95Dyf_ZRw>Wt>6yk{y zVB^7X=j59g$RwMXp7T&>w#|W7-m}WgEt~L9MyGRzS@@Rx7;>vv?B0ldtsk_dFg0FT z)^bP?RZ4&264uk@gLA#aU28|aLJkpe?^qfc0DkGb!)keXi*~KIttBa31#(*yw#|YX zjJmh>yP_)ks`Ib~&WNe_2+U8%cI3$V<;V}kd@_Toi%pK_j_BlfK4Fes6FSm*9k+6{ zgGWMUmM+c|t~awT99ok5B{$hiXsDN0pdZ${d^XwOJW=`6Na=YtF1=l|B=c5nNa(l5 z(^rU!g`yWNjV)T6eZ-!=28G?l zl@+LV<^`9Z*~^X(`36AmJxRC;Fnw~Q$^U0C=ieuJlW^g5%k~oBnA~!i26m1nqExmI zZhW{auNXdT61L&d0@(1Qf>2EF<*U2Y2yRLr%pvBooYo=x&%rBEYubCBx#IA-Ob0(>SIY$aphfFpME1=# zVL8tS^@pQ%N|2wG{q`&?!_PE>nO}p$6eD@VQyPECC+R4p7~S?gpazCZF+u#YH{D{k z)SQ7vEK?X~kipkXr? 
zTP1x>Bp$bIjTPlC=VN-suJf^Vd@@=~bK^aK+xpwmt3q{XIOML6K0Ezk&s-bwrmo^- z&OI!|>{%j%`iE)Hj|Ka~TWMS|Jbtb9lG@g;`%N)~?Nq|d5*PGbe|Oup+F$5E)F8jC zKXnmi*4r_S>-K@0taEzOwrt6uUl%cindkUO^;r``7J#FbO03|Ga)E98)Gi<}Fu(P) z(Wan%hG=AIe1PR{jKI7P2Yl8y_s7gzJ|C#MK%3|}Q%65;3`_2my9YzJjcZSidfA`g zFGIQ5&dhNHwm_w48KYmbtWSJUQw!gcFHi&+qyZFfL2f8_p3bRfS?YhrYL+YlGvVvq zS7WK~$$v3iQ;D6ZQ2EvjA(|$BR^&k^+ z>Jl=?Fu5>nTI0thAu7cFG^_Q0)nC*+QXE*vp~G>+kBkr$ef@jVet5&~Ea+VsF~O$c3tbQwP{>CvWLJb)7tyZ?n79pfiSCgYCfMfl6_-(d^eKjb`gpG3IM z&)+JU!w~P!jb?wu)Hwp`15k}MxsKcXLRnRum*wmxN7Zr6#YU`W{e-cuAi;bl+GxhT z#%#aNrL{G@@$dRQadXXKZP;wDdOSu=Q5YAEs`HL$!jFpTCZ}f;-~QL;mHW5vE{f$fe?Xu z_bSx3@aFKC@%Hm{5425#bd)&=ZwOdJJ2gh_cW}y>24=jHtxB@K)7Ol?-+kR*0@rH1 zlK&x&dCj2dU&552s~T;MrtkGQ1^tY;!jlhtJm9=%II|)0^y%3vqal{ex8@GB;ha$3E_3BrJIZ`NgXjlh8N3tYz-4bf7tFM2MxRR9vlssh|$|r zquv?xKWNt(W|7U)=h)@qZPA)Hd39$+F4(b0lwUqoNtov%0Hi3np2TPiTF$OVQwcwi|Kq%yYvr57vu1YBV!~`Kjz-Gd&qz49dMLkF7R)`Zjxh6@Jay%KBQeoZ)PYsl9IInYVA$Yx&Lj549gB3%ld~ZFQqxX_x;@!C3?~@`MK){{>TSjlI8`xA$)k(6Cz=R zhl25Hz0Rp-@MpXOD~@bUI-I`NEDJ2#FrqN#6wNKHrU(t8I@P^xtK;uB!h#Q`4>GN7o zZf1da$g%F$H4_yX>||&6_l1VVwTJnh#8hRweUYmh6)7!`D^P#YG z(L|jzki{mquI}XWR7R#T1Tho`Z37u>js;IN+!c}z%dKz$hQE)X7OmkwKcy^LU?Ey`pD9(-Q+{ z$!9YiHVj#sBf0;o~lmNNxUu!zhgIpKGEa>2^nb48G33@DlC)qAf;fzI@zK_&?Ud1HI%G!ecbnf1gh?<2`k%0$JVM*K^W=L~RKZW3bet!6L* z=suy~{o4Ro@M&4!$5REnjwp~^Q4S0)0@GoBj2a;9by= zpEN@{UhFkuJay9G+tfq~Yow?&jcxt0R@e7oF2KG$ksz=959vVU21{z5si)YTUjM8c zn8rOj=Nvo>dE(pJ1(b8I_KF`dl^-fWg!7ABl0+6*p3-d|JlK2hh~sKy{^)BG)N_iP z%2QG2zSA~@3rpK@@LH4J%e)J#f;1v$Jy9);B$+<2#atkxv8K%2ya=z^JdF%C7#R`w zX6(q%9{(!SL4-Tk?_+g@MiO3&`SUW$y|}44!t~If#Ff|z*+uw>1Dv=~wHIu#tnDA; z-ESJ;bR~34CGcZsE{^oH-w14KwC#9I>77sJdR=u8b&yqg;J=m?TkbzI7$*l@I$)1AS`R0fvMxt)(EK^c!!%vWUrem@29c8tk0RH0mRs^Qvw%Gp$zc}t( zCZ6)v{QtpJcFDJTxw??Y_D<>$NMKONB9I#_0Z0FX84rdEJ4mk^=#LBN;*OMAuW)a| zj_tzZTRm=A?9@1PN&2D>T`6eW%UGK|7JR_t)n3h=GllmwFS*H3gTqx-A*9}1!KI!a zuZnwvqB&U(kbM`H69R;JrK=ExAb}Ja=B|yA zmQwJjeypl|85m)Q^Q}>KA?}%29k+$M#qJVUtL`}LIuv?xP9wz@G@lrhNGM`O@FN#m 
zY(Sf@4HES?`HKIMyZ(1jhaTfQ<0g!yoe{M{XfrY47YDl0l=;ma5-9)JU)^?( zqxpF2r9@Fx$dWH8hk<%lf7}=Xl=Zok2hI@~FR9}N1+u;aW*2PD@Vd-WiXja$%{m<~ z{r_oO6>YQ81DnV5zB5FZ#fO))TB4Kcz08wD3 zeHg^U{%9Jc%mXSVGUWX9#T-jX7%}{gfy72@lAuCEh3LdKx-qNHq|sWSngtBpk{F-6 zzV05)AhkG5K@?Yem@j&iQ1~u1Hwk4ms1EC!SL2zq`y!S2w&HMK8{o=xw60LD(LlOA z-+HA!F=QQmU;d;j3@{h7AOXB7r$LE)MGH%h<8(zcd!`FWwLw@IDfz#F_yTi`9QZc{jDBoGiF? z5p$CgoFewneZRSsa!Vd?RQ-IXPQ`i)9Dh9;0R!g*Ab!xDZP$)qM@PpxPE=lL{#{9Y zV8m^YkwXRL5#d`Y(0BlnBRxmp`nlQsDiZ<}Tgd%kOQ{Mo1byt&+$w6Abb8iuyQ^(+ z>4onW--tA_ZZ4ikdwlSp{%s(bYBI-_&b$NK-u{Nv->}NF7MWr95f-_&ITIO@r8rjl zZw$(0aCsl_y9+Ezg^z{DBKVYR&@GDB%j|@-Vn}oRd5??B!Zd6nEwHKep$@lb+XJG@ zAt)&w?t{T0F!d{(&on@46lWFZARgH54-Q>r!Ne#hUf#sF?nu69ZVJH2=!JM>n(=-c^#>OhjORNbk_vrOSso~ zg65u(BxMGzvutNqr}UdA*Tw6bo%*jdx#%54P)i_SI*$q4#)cy+PH&zYukPA1ACKxd zURxyUErhJzSd|X1BIaHh=`04F99IGTHy1Rwr^mruJ*yeGHP}(LTxS`BIvLvjiMrR+ z*PN@f@HG4sJNs7wRuHn5EH@lfzH`W>e`Yk#W5gwU9#nZ$4$d_N((GR6QZ%eV8)@tcsE)(#v zQ_&V|YLosp#m2{b&BMAZb)5*@Rqk|fGTzy4JU%l4D)@2-kaCF;utTk18>Z~B*;TcH zaYlpKL3>XaG-O1Ps8^Z*Cq@R|QfE`eg_DDcR_SFdq^yy$L^3*IWR#Dt^vVB`ISn^< z>o7NRq`!dWeJm>9<`DU6;l^&Ygw$J`u&q@9N$Les=7j_m3$lh@=42RC^3V{>Nn4FO zyk;y>9{gZ9;5{As$uiaeYo@}V>YK$*#rl2UI?`@}kTst%zDMO=cGS3QW{-+0JL*OJ z#}Jw(eRf02OMTA4E6ZU{PhQaN=JhwK?nQ4VBf9Hbr|*o z3#!ICC&Gogy`3=(ON(_?%wA51_GOIBsnWq+z`L^klXu2F?0-gufU);IREVL$)T!C? 
zt?mS~OIw}xMO80{tq#t@`ay_9u*v?}Rl(u>5g<`3L~M%rbMf6W5A(EXX~m55gdE*` zC6ix3ofU42#C)-S#-H~RE#*BOT>ioES@kV?;tNan6K7EtuoFLg+p;j|<)0ahlH|+(ry^%&rQ4AprCJfR-tz zd+(mRt@1WsU)D!{e|8q#3HJu~#v^uFegVTzw@vky0`F!J8)}r*#K?j*y@HOyC{k`I zNTvxfzN_8!T|;=;kNKazNyS@UrrZ=&_}QoyGiwv_D<=;7VyKQ>dY$~UKBX#zVjae7FBLgTvgcsI~x!fi2|LN4| z-Y?VNZCUL#Fw#!JH#IV;+oduJKeJBR9=lK8Q&(I^sV=uI*d;N>ZD zOj#U8d1%?KdRb9+*X`nPUlTdqH7RFj1i4)TN(JfTaYSzMPo0FqBu>s;HyI6rMA;1u z`0i<0tGtvwATi(hp2rvPZRBo-!O7I|nDZjmf?q>`>q%R)eYRgxR zm5}}{xCB4LEjocYjYtM0HUU^fvehul$B4e*4kfEC()EixWsU=%&U1dD(?wduJ#97b z96lc%x+jU2%An<}-S8$zrcTMOC41~)06RGrSNPw%Dxlz;07E|Ss;zBJ7oJ6-o3|Bh zUveDjF%FtGm}S95P{i7}@(>Qf?VH`>x8#{kJ)oRAl2!PEuO*KVQE5rNt~+X`K2jOkS?o zHL(vEjjTcydbyBPK<^UU%>#euIX)iQzh zZIcCHnMht)G#IwuVDsN&z5I@ye7!At=?8JWyZ!TLkbT1 zcbu5oO)d6mAUS9L;eI}tX-ITh^tAjklOc%&^G_AkU1~R8edy%Ui6R7HdQ4}lSq3D$ zOQ0#@r3$R`!BtGQuv-&Upiq>XxBcByF%fQN_1O#G>@sd(cNtesU~VEMKLQu$PfX;V zkARQJKN8d{F-%`wlMjQ1WFYnhcezs!2H{8ZqbyQ4KHKLhtNADs5W2_Yy-QfrYQd2m z&;CvskMTcj7?08CA)<7)tDKMgx>uA4>;8CK@7Fa$Tko)vE&%1lZ4{XL3*&yuNhPx` za%XFq2=zu%kWbo4u#GzZ z<{H@LA$vvBeAo>}=H!vvGgW*O&;6%YL^!=KE4EdAvCQ_ye%yKgz8t#YO=+up?Z{+b zKex@;AQZt$z+`+pY!F?M0Yau{;Y+FSbyt56*vEbERAQWDQH)rzbl0@sIyOs1zup{c z-8&U5*SbOWb$6dx#8R4`JRWzjHTB-@cg0u=!ahnE@GP9$ajk5+3bFt<&vY&NxDe0A zw^Yuwd;#omgCn*urtV+`4Fw=2{C-mzQ@{~}Ir}@jKZ43D{1H3+L|OG9JKu*!q~^Hq z&H88VM1#050rAk(r(zA}K_5hWg|ym%PUQ#q^{PSFM`q>>;S~8#ivY*%)B`UiDQ?7s zA0A=qD~f+w(?NOkJ06YrZ^4OHwx206pADcL$~VRGf!f+5-9~HX@ysLr`X&Ebl_VAv zjhwb$i%6d4DbmGKSX&RX2z$fz$=NEu_G#+<%1pB9sB3n`f`ysT^u|xEfCl_XRNdmd zq6h|HN=``%ymOnP-Bj1+&lFDr^6bu{lpj>knGh$LW94I|LsWEtY3e`!Kr%gmXT}L4 z%oOlDQQ?S@rprbNS1a80Qs=HQug7zTT&APn!scWb?g`*;#7QI@MQ)as=|7^~7;`R( z4qwxwY^SguGn07GRm?ppW`}@s*mt^#i;(~3{IUjEYYDj8PNPUL06F^}@3_?aae%hu zk5)V`%hUwsm10{;mI-y=zl5FQ#FS{>`QZZHG4aXRxa`2mw*8(cnj3yQ%BwXAEhx=* z4~2z`2|XX*0n9Ht-}1YIsK)=}>8rz<@V{_@5rUMW0;8oAlDoBpdA>Cc08%8r2+a2HEz4v*xfA;6+?9_YS^Bzpf!oT7GyBM%~tJ4B}!UqfZ z(WktgD-lB>Te-VUZ;JRL5IjjKi`IcTg89`=c;41s_SMrI{#48xD!lLb8k!qG^m!f$d7 
zR$K|Jos%uzjc;v}_kk0Jz;hUcQBH<6%dw z(nh~1kEGk2>ai2^D!M(xr>zoqW7&9O?h(Cdt)~W)^?`o%>H!DU*Btb_%VuJ;mzPh5 z%W&&&?5?Agi}lR=rCc$Vj4z>uYUFLYvaK1ucS?YCvr7(dK73bAq}f&p;%ktqKfq95 zHdx-EGu&D`)iAaX{>2uIbL!WBr;V7jP-d+Cm=M*am*l+G_X8R!L^Ft`p`z0~vOGit znV~TpY1h3Tb`M|d{TbDPu1Yb)%O#Q5Qcf zJy|kD+Vf;c8}t3>dSjp`>$Z=3OAS5TT^QnF0hXq?mdcRAG9{XX<_FHI!l)eJny$Mh z8xF2jB?4&qUP%9M-z%6mg9px#sn=(cNQC(CAxl(4%8o4OE;@c{82j*88PsQ*g+okKoxSSLxt!q<Y+E}s*6o$*cbg&Zr6wWy-qicT-7^*f}Uth*26fYO;BnXR9mv!I-avT$L zQia30y7NM#v|p_J)#RX5d~U?wS}-UQN}S)z+2NWpJR-w}^O;X#vcFX$G(F|R{P59{ zeR8QwK1p4~R{j_IR$AJJvex~7N6F(w_n1jT7~HVXuMgneBwj%B*MYRT)>2$o5W{#M+JXlf(3p@zAU`tVwUUcvrKAWdAZ2Y_TkN3_`!#X*> zN;X7LNa&rg&b})qLXT-lOyI+fv4>R}&T7(rxnZF7(3U&q3VLYsoyX&R!OO@G_{h&v5XB)axVU~Ym}N5MuLgi30w8x zas_e-JHaC+Ixv^xn2T&->F|%tWq#aLuW@t?685tFf3_K2`Ai&`2^6FSX#G;ft--;f zyi@LP!^`mV#_`F;kxYQS4{rBrU|hv_#gg~Ka=19bd#>Gj(#rL{C+PMTLmX}yQYi7e zD;;y1s!fqm8)bqT=H+VzedD0+D>?J!`*8K_k-Tr7SH59o<$3ASy9&Z$%{$sT9j0!- zJUduSVe?+PZ5*4w`DBc!h^CN!<`lOMS6Dbm<&6oZY7|=rsa+4u=m7d%bAnrlZil;W zdb^utZZr-3j9m5`r)%BZoL&Ih4?j6IV!AP>tOK88Vk5J!bxwxMLv+Dn>UVI_O6AQ3 zrjq(%3A2h_hRSYaRxy-lmgJ&QPzom3>=_RKSop zSV=OxHh&g63cTVJe(Aa|r*mWo`ijF$ zbjG3cv{&b|Wq7B2#mjMn84+3?_zXwXXg}(!Vwl~@L4zCtJcmTUAcgoJi8&Q`Jrdp9qQdDw$soUjgxYwtc(@+* z-oYU(ibSHVwvbzr>~Emq#r}`rmf9qSM2*{RE68crkgEyq^h{CFR1Y>6&MPbXQsWGH zM(9eFs*=UC0Q%m(3F9xgyuf6~3NUT|)Q~Uc*I6Shtx2hhqZJkNVUrOY3P_f-t`71| zjqqTfLqoe_YwGu7DJ<3-F_FVE%0m=3fuzBU3!n+M`R+7bc4_4lDMZ%KHJ62NN$%o6 zBo^AEiDEobp7mp>8K~vX2+AofexO~k$&d0DvWjCJ(boVV7upEztanCH~L8|CK_? 
zJwqf#MGx2k`0+gAPib`Jv5Qdj&Xb-9jSxpeA_VpZI;Grnm^N1EGg(D`8jA=f%B%9Z zBonCKWyC=IIg&ER5cLzwe*JyxzDIu=C0Js#ap&Ui2S$+}kdPNCq*((*X}@|V9Exb{ zhRy~B`?ucSl-r)l>Fj}>zMSFpPQ1A20Y+R#SNPuUHaBAKkss_#o`;<9!?DydrFApK z;If+1NN+o5^JzP%pYMQ6pVtknl-GYhS!!Mq@hh}6MX%lMTg5rsF#dru#HZG+vzF4vr~$c)<%F0UmyfHQgGg)9Q{ z3Od^qui9$8IXFhO6D`xCh2WQ|Lho|dLwqEF3V4j$TqQ{x_Z^rmvB8a^(l@aU^FR6YS3v)VcaNyym(yz|jQftX(n+hDy{1qe>F8iU5?bwp+d zQf)uGS9ceIv0V$^MGmTw_+vP;ZhFKMU8TDH@MqP3aGb&kz#haL2Mc}g#Ih)>@Hb*3 zz9^+?Z2vWJwZm)OE<2}o$)7N7(fq6!TAe0I^)KAA(owi3^>^b1t`_EH$4|+H+2)2{ zyXTUg>2}%nFj6y!i#*=(>6p z)o;1Pv>N~q45Ap6zzu>GrVU~34oeU|D1%I%8#mPK+b<%0a4c^e%*n34OrB$dZm8RB z3+#yF&u(AXW&0v$a_R@dMU6f!R#EbaGwj+1v}VhM*k0SuL_9CKttHq@Xj^_7bb_k6 z_KUZ~coehkIi{1xXXQ9%qpbW}QPMTKkR)=kp5}e+aP?SGZ!N6af7O6pPFK?QhKDlr zer<> z>Z^i3Q>@BP*OW3~Vxyc|yG0soH?cJ`*o(Y;##Hf%#tkzln!yA8!HJG!}YtgM>=)n%q#SQwlXN;!w?nu zeSe*?kAuOv@%EyVa{aAQa$NP z&^Dv67UGgnA64eg`un6R1Ljr)ScG^W1)t-pKZUsD#kg>ocFEg5HeM;y8LRUoUy=|F z$?#`slULxTjQhzdt>#^Y#DomLv{-NM8FX4WUXFIJSRNeD?->6RdrDu97Ys(1v(R`+ zJn2}qzkG^WI+DqNHCZr3Z0!DNxipB)L=#ws^wf7)BVbQy*Y z&fgHVR}L>3C8LBJkP}$_l|yNo_&>#`;`=mX+vmJUM(kpg@2o*|WyCTgnIDu2Z>~0Q z$5K(jH}P6g1@P3>L-wb*H)dpJ%Ul3$BoXhBGe%;x9UC^0CE*X*@2}y87!qPXKWgH96JSck zYdF452ybmQYq!AD+>HD-u*0KwzhlmF9OY%o<(Tpyeg0b0J6_gX7p8RcjkQ*)rO{z1 z1hI3>56AIqW&K|s_bp5<%lE>3(0o&XJtU)3lTm+QRU*mX;{BupZlj7|ri$I~OfQ{5 zR?<(G*WzESIWyGJ)mVff8T%yp!T0TB|Pj%^dw)Z-3 z8MRuwI0ru6eEsyV*hA#fOBjpv!#bcP^{3_KrFs8CgmS*Mrp;3ah&hS;NWs~FNuJOLT^m-m)ku3j~sb+cPQuMcyIbOPYvBM zI)fji8Cl|IIKSS=ZC;RvSRG0>nYA@+<+DJkPJHY~0wsmM2F?K)E^b!37F$wcM#wh` z>J66~k$g!yJ$PBL)$^i_lQzsrG!*fU3mh)eiQux&Qv?wh#Z{(b={Vh;g)!pF6I(d|K#H zL4`)=dvWXxPlxgixpZl*f}EcCnBaF*S^vTln@vhE2gv2I{z*2u`%cq5kf&{(JkP}# zv5fawKLsER?Nib>=+`%}j@KF^Q7hv7TEz zIYpW1<{aJmTGFSu;tSQE4(#Uw2>Mv_Cm232^RzuQAq@8`P_2k)p^TSV@W z7#WV&uQ}<~SMo--XFHT>DZh}UO76HkkU6VleDSlCiBnFraGwbhVn@$_ z$xvs%wWgEs{Hbq=mV`4J*hF5I$he0oH{F6(@DIuxF)3%3RmmaWIT+%vb>w8~1|3V^ zJl9x`7*sY-Z6*$sk_X0GD$_~&Iur!W{zbmNFpvJ!(6!O`BdDkc<0UpwT`LyU5qX 
z`FLr^F$OP;*iR3_AHM>>9{Fyfxkkh-;`#~@MB=G-^4VUY@ns7^)x+K9Tm$28-Sy2J zM!qbWBIR+u!e(3mb5ma;y!Vr3=EhXaSGftDpxt%JTFW5ol&u?MaYtLq-wEFj5MEbR z9upg8PDD!mPLA*F_uFpf>9YMm{CeQUX)#WNUuuG!*5xCY__a;5XC#7%Aqp;3V`i^| zi@AQSu{qO2`ZYgczJcg#UKcoai@g{kcI7b39x&|oXVnGxq5U(`@<+4GVBo~^CyTST zrNi%T)bvjKHSbgE*BVpmi|EE7kQM5s;4O69xB1|1qjv@?uYT{#&&{n_;<J~{gu)YV`0KC{8zs#}NriflvnlXOVn#`SbGw!q%Q zH_gdYT9YgakqpTHd~|kU9~7so-QMd6jn`F*%B#BXi_`i+5%gu4Nh;F&q{UGDR~p7n z^`d_*2bF`(z}h6`^S_La_yZiFkwxj(#MiOA_S4kf|aodWR4 zn{b;eMhZOsX>tp*WJO8CC)hJ1oe%4H$|9oFpTJbz#X* zQCEwXhU`6+xNi6=)QO*?T0k6lGb8c0b4y# zqvw7oV=Lp$#|M5#447|Fw6!HFG1)c0o`M6fJ?7uAd-{7}v1nPy=JaL*7{Y4g%+4tS==)a-z_^^9zauj?jN z(erSTV!BS45ULn(Ku;F_jFY#s8CwpabAwgzG-kJI7|@5ELsQD!*<9{tLeYc?u2G+y zb1I%nmc1+fj@fjg`()iBK&269BXJ%6#N>Mllm4%?+L{sJ&(gG>l}HPecDETV zC??Roa_|<2NI5P(ko4>5U*_@vlsW+7I(yIlT-}(00ybRJ&Zz5w3WGGl2HxHS?jz&D zTeO!#=pY=ScjDN*m;8MhRC4X}Oxcjr9QcVRK2N$?NWxbhcDT+SLe+<>VLwki*O-`2 zhb@^wCy~@#&@5g%ET0}s3ZyB`W&#}2Zv1yWL;=t&J{|pj*T>p|0o~NTE$#&0pZAla zl=h;(Z4pEN$_<3+&KLikzj`4uPIBF77-g9^3f;^b2VuFJ=q7XNOA1X^=yr4cin#W8 z!j{;t>dDjx_LUl)7g&KgEI)8qOU(kHes+-O|6XKjmI%nDe&yRZW7pc|^Oh~07o0aX zPQROzC#Yhxrk8g97Fk*{Vzeab+$hYQ=3akiu&J{jdrj5fgTs{b$A=F%O(QWRFADD+ zy#Bl=Sy%c04L1Fc93mk<`&_y9V)1dg4;rW8^8In)U*_x|ww;Ar=q3>?i(%|i;>w{V z;%UfxvGEw#$1kmc8Or9pzraU3WDw1BHOD}#-p5td-aO&!^k6?bkE^bp#-mM%bMA9- zg$zx8EJ?GcUvy3Yen#krU8RtNZnd#Hv;=+6@3{qIXJru{Dgl#!U`qb~dD%u=V$gd1 z$DAQrK^VUpSq1CF+o|f&*HN>jAm6;Aqw6u&;s^a<)$&09{h0NUHvUwAxPrOQZS`JH z|D7EL;>)1w?KVe!zRmmSo`>rx^wMTEW>*M)nz+d6YQi2h+2WBUw9dg{ZK75qy0IV?W z)uq0#fE>z`>zTPG2PtkNv0isCemFO{2FZ-Duvinag=ZXU6eznNCcDlx_2LUW=rm!V z=>7DZb=&viIaY6A**`l>vhIF*XH<@o!S)We2IVRiJXLXW9wCnLY71YOGUU?f&8#m`31JUwgM1Q(Qy#_NI<5at1X}YPINP~= z|7)ZYcA<^DH9^OBeRbK#XBX!UR*L{7^_H87!IGwus6~>A>s;qvaQOt5&p>3!Q5Jww zb;JG(M%8Jlq72Zw!PU`$2tQCrO`Xjw==SA+i@}yJpo5O_;~0@Z-(_P=i`^z>owb`*0hH|lsa9DJ;SD#r|g*eQ1rZaWXzHXYC} zAsup*#&mO7Ys0u%``_kM(H)_TpUY1PtL_nJ53gLXFS3%i6`4tRIL=pg83qU|Y8h4T zWhGWhMK$$)ct*?o))4WUh?eBbd?=e5Wf6vyQk&ySAE?QfDa%gO%SD$+pOY=H^I<&o 
zfozghf*d5AQw3{w_cGbsdJ}_c91CbjrX>lXtGA5@#R4;=53fj~)GjPWyS<862M5g) zzjhOdX7b~-@ffM%Vi4c<{uwoQwzFQd`^V{7cSZbZwEkn_vw|-=ZjPmsN;}n>iVw+m zVg<*XDz>`M!`>u4_z=M3@*&^J`t*%x(2Z(iGvieQWMYynddDsigGHA}ELt`H<@t-= z#an%#V6$&V?PwXd{u!%3+$J@44ON3%iIK(r`NB*Nl&4DZLVf)ppmgJLOOW_r^ zg(6ohx=l`@Wt*i=d+?Eby)wB8r6q|Ykhh*s-UuTlxld}Ch?RkuwrusRJ3g_)^2YA3 zVRw?^*Vt#{_Nzh|c68SCrOGF>Qd6Zn&NCf~JqAWQwC0)~^o6M^i1VqIaogP6$V7;q zuaL)&&)!kW;+XcAzBi!|at9t5F!DVAx@h^lmK1+;_&?J%-kHwB2@C@{!4;-D+)#cX zw6M_#iSDCTu9*fojnqtXIV8<*@~6l@LL@kzvTk_Ks`klmIXJ=7V;P&G<*F}wZ(fvLcc|%oVnAPgDQ%-Es8ZptJ2$g_Sdb; z5=r}Ssn06;a}1-aT90R1GVr5_pGa|)zpVWHI5ys%2nJg{M85*I8H&=^PFM4-StF=N zxt;}t*;G*1n$bu;=~$V+Q81@yJ&mYnr!(6UNm}GaJi<>DspKB>8G-JKAVi+uG~)-n zME%^iHPjjwNl76Z;i=qv&R@L&teHH*Vg(uOEj#nVHb1fk=a#!XP`)X-Vx3}!+@oTj4QAWJV_2Bcd5`5C+ecKACi5eYItF^@xaSe- z6m?-ymGiZMH#stiZKuJf-!F2Uf~;%C8*lPL2KC#Rq_^mg__t`S6Pl(r_50o?Z1X-* z&-oIoZ~RX)n5fmruEG@0epuzACIRCGu<9VL;UyYMaW*pKS$%bqrR?lVI9pqvMBV(n zCw@9-Sq6+&&lm5A5OIx6SS9O){v?`qX-&;5v$QCoXcpe{q1lIJKH54UZcE`t7oLHdY=;v(WuWA4wM;U-@*G-Z2XPVGNk~Z7x zcY-V`<^-xzp8Zp8+>)lyPs8$Hg(z>Qh$ptAD>wLaIV?!u$}t! z5Hc3uBN_cg^AB)oQ75}_7kX+t?6%>P5aeD%-Wo14tz%ID`o_yv=C|BQ&aI zi-*?^m9SZ4QjL$K!T zcuq3H@@lU7z>7M1RJD`ua>;%vEtdy|i`A{xWKR5$g(6z>FcteOws zT$80E_J=V|>0Gl}CWw4&d?@?8rJXYEo}e6r>dUS;~SJHiV%z?9^RDsIH5!{ z*@8k?$-1eS7oy*FK@Ss@a_)ymLH&u5Y-BM4+v^+Mf0xlKZb9Qc;i0$HP6bZz2_Zg| zkA(2YT%Bpz>5g(I9LPQfH^!hdmI^hasIo8}4cad`$;Cr*y{5zsCBgSo6zJ~=Go+<2 zx3QuN69ja=H6ux{WPTKsiT_}2pzTYCEkq8-WyCT~vDm7XBuWJkq$i;jc4EXy8d4#m z=vzXDdrisq7PUW?6^~_r5(E?9_K-dFp8hS)cy$c)R;1r~6F_c02V({H3+vqun-LIn zLpUgtChPTruPDGNCy&K7=x+|k6Gj_BiNv;*wtDMP zMyS?5a+Kz5CvJ+mp>K!bY0+2hd>7}O;&teW7l0S_GEuepct7gxfhH@js_NAuS;d5Q zzNamb+j~tFcVvXGmu>X$+%EySkV&RFe}6A81IBne&mpI8pEgcDS&zpJ16gVnnASJT ziE6|H>gLHlr2?=X8?Y=7ZA^V!qRKj`Q#GFe=u#YTqzz*}@(MDwSWb_@jB#)AwjHYH|9yGuCDpd<`hk$#;{?YE z$onIm8JTeb{}OiGr{ydP9wD61{6#zXn?Bnb^?YQ+vm+fvK=Yi4+S9D0cBSPFd0%u! 
zCt%@_9(L7IubiSp+1(dLpfjf81*sE9LvA*6*Pth1n0?Ko*s|-uJ1=u^!f6EmeZ2FN z*e}2wU=Ap)4BNtUin!6NjpX3e|K2X` z$dlmaS$4kog3&JW;$$jI6D`GfBF}omI3=)3H)Z+H5=m&qWtxcW z!RtC+2>Q2oFE~Ye)3Vx*wS5V}o^fG$NjZ0D zz_KYy%Spl@(qyNguXFfD972;~BUYx76iHI^YW`lr>0z(iPZThOx6&;w=YgTNW0s@L zvagc{5lF#hgV(F%!7~WqW_8(mhWhou3c*7ZtpokOMIYH!PE}3dW4=`!w}QtNCiQ(E zS$^gw#n!cURDQSK?>%zW=OfMhkmxJyt1|OLTAj|W3srpUPu38R(L8z=l7`Ka1BLZW zW1_a4QZVe^an>AS=p{!i(PiVbE=JL(?REeUyR+xe7S>U`_;vYreLha!ULg@h%lyqi zKE%~-t&iG`K91>Y##v`EsEWb}G|+}J3RH130x?TYKfTt^q&BNX0JTUzLt4S+8+23m z;iRe%nB4M!nAit3x&#bl@-)NE92tJPS*pI)L7N};Zc6YXEXr7YHlYtRKPkmG9I$fb ztqL5eF2;rUIN+3^=hL455zq7y_}MN1EP^Hyn_dT6*|UXcO75@r;ikSxSa&1L&|W({ zSQ&G7CWj<@((|Mn)-H;NIB8!K`R!R^V`nB|z+X&Qc{|l1P|TdHU=`CSdwk2Tum7r> z2NXfffwS=!$5sFNyWs@`j82o4RbC2RLYFNURRvd{?mhi9y3oL90rBq7Q78D_GF*D3 zInnBRlQupyYcvajp|m{y$+CI|FnzJ7G={4)B^5UHyD41HAXl?q8?mH~@5r(FFd`2w zeDdDbR+1++nqdyMJ=28r*SpPM$(-C+O;jl18nDpkmKS7JTH?QGoQd_z`%H5)JVLCq zH*Q;7^yBS6wj&nRDwKUFmQyHYN`?)llj6EwW!ojU0}Hg*g&8IL7L$Ceq4}DKrF4d~ zdBi~q!I|AOsB`VZ;z4Zg3u;hjb{57K)Gm6XZ+ku+5_|;Tg^DjcWz@qEExNfzZfzDy zFif@YS2li6&`a~L0vX~4L?T+5A>TmX8(tIT&~*c2gzXB{>1PS&v-f_8%3$`gHF3-OndaS*+8;;7S0AmN4mfH|eH)YgR6gb~$-VaF#3k!^Zxe*8#?Fr1NsC+Z7P+7$?z&1@LzgLILwLzxJ9 zLi_H>K7xqp3NjX!c{L!`lth=uDvrjz8w!}1Y`KtjPj|ClxV~nOR}?T}Q3osXQa)XQ zCkOv4cVzV(WV^S?CEl8TOVMMmoO?4_>S#@{(Kob7hVN(W+PAtq{9KEXl6kfX7Vm4& zgy8ZST4@z%dox_9Fi&GnWT&hOijF4vTu4V5c^FIb<5Jh)0q$*Nkbsf?w6uPv14V19 z>(lD;UGN!&<;BhodJx6+LGR9@7Izk9F_h`#A4n`rvHTZU+027l=tvA|-zN$d)b0!6 z8%fSZ2T}M^^g90%N&T6!7{5$yJ@Cc&EK)oYW9SQ6V_KLE=%#D5TouzZQge?v6Mn_qv41c)y9toFyt~Fxh+-;xiai*-Pt;I7WYYhSo}=HDin~ zC%O;3rW{5G7D3}>q%h*r9*)sis`Afw?Q+Zg(jKQ`EBlj+BrVOhN5?O)JJn3FIkl zeg}JNxBPR%aV+#j(06_LIKjGP>erD(YV6VFMg{V=Try>U_>A7xNt?lK7hrvK6Z-LM zZH;m7Xmd8y^9*pDBbg%f**%&5(V)nwpBV_W9`F2sw*6`fAqK#%XpDgjlu3q3=Ufsv zas;#g)bv_~$X*3*ZBDLQVD2$28R`#x^*GnR>a3tT+17owLAj=E%gL4^PzC#~FGrqL zQzJR4gqL{G+b}x0&T#KbD*!?l@>PNYEC}L4@3%f{nJmw#hcFEpA^m3k@s_gd%YUsN zA{uRpAZx;fip#reVqW>kb0_6M=r!+!FFA#EUYq}WFWzeYhy>Xm^`h6680*_|bpEEd 
z4=>27f(r0tYd>-t6nKcrXg%CP2Dvb5J-tc`YH8NO^esBpsR#n{QM`+!-jL}QBsw)2TC za821uu0X8CTr}~TT=(ibGf3R^;45rDqH4nRBg! zMeM`$0d=}9IY;u1_ACEHMA2XwP;`bW#3IfkQKgjMQK1UQ?X+ey5H|>~rP9YS^6i}4 zK#&FQCo*9@`NWWo>5GI~b7_f)KcKk368sHEl^!S%{Ld&lmluLQ>8Iv#EseDOdkEHL zxhxarJ6|Lm03WCc?D!D(VvMH5mM;18bUq8!S*V&Fe%Y>dyG)9@x}iz?CX#HO*cc^w zfO9hS6atv;+}NQ!-K}w;r{~4240f-%-Oh*x4?+{d&oA$yV6-L{hk?wz7(?y=!Hs>SHNXe6)lRkIew%L^sBy@c3bjWx`CMN;kq|h$7a^-N z9@7(8smlV0uU~ERI=y^-4iPzzP4xB2HD(Kig`^&LGZ{cLcAxI<}FcgQ<}3rGrV`&^Og4d62U6{^8O!3VVN@2#VhO5pjDP;blx&_1N(ERt*ihszLKUM6?84W1M&o2J#?> zS-0o$=Pm<~9~VhA8K|O9rBpDd5Ua<0o=b1*6uNWm4oleXsRAulciAzi0KPW~H|pJ} zVAAG?BBTe5Py}wdLSI1|^gx;Kl};kbeUm->#BBUES*2Wgo!%-n zpoTyJo*w=?jygOh1J0-4J(3|2N9#h4&6ddhreU|L!M(t(+dm7{nXJ!sul@BIuN%!I z~3Fz;YAriRNPk-kV>;h1k?TlY^^D9 z8?Z)p$$keYPXX|(9cOA;47cid@gYrpmjCMuNjk4e7F~qy83RZQe<8nC(94u5v)xtxf}pPict?DiUd!DF#dUV(Cq5ZWVg z%Fbam>MzEW|N8tJ9;bBpnC##aTzw2D=g&9qY=;iJZ}g$xAuUfwf{P19uL~4Zi};}? z?c5vgKC45Wh!IdZ_l&}rn8g5kR$ckikz5z%RCW2b=nlkSnU>W?F?j-fE}97sO9}IC zHYq@fe^&Bw?DU&n5xRyOa3T$HKe_12)sXJG*y7t`!h^W&P)?NuaC6`h`b57|DM-K!(ZAJnn5ctxcA>48*&s2*NbM(T45 z;?R!Ke`%RI0SQ9M@znF8wKdgD$MjwscEmElKdJVWD0A#JJ3di8Vh`S4Qs)t?%3>~p znw$GY?EMR?58S6+I>-SwBVG;EHqQ@Xi|l(}>hD8&I~U0~$iuM(T1`r$hLwq^3kqTP zBgAGQ4653aEp8d!c)&9?`VgEG$A7#P8C|;A+UDndmId)T*x8#FZ_DJP!*IB`?yd_v zoa|ssP4{?ZE?;RM$OIzy=Ym|~DX8^MN1Z<>%kWmYjA>7pq{9LmM27{4@$s;xs4I%@ zcd}y#?Qs+(<|oiw2R@R;2AuTnaTYMVn*R;?jO84FFL^mChz#fw2w#b4e7%CXX-s$m zIbB;u3FmWpPeJc`A}KuJb}WX~s(9$#bjR%kPBLcrsfX1@X+Q^4{VOV|GI&2R2k8s@ z%t3stgbrDGc$j`Cbp0=3k6xOpk-k7dxCy>BN8Q8D^k&2H!0lzRP+dAwEDGTbS-U@+ zYaBQ@TYc)N%Yg*ARa-r^I?AHXe(<4bew~S#v_zP!a7z{-qBMy)alYZe%vb0Y&R^dP zi|VI9cT*xbP!6G+)N7`;yv|NR-}Z?hFs6?oeVaQ`-3#z9xuI>GOOvRlOF_60-HYdo zfOhtlmd@1eN?q|R;m@vnw-)|}PJcgM#d3;wjG zvcjBBU$~+!j+RQ%-;ZJ((Qr8NwZYB;^o< zMXNG750JUFuN{4A`o)v%;V=X3?dhLA+Gpa$)KXw}YSATOJ?L+DevveO=NIBBK2a@% zhnfHt6o`~``6D$|k4I<){39h-T@cO?L7I{Pw@d)u0K01?N%H@!IFdXifB zKjDA>MRiQ;J=u64ey;{cl6-d{WzAy%1Dl)60j>N|pJl%QD;q>Qb1nuLg0N|HQQ+ID;4hDX7)Ef`{S(a1eOHR;AIAi|Tx4dTA(oPX3>njP~V 
z8_$`BJCjlPSgy&J)_mCp5tN?K;!Hfb&fyM$56~#n~@yo2aaL zJ6`9tQlxK#h{)8C$og6koxM#E>cdd6h9CA?XWHC7vSQhp!oq{{DOBjQUBkW=RB-sr z+$PNt63+#kZ*M<0@%Ro{|C+x+T`V_3-bZDqDpz0UZSs+m(PWn)B}!~ z?h2p8y1T;X9$oE^C1HfxUGSFnP z^8>Xc__;Py_=jAyI&#$&@587+X|qLo3d-T<6ub>@esOdNyWa25dmR~Mol*9L7x`F< zQ@Z)F@wUn=WuT&Kvcs7?upjvj(jFKLwz5utIZDm+Ua**6FpX`D+LZVwH6;3JnmxBwKk^zBvvuS3))dcBwolvDs{e=fPbN8)NjS=YX} z(*Hiew&{v^L34?}`1ws+S1m)ZT(1^#aUS7MD>VF+_Qu?qWTERo)-nz=e&i;b*{W)^ z>X7q$G~3@Xo=yH&4ZHmABi(JhxKsdb*+k2w7esc`Rqd0Q z=7xI`oHW5QCn8r%Qk-kmMabkE(W%$eNd-W9*vb>sa#8B_RuOxrl4_l}h3h_cX0Z}n zW0OgeeQq=cMt5DH7KS)d-lq(7@kCkFeXDz0|J{^dSMWgP(WKKfzDfTSVeQ zd4|};YLz#U?P|+uHE5!95!kEB2mrN+Ud7@wwu;(DgRtMAZbJRdrYq-@5WJQqgw*ab z!g$AxEZ~rk(u)3e8@V|M*;bThVlN|c$|_!Qp#fuWR2GVQbz~Id(kL66$c%*!fBBUb zzR)IZK}_~C`Z`cH4|uA6>q5#5PH8@wa;M+GOwKER%7-Aq$A*}t#?}o{fn@Zo@Q5%1 z>`;JJoz0ML5i$pGj@<=pEBIbz=j!hwZ0X004c7x=iuxS)lytZ%JR1ZGu|vFjA8icu zHI6n4Asljw9q(=2lOaV^kN0m^?1mTr@MG?O9)^D#Of?UG85xoiPrKc>PIR3tBs)LD zHR3u-!|>_VBqXW=@{T1$=wXJ?(6vguED_S5H0bq7?#=OY!0JS;ND+xn*KYBLh@ZUF z@bO;EqazrEWZ$NMxOP(qH_<=*!Mh*N;~m&(aVFcp9Cr_MfTz(S9opOEq(a!Y%+~CP zqs!;4YtecXYdv--z;6zf0JR3OceS*tX93R;`vfiuKh@0gmRf&Bqu#lsc&Sv|I<(r| zjDxqAgPVGCJ>i}kp>yY(Nk!O#WA()Q-W-~+MxlF66w@YS^5XTZC3?5h^B4d+K5Pv7 zf_G7Q7io{D?jo(*jqgq~VKHG|AobyC{xBNbol;yWjM#@iu)!-KlPh%S@B9-}Kqm6a z^J@fi=cj_ohkBe@!eJmQO`OWRytn4T^;+lBv4SK^qMA=?-NDY<{n$qD;GaIn2P37TMp0%jq zfY!i+_WT{Yx{(WVo{WY$X0jL7xv9-vCeIzp;zz2epK0!rDPx4m49T_IVG3#t;=qf~ zQgTmFnxRpXu&nM{JBsZQtmws42k`F>ZQI1|zbBZM;8ql56rBM%%U(v+9?$*<>2Rvt z71ZHzi?0rV|Lz>0F$hCxvHPz`jefFwL+g43%7wTmRYpdb^?3$ghd}J!HW^uImwhrv z8fh7`K7lp-FIx7aLt0QT7uTYuB;5+1803YfsZ`}${cX65t~xKGt=EH@r+5I z65Q+BE2I5~ey#<(iN$u}utE{-hrI|~8b7Zv(!fp+93@g$XY;2$ertlZAZJP3TB@8l z0K(^MhUDtjneH(|E~k*@?AsfTBS-Sd{YBQ+Olku3JVH3DwV2;g6WD<}o4$Ct7OGJF z7~mO%i!M3jv*~jiK=C4|OA|#xyh5%9pt@&a4V8Y=86CAHTX4sJTSKDT8e<*J96ph> zyz?xT*o?&`yLmJ+^4rAeDP_X+7g@k{nowQ3N{D5a^kI5biKe++;u~@V_XfiGQy&`I z`Xloe?;#p`MzwshIZR-C)_B(-a=y8H)2D+=ks)K)_Annx25}jQP-nV$|xYLzp}GI~)I(ru?~RvAz$e41f9fs)E^g^!B6Ou;sE*_PESizw^&^ 
zT)vNEygz8V-9kO^%+H!08zZUtF0d*h=%77YwtPgt3*S21vdlP=r@Mw z_kZ`VJ;mph>M@^q8F(SbJxjNn=+O542e&`4VE{*aw2Nf)fB#c$fC?Hy`v#LF=lS?5 zX0233K=Us)1gI{3rmP6HT_`zJ)lakPXj_d&m)jWhT7~;Ro~S6cAXuU)MPxMy6f7BA zX5Zq{pGF^&LVC4!ryt8x@4s{JA@5hlb9VqgHs*bc2mRu^!`(7{yng)m>L29YYa&#k zU;+Zs!=m*x34Y7^+oNNKlgdb5Iw_}{*S0=6sYA2`3>5{JzWK6UiA*zIvtz}zY21t6 z|B$aW<09{CXUzjoz@KXd1pm03uW8oE`Kxd{tkZla&XViiN?rVN>(qa+Y|LoU`T>7v zmeR{V15DT8E*grncGZ5d<5^uHglqn`I@J>#TWMk%JFbo%B>)Dn-VYM@w8uC*8;Beh zK}`eg8QzdDZ<>&wi$*tdr9eKMfJK&1nA*Hsuauc?(i_igK+npcKG-fIPI!b0^Ihp0 z(?Xa>NydDPF{M1eP5aHIL#@h?tU(o2Q7}SW=lqoIp*h3EJY1ci_9>-a&nMa!36)XU z%Be>;I`HHNx+7Y(ICp({axrbO+R1Pzx9R^db(LXFM{OS%DGa0(MoWXzT?0`%L{gA$ z=>|at=QxNkH-W?Q;z42I=7~6&u0@* z)-FeFgbcZ5)y6bfJv13UeAFR&AP4QN7MEUH)p0$aQtPq*0_~2^DL=$dJTiIRsbJ0r zW;-zobKW$r2-g$pSC`B~;Dg=E9VD6o7S&<& zABDtA9T)=YBJigU{G8n!i_U$(GW`($LsOIaL<(0?`NO zyg~=Upp_S6)!oT=o{^`QM$F~*Rznq)= zGOrL~-I{N*lf^K^N0i&z@WA)Q?Blld)PQ>I4OfF|x0*L6Kj$u!TH{zIPEy{FRXOKM ztwkXht+2cugNnBQ81AL_VHMI3g&^MjtH3$G+|d<77B~Z%L8P+)#sR zf-)99oY=tB&8JE}PG=C!DztIWr!I@2B@qIae({dL9mINtCY%hac`?NGrfjt?XXHjH z)+?c{3i{iA3g0miZ?7=5r_9*8otHoiYvb#%;^i3B?Z!hh)zd=jJD3!>*@p(9$=Azs z(#Gh8iuFx%bZ=m1BQ(!VH0JVORU`sh%#g0L5XfM%?IP_RTKtHrqjh?*v zx3UcRlWYt8(1bfS&T!6TN`_S9AReRV*c~Y&5HqUs(q}9*l*qZ2`|BE>&s;g2`-(1> zLuG%)zWGUfL!ChMYsMD7 z2i_z0i%-XlE*&E=sO{W`sPdQZX6z4Iwjj`hnV7)22u1+M^%T4PqmS&-py* zDc`RdBTLRn>Z6F)XZkJNo8bhOWsRF8B1$eO=sZslQR6EUE=V1_w#JkQ!u^W}9a56x z(X_;hwMm}G&Pcy#kUgN6_G0pr;dPGMGV$Ju?`RK%8uhoaYD4XuRgQEFBb6qAR{Am- zAC)HZXSkv0gR=DR6_Z-1M=i+Ya4m3?0IzDS;NdQfO zLS!rYjetGD1BC3A4t8mup0XTqUYw1^Jys9lE`z+#rmy=n`G0=GaN09l1iX>z^?>c@ z{S5=kSl1VRl8g)$`$e;O-!bw0wM+EDF+=AHd8zu#eCBL7vv&9wS zuqqo@nb`(D{IMJXPbes}Aw#w%WS@uJoS!Fj-Gncr|1_N!LSNMU^L@2HxP?~KB^4l` ze276T)^ho>Grr1qXn6YSt{HJ?5zc7V)`qXdC(uBs3Ft=eX0O>wa z{85rDvz1>Bc7`t8~o;nMcgNGJUw&?^lqbQAKmVtAArTKVST0B1l-I zSoo=QMM^UMFLiDLi|w`#kuw=zm?e-}b6Jsf+|kF$_};vmDmOfi09ZSW?;lo<(Z2q< zG@dPoJZwVOCFyKGs%9RIjb$Z@O;MN!-|?pu-CPM~{b#)C8P8*<-6ZT*fPCL~MzD*U=wY=r?ubE35lQG$8%JzD|NmVvGSgk^7m^QnA_hB 
z=dUg{viv^$rOk$<=>j@A_4oO%C05dj&7LdUU0+w1gCBS(G}SU}92LlBiTrZ^h{;`+ zlDY8;IqGmb7?cAq|LfPTz5-Hlms{!nJHP^<31)tIIh*CB^Fm91=ha6Eq4(coRPjN` zRh-~P!3+qWo|<0Y6eFeC+%8k8k;XS3NGT+(Kh0a;$C!ctr>fbDi*+J=GuwGF9zFfd zJI4o4+a>HIGO^@F$ZcX*lYV3W*0mSIL5|b?6^rG*oEnTC12b-VJhx59lBb(95t!LD z7}2>%7fJr4>>Fw;52`YbUyqwz^B10rycsVR&<$BBs<*ay^Y>nq5bNI+m0$!f9XFPQgOQ> z%a8*UupxqA8H^^=Ycivo?9ZR$u!i+c_1I_7S|VY>x=A>}_~Rsvk?IEF)+MN~g)!vt)OLpr|?n>h~Q}e5l>J^u_e-2T3fqZJs!Q8X$n5bHTrw{&!1!pIyVRPRGwTb zFADx+;~zi>0S>}8;y$P`sN{W4DeinAG6{~u-~mo^(7 zsw1X-wB2}O8v#Qw9z}?I<%i{#Rt`kn1fZ?2kIVM*4)oh7iCty^mN+1{vCWhYArdlV zwV|+Seg*S-jeRm$NB8$f!|ySyF48#I@QlDm!Iin+U8Fnu39<+UZn~PfP18QtP{>{?_=y&G2I+RC_F{lu#ddnwtkVXkD$T zl~Z3ojNBsgRcgx?O^`rdMd`j4`~3Ek9aa7M{__*v`r11!9W}x2{FeWl1t50?8ZCdn zRI|LBbkIz~v#>lLX%$!Tj`%xUoKoLM>e-=Apf2RGsuryIZS@n^U3mCV6e{rM zx$3b{IvR%5ESIQoSfElqaQ;=#e9q)j_BWkCW1&rs2pYY z;%X=&tI?tYX))7Lj?UdzLlG@@0}?kM+%dax|15xsy*V)XZ8)rLP&$&W%Lt&_{Qums zoVx7sJ?{+3#eeg`r0MVD$2LE?nbMd0Wev!DP!SV}mJ zhn3{(dv!g(r)hLwWOZj1Bl_6F6^`6P@#lFU`l1r?BWZx-e=&4kXy8(;c5L-5mw$yU z&NjQ`eHhuvlOVkt{vys9`B@WUy6A6tS<>`nw|!-;_r7 zt1D7xC;6acLhm@ezs#O8g3m#D@}2$@MbPf)Vc(V)#ko`r6rnfo{FWpEg z-ytD)kX-P% z1)miBrb=!DXdSi8`qh6VF_fn#;{%YhI&^0YUPnnUc}p$}vV*q`Cj7sg!5Gc`k5^Ep zvOA-EbI`WifX6M+gg(;rlLCB^HTa2**Ls6ycV?6Z-x3Dvg&A=85OE`fXG;*=5W;G` z6NIj-pAmvhisz_P-OIfzyt3&cb{pC%$l z5fZJz5TXMpYYWM}&}sysa>+uf zp!&h8e1!|dT<+?34AP;fUO6?&{qnq-RhF~l9XyC%?7l%syV!KJM{`MQQp?-TwVRTn({O?M^+v&;TiT*e z+w0y(;QHj~tK4T^Oa;tzwJ|2%cMhjOk7EwG+pG&bJpMcO#C| zGf=X6&Us4NPQnOt51-Cob$PgRp>%;r2DU&AV#ND}o`!!@9+?pJ<+VsX{fg=YmSVD! zaaAGx_~N5C04ut_X&>x|K^!oJ9GO5Ng-8D_`9;N{!BZHjv_(~*X9p6rhuLzc!h|_X zcNjCLHq;rYl@ZqQkLL-TqjNt3x8*)bdlc~iUnZ5%45V_G3pMs?U^S?syC|W0Zheo& zep?NA-_%O+ezitKvxneIiJYO0IP5>}$Ckk;GRmS^2!7CJ-Ii6ZmortXK>Y)GLJ`{O z4`+8wVS??|AQ`on)iLyBiC1T1&@Z|0hYm1vw9(q%bo-O1n{+Kq%FW$YQ!ifeLZ>Pi zpLYG~6Oc!Bk45o=ueU7!0jUP40V{q8m&v3b!1hVM4(BH_nmF6fiV_bnJmCc)DN*x? 
z8xS)H0?)qCS&2GouswGbjQ&1fyUcs^s)Ebk51>|p{y+|oyY%8W&wrue;~@u__#1#5 zFn#$ZJnT%7z)&$HFQ2KFh?X7vFg)4ZCtGhK`+|GAT>IfWQgB5|Bm-Tmam8w;E6RK0 z``HY@45W0ORG43VxbqcL1n_$R(N85h#(Ps#ysuSyYde6Gu{u~$48pn1cZ-`#w`U2Q zYhFWN8!;f(Y2LhrGG+2R;%LaRMQ4LFYz)%%(;X4-RZP2H{e95XE>7&~TM(S}qSD4r+Buo& zasczeFnBvRADfvVU4j-Tc8bg_?Gq)-$8Sc556OO-auMBM?p9QBeMIIlCaeEDG!Q5a ziABihF!5l~V7)WW5NCK5IzK{vd73EA)955WByX{@Qh7A5`c`eBU%{l(>UKASVsFGgw!iRsF48pjDY79TihsFdG2O{fcft*wpTT4u>^G^1 zDQA2@hJXLJHlgL*xY8Rh(s;*bOfS=M!EwB^OZoZQb@EE8toleWUwK+;4-!`rOObZu zbr=K1gg~?VzWFZOtj+`$qiXnG+Gp}#6bjjA%>nM{%2RyL`>IB&3m3cd^)W&hUqkX4KV@`~b}(%oiv^kCG(Mz#NNhpdIS2A1 z;)At8C~r#2c*jSnV*3?CCUfm>CvHWpc-|LD=rXq7^Uz zm0oGdH~cw_O=Z3f73wAm??v;3PSf*T=_1|G0tu=>*MsFYObVS;HDZIuC4Jb}%~q<; zK3#{&{$SUxIvt!lj1k^bb!`khQAEaUm|wv&lpc&BUhD#tKo=>v`sW&AnjtGkk;AJ2|Mi#+B7RlSIy-pRa5bze^NcPkrt4<>yz}oT zn*Ts0z*k!unuCHq`|Gy`6#fK!xs&m8GXimq8}hb@R|eFvtO8c@x($|v;5EH4jze0x zReZ%_{BYOIk7e*QIue@>k|n)o32a468Wz>rVZSYfLu=zf3Y?j?-wg3`V8W^vYB#)# zEEyd^u7T_HnT)0y2L5Ta5%{>-sf)Ok4y>A!@yEwabX9VMS2;Yih5i z2gD4Hl84T@rVoF)8GQVJ@eqR`@6PC;91b^&%Ame~;>sp|&*W77`a?Y6&NpTk&Qv34 zz7lt?YuK55wPvrv#>2#YMU;Rsx!zKstOc35-eq^^2&<3gZObsv6lxoxhe-9LSMS)N zB@?iN=7Cp9tzEUcn`}5SF0<rVC$#6jOoVq)(Ku!!N#m{^e;J&g)W)F<>|eSmjY%(p+jA%;c1wD zMGPBQL=E!NeB6FR#pyx7BBn!nVEMcT*o?q;&E~f)5U>Hpx9ZKxQx_e5S5MT&P=4X=~`{<)lUej_KH0Ba2UajA^>-kY}}?fu1Kvc4dtRWCYQ zN|M|?R9jPVhMnBc5oXREWtfEL0;E8#F$-@uJ>|9^)u*|UYYk=^>B$+hjCr5jogp<> zmdlaO{I02&@#)p8ZwQOr_eVQH>}I;Yg#6d+p=FZw-{{5ec_aH3GC~fD)>6*<`&HF$ zOxxbtlso=zT%62WUEMMkG=uWYvMhHyw>VN5l6=+zFNr`kq*FrLr|>7T&U=7 zqfdE#>q+_(YLGlbyhxsHO(Hi@Qg^Xvz`0cW*+HE0ZhO{H{2kn3CoMS$078 z#-6~Qn|0idft_L}d@!J#WC)A{ThjG;Bc=MWSfEU^Bn@}+{6P0Aem}9o)O4$TQhyST zz4E2QaDNBae*63l|GKyr5DZ|=FnVF05DuAM7l1X{jB(a4UYlqG&pH1ZE zk)Lun0(Bh(u7VDoeg)Gm@B|H8d zy_Fwli0P8TLQne39N*y$QhLC6Qhe1kwkwLh}qK*^~a?FvU3ZjjJW~i z=plGepJLt6Ywc;D@2hwhp)!9H_|tOPI*|mcyt#u6=m@A9K>pU-5(L$KBe@Bfey#ak z@;)N@jU!<|)gQB02~0pK5nrWi1qd%ktQ_C-l^3(FL!{=cJdc>(bg)PV3=F56LXTMM za#DPHuF(GH>Jo&_vRz3B8FUw?WAdzT2Fb4L+NDH*QPJjlmq_uF&tx04?+3R0D 
zHosi%x^}tVx>VymKl)cd-rm}jo@L|<1MfFKb6g>~`zZ@fmtU{??Jj-X{M|%?86o)d z&{ip-QZX8TjB`UKY~>EMHl@6fe;LTo;6^#RhIH3O^!+-l>dxhJW+S{)VC|C`;aQD! zCVSb|Wks(Hp0dWWv}-9nWARC$w^m&H=HEj+Kgen(77bgpCY79Ud&1!eGgJj#N*ya= z{t&dmYkWb#x_{j0nJ%rh0^#nmBz%E#z!9Nxh9Ub|5-;w?U;GL-w7TCkoVqwTTHX<* z;4+JSWS9|c{rGHK>RV-!oQH0sM;K*bANoB*%Rg`Bh$_>db+T)zWX9FEt%!Yu=-guX zR#xwD-Qv_&a{u&fS8T_uBi>w-pH-_FA=~RFJ%>}#YpRRBRET_J)f;$rc(sW!6clu< z^yXOTw7Y7l&{GU1urzP97L-C43GfF=$sxkZ8R&Iee>{wg{ujsI!o?M}!7fJvZC_oX zVy<#-&yyX1tRX9yz<@lALT)t?;HWqMD=x!~pR$2FwK8-RJ#!^T%yC2eSrH{Mohj|c?mV8y8vX;J>33dEFSi0Y{ zJqoquIP>R52Cv7)-{S{c1h$0a3}T7%MkLsBkvec@x~>W3x&Iv?f3^M!W&=g7V|cr` z@Du&2_}N0Sn69+3fiO{Z{iefB+KN>E5G$7Ur$EF$-Dn*Upcn0m3|={-fQv*1OSC?9 zfPGz@o_kK}05mb#$m|dM0h8bBz!q=XdeRNkF7a|S#akW3+o@cUy~8MtET(>g_9Lcs z&A^QNq;Zj>YM=gHo_(^1G%*38ek{PqVaGzziSMCLa>%YzTEFqII9jxSU0oWSIi4#B9VFbiuc<>>Y3n=LHI?*U+F1+ zcir@~eP?BX?2vD-U_fvSSZi)cv1Tk*;PU`?q(dQY!4!C&)=K^5-CjpO-al<)_YZ6w zU}rF=PNUpMWTTNVCiLsHAvj$I-HMgs^&tmekAQOILX}vhCzD)vZYn3(l8HZMr0)-{ zcma>O>cLL9*AoMA#gJspS-Ee4swVxFe{6TvnO@~oD9v0n>^gD4ENR^L;+W5#Hx_17 zZ0!baETVK6v7nPgd#_+GLOkz771|zM^$y~oPMm|=uc@AKGxl+8$bXWvcD@soyqtH_YU((+Muq&Bv0*g6)syl?HSNotRzhE`6e-B!D`@~%%pYxk?ch6yyIAV% z4xc~A_-87Z=wGWUSWfpxxXX}Yny_KLSDm%H4Meo^#~c@GCq0Om4a@_xSD+m^SDIEb z03CH)qy-Hlh`;zFHzz6V)`j(vdtKbmsD7zM6R+D!$6Hu<>AN-_Jh&*TGKP@yv0dk4 zR)IdysdoI*{-5j*@A=o2mw(s^0$2cf@Jcf@yxyG4=$Z2C>5k>K;$~STCY*!uqyXHw zvmA7D_cOeG!q>DBh6*p#ZU#cxL7h@b#c@-7}5}dU8e2C}Kg5fWKVIk}6afN%vQSxw2Ad$@I%?)MI!~tRxw|D3rk#E;G z!+#adR}At0Bt4sPSTXElnm;OOC6(L`0r5XEGMj?lxOp4IX2O00k~YDt>}4eryZ^Z_ z`Pt;aG4iyZ#qO}iqnX3T5a?RK=JiZ}1Yt#srn8oGt{?r;fHRv`^xDgFk}$l?#~yFQ z?PtI8>tb`j{K^s#;rwcmIm2{d?0W;}XhMW^$UgJMZ#orHFbX)p4!mzBM(H7**9C5p zA}S_ThKbq}@C2?7Nj89MrxWdhpmy4%}Q zz@+HpmnZ^D6efRgB_B-aj>9XsS~{1A6(8;qpe+~H<^k(d6Q~km){SPxIlFwv8OEds{9TvYb)upleGwjaD<464P9GU+%cPz z=-%3`8d#qCgZ)>lBetxDyd%Rr+7+eZ_5BhlpmJq-m=obPZ~AFyu=t|nS7AO={XFyw z%^yd{^NWgEtKrK;BW2Y>OTV9*F$8FB@9=4;`E425 z>=|CccJ=F2kW&Rkx0_X2fNn`gptz{qFhGps>p7D9Z(;~yFlQ;t7e2Yf8CNF%Ioyhs 
zw~%uKe|H}^++5(GGwY+PBEpHAN87&@UTfSf6YJIGAXji_qYx&KEtQwW?`H?&?z3P$ z;&ml?{lg9i$EdWN1g*F7e#rh44M+p`ZzJTb2Z#S7obK+I8aKxfPDAb7<+)}CoF80% z+}bd_;N99(+4`ba9~_mLj~KlA$*r1%e&)@tSRtzB#`{y~r%F`MB1{EH(xZ6s_anua zjgrztcy}J&;g&*yCe`TnM+7P%NoqdR4{r+c2%b!GK za1RbUvbH2dYm{q(M8tiwYQt~*7r9|2za7pu$6LUL2|v+r|Ixz@)#dd=pv0JuZsGsC zI@y9Na-UqJf?I0XHepLDJu_*0Wv}d!3BUd0zC7~Jrh>+WShas87mo%XoQ^?N*M9-3 zIIsuDX9-Eq{#VKIZY#M?_4BP7rOQCG_7U3%$ z(WpqRU=NB?8Ks}s3$)O|t|DpCE;R?ysQ1B_6-$I`%yVFV?W)utl_u1; z`s#B~0Nr5wl0YmSk&@wpz_aFR7T$Tmdm^vQ7o^S5*GyN9O&Ia*z5nX`tOd6RT1;*!ySb0OwH3!#c>redc z%)v}4pi0_bQ&Gs`=EarrLS{Uqptdk22)jx0MdHfBt9xMq-Qp>QdBkZO&y;y~o;?q2CvrnwODRn(z-2sIygvLNN^L}PMr$1F`IxZm862!MZq7< zdhlV2tV164MrPNm=N$*oq5l6QP=P4M``fBrY*czO3sh|p*!s4x8a3Jaxr>?DWqgHM@J^{s5msS8 zK90CnR@GE>eqKn6b%ad>Y(JKF5_|Aid8+4~n**KD8oMK#@fR>F&z9Lnp_96zGn#E- zNZTr1S1_v6rdD&#>B@feQM$yXx%uR@k9|R?gN1U{P*!mwQ%Tnh6J_)e8HEKZ@8-B~ zyPQU`%NZVpI~%S%L5a33Pl;XnTOa_hy6Jb_roYtbOgR=0i1N$^Sld+cfzdzxs=x~4 z+ItQUqr~Y}?e8C3<+cD*>9 zFWh%=r6y!h-QwWZz^-s48+@+_x};f-zw5A~X<(*{aC2b0D5S5B`D%I1HWA+{B=Q{T z<}i|eB7D+8>@UBD*0Pfe2{WffZv}5F4P{`1dKVn6gcB=@|W& ztamCXUWlOqaQ*hK-QhW!>#x_+V5#G-o8fQPqnssyABxD$A=0I+T95+Z-^%{c_|Oma zu{Q9^2HU6L4dnZvv|0Fma}!giYVf9#4zFaRlT)wuY*_=__rhN_Gvh$!xeyx|$aMM8 z?5}aA0~;fL%Os|REJ~?5KMAzn5y!qF7{Y`@TMkXePYECq5NS48W8cIg;b$e_!9@1Q z_4EU88vv;s|d&8qQF7rJB#VN&Q%XT*;`UL0il)AS-VV}h%6d}9{O18|q-KPL!1RBc?h z47|)EFt7VXn8Q%1tXE4r{loytex0KWX$V`J4uf7KB03u-6-CyEd|SuN8n^Y5IF zRiYd4*W8;}vAuYGk!Rt3Nb{RdKzZS%Q2ZA^{ZG)Y3I+A#*dy@+f9%A+%YKdb%oYI* znGmk0mhz})R7fd-Gn0t9yfc|K7??Ote8gBdcs244=jLjR)AJzxA!#*Mbo(|f#(An=%& zR7|fZIM#kMfv6wlSr(J1WoY>}7q_t9Pe<|*;X&S|8_%)Rz+T;K_iSFTmGcu(9ZRiF z{kek2N#3THYSU!%PP543k~?*OYe)vQ=j`^C6`}rRJR^z&2=9F^Ui+U`kEz`_JRsDwqu0>?XmvEoU*n^m*df*h^;xJS)S`~%wB|y z>gw)zCR06~nI~yj9^kZihRg=Shnp`K&vb_1M-?WHh0r8(I1wa6`t_s1CKtl1!TCn? 
z7DS$Oiw48@Yxp>e$)?XA>4d%)g5k4HYKbgDVWI`9|A*;6|4W;`YX0d93|4xzBFq%H zJj#N)X!6%r;KY%}pVg^(ZbxXdQf7*FRJ=pAH5aF zInzsBV*)Vvzv^=)_!=NkBq725VCJaE#qXQ)Tu&$Rg8XUCu&dXZ)_>FaabJkHwgO6< zcrdbU>39dO^gjmt$*mJ#dCL9M0YFj#qzSI3jE>w(+r%%Wt5v9fw}7cKm~gJfK#QP`ZcnveSGuon;@!US$V==W{GBbUs)aIYT;J8GMtx zSsA?Z0&IGmw`l`C-PeCX>0kY6!hApBM*OS+cfR^s;M?VRDKCQaZ)(4Qg6m-ApTj${ z0l$g#X?j{<=?mVfDpNANh`lF#WLU1kQJ&`LdOC%tP+O;=pLig{mJi?+gs@(K^4{23 z48NV`w+dn9Ev_SyK~|Rs&Xt=k+cjzQ(juoykJaMSDvSZ|v=cjsnN zg<3%jyMcdM3JO<;L`5}l{_pIzjK*ofZZ7aotd zHgwp|(~;dIxKS{EtTJ$9#z(*SNQLPILj7MI`Tr0nT@oKD|*RytVLCdeC&0z zjkH5xnn%;_{;|5L;>RS{Fwr{GjP3MvWXjCZS}n3-aaJ}V_oD5^1HP_s^A})EYgF6b zsd>Th)UsA$PYuBEkSZi-+t=GPo*`*anNtCWQD-K#US0*<(x@%b9A4TtZLN#} z=N3Dt)E_8yC9|LT3kD%t-^6XTeb2W~?olS6phlf0#Go4Tgw)0s-$tRBj##<7W+|-^r zWwjf2Fnqym#EySa<|W!=x9z_wSwbs*q{^wz{lb%FKsI*gfuKS)@K`E|q{xeBai5?W zMIJj+#umX`^qvZJ7psj;$@VN*0IrmM&v0z)eF+tIw+IvV;xaJ)8p(azzOvo~U;LiG zb~xdN?Pq@CzlS-;bhTMu(0(r%au8=#Nf=npj7ZpiyJBwe6i{v*uo4XWT=W4%iY(i( zKqjQGrg_5oTJP|pao1Ta5ZtU$rQa)$KD7xXq3LqLBFOnCZ)tr~RiEN0o42)+zVPSn zsCUjCi@GpyQ`=+=3KMsXnNCbBrj`8@3SFK)L^&F?FBq>_pJ^DI+# zP(k3`z=rszU;QSHost*5m%I9?Hw6wdFjW>kW2?V`lWh)kA~4dO6eZG1sDn6~BQV3a zJnjB*vWR;xrU-g`;}-!wD(<|wRptS0e*4vJuC($%Y73ZgSBZ^l0mb{wZ~INn(5@@;@JP=%*sFqp23Y@R9q;>Y<7Jk$GF|e+W)WV zCn59}($LVB%LCR!)wk^r@=MBeicw7Iuho~MLxR?ry+fHoDNS__lY+_l3*i6RR>-s?QQH;C!mJm&Dw3#NTbiVWc7 zUAwzm+dLrNv#Gsp%t774M3tq==FV$Q2TwV0v7%Dmu+egXctp&_Lfpf}5q#pWzR^hf z5j!B3Qe`jl2c!i>S672lN5ju~*OHXs-|AkrwN0-R%G&yQ5dT1~a4)>4ROCiUF$opz zugTevqSXQb^ytd>7w{Jz+oX$p=f{ezOhN;;mgn)sX3|vYhw4V&TM^CWL=~ z-N_!P0#Uj7Qdw<4D&k7d7!pD={l9Md|7XjoL=o47a5J0^p}gmS5O-c)89zccpiLff zT$HT#a+NddZoR$>VIsMFJjFfg;ynPC)9KfepgFJ=5T#*y^Bfe`+r5C_R57fnMr<_g zSmDe@*dUW8BirUan(!kHw_|>b$HRNr6@{zoo(|&icZ*h*zXLtlPHyew8A{;bZm&#R z=z7nl-pBwoFeUp$<)+C$uVJM;&=_5;cVsbY&`%u_7qYoux>xQpx0(ph2+Rq8^{53O3=g-YnYny z-~VFgK!K2OxGfOj)NLd{fhfk-yRHgfz$D|0lp042VI-JGs9S}Yt)CW2Jtqp{?Brxp zxR!Wlo%fUZ)4{PZdwk1V$M`u50Vt?%g}Tb4VHm&i0e6(vh#d~<&1)AIUl%#DR?Aw+ z53(l`&J-K?!B*|?LDI2_XwGm)RS>Y^l<{i|ADVskzJu16a 
zc~N}T#Mf|?Vle2$%f4o&y+&eAE@7kDEO3nrBlp^V^5=R}zVdl~#^9lv-}9o3>~?*H zA?%CdBG2o13Mo(bA8@Rv*?VDtiGcwEN#|$!ofFtZ;D*=yrrF<*s=mt)$Wa2Sm)vcK zyly?rr+V8V!=ML#AZ(iOBcAW-=hucVH1;V99{iF>LdsZHW?PeJGNK|PCH=7e-Xtk* zN=mnIU{*UbjG<{3o~CffCHPefNbvsj75~Mblmw7VhBc{!B>h{HIJ8f1yi@oQ%#PJr zcox|Y``PN(q{&z9@VZLnRKw9ckm@h94zv!v{2wBV%C-ZQNDTna6;Zw@>E{^WjZe1a z%7N)mpfo@38MkEw{f&Ji;&)8>rt)TH86dG6IUn*PT`B9v@P^`Zo{P(QH%Oib2B%)J z+-9e|!X2syul{58A~L38JF;&#vApZi+343v!30SNdQuZB>u=JpG%W9qD}Xd2k7a`BoP6Od3R*~ zFX|01hx!~RcegYD9Rw;o7gcADX?cvfcKQ!DZJmEh-QBa>e+-Nnti*plojVAnB&N&& zMws96*G)8Sz@A}yr;?;W5eZ8bQNnH|1lE^>r!`Md%RuifDfMV|_cH@_B5^I7Q+&nm zf33cGSvp;F_~@INdBzH+b`@rJqj6<8%o|*>*?r=DIWYdJ5JBaJXyb{u`Pkn5kv18~ zRdz>sPP+y&Y2V6`5K2U|X7{!*bR#6M#&>IflKaBYnw5vMoA6Umwo2Z#42?xkIxsyO zwPW({o+Ib5o7&o)Qg)J`;G#;GmT+wUnnsfC3cvvjC3KHVTgDUcxyXpWXVP_?h^Ei>LfB|83DE94+FRw>*-Ih9_>@;`b2$YiTIz zxBOwCT5@yxr)Wx0P0(ULPxu>HsSDY9GfKc{^!Jjp9RKP}CxQHAW9<2`y+rVXcf{SO zQWr)F9S&rflbrptpSZTouv~0bP%n^}niR_&)uh8Bk^62jw}cv*U0zx+3Vc+b=bp%S zYH2fy7Ld37tYb?|vy7`0|yLOfvEk)edbH_PNipZS_b#T zz_2du;6t|GnY{_WSL_DoI9BwTGIFn%NcYN z=ki^3)1{2Pc1;D@xy<%0Jjy@s?&6iUIEwZI$p1C?(M1R(GI!bOJ1L-@1gc#6*~R<_ zr4KS3sulA(s3XWYZoQAFXb~H4hP>WDCyHRL5AtmyHb6ari8>Ro1NK{mW$s@Uo{Q|% zI`toG2Gd4$X%Y+!hx}y)G!${%@c&F&03v+PZgV0ukcOD4+pbOi<=K7tJwNr=C70hU z_{`hhm1mF$F^}9Yd$xY(Q|b0cR8EkdA5C>#Ej9m=Ih{5Ol}qufc%IgG4?VQ~3D1Pt z6peMs)us=hx0 zrF=6w6NJt5b6U1CoY#?n;U6+3njz@P)yDQUEw`7i$m-|Sjd0w>YUaUgwu1-JCdPLK5>%TAZ~Gmh&*qUAJSd405ynH?!sZ&$)nLv*7k>Qsx{ zJvjEaL2ZoP@56snwQmRPK@g@T%aNnWjsIQWDI)jG2DZ7EbF9m2eVXuDVBX>RN!|ZY zUfFF=+-%%9JT2;e2Z9<&2$_#jfk3gI>*syMsV)Vcx*#GH{K;Vsh_zwjY@zEGG6c@H zVK6}s8hxTHV1x*M_{9r8+^XhW|N8E`DHtjD&6;_uaO_&$pFv5SW}+kzShlA@trv48 zi$@$L?uaJAy2dZ?RBU@<49377mlz>6V5N{9OCvePvB})EU00t}&eBcTElz$fy;gny z%zM+7Eh6d$4|38|@oDx3ZzWT>MgVPdxso3JWOJZ}Kd4nXR?#h(CwQlTUE(?oV|Zzu zzSzu@SEAmY`V(2qaU}4m{wY2_NEhLISPA3Iea~b%khF*y{H|gt(q&-d)ije>hVKX+ zQG}atUojr;EN3!QW&e#?Uu`-B@g~uGy{qB3C_MCEhV~POuyqM}{5|sU6Pttha=P^J z&NaX29zIo&<5o5jj-Vg32Qy9(QsIIT01F~k800T05)_y>hlzpywePywqG0+T^I0=> 
zJNQPu&Nm;(gXn`W!pTZGAr%yCxaK4|Wz6@8s)?Lwi{l1ZKz1f^+Iqjq6W?3Byp^yh zsZAr^lE@D~dE_RLN_4O6M1y8{S31U*OJ}Wg|-32V-bdOSyDYwWze%*M=(as;+*!56 z4|^Ey(GY1a`H9zCY(&nH``s(=KJm}b==^I4j^GFSy^|S<8iB9D2t*tm%Z~ZjQ};eF z0p6rqJhyTKDFt=Vr&jTUo&1nwd>ZEB8vBWYZfX>@2w-iZhp<`RT4f26L%0cyHV0W% zL0-V;Ex1K~MVph^<~NZlGC!WLe$x)#UzC!#v=f^Cmxcri$BSEOmz#mHe(5q1O(qFWd8ZD*;b+1B_%Oo;KgFzhOM@%e|Q&P&idJ8**cwkIs3| zZy8O6Fvjd}Y_hx5_(6i#K9Dfy1@)U#Tq@zvC|Q_gQ_2u|_Y+(^vX|T$t=JgIxg_cU zY*y{S&4_X}ZqtxXbjkgu2*f4>MfPa$PEndj+7@nt{VMU3!`D4yi-n(M=^?(dU+JqQ z#VPPRju;q2c2kzGm7xr+m;dz`SeJ+1xOe>@PhTC^bo;$8ieNC1QW&LzpdiiYdL9r2 zNnvy-4I?B*!&H=#frxa8BR3jGhe#=1BSteirJMcU_s;4$&V5e% zXq9Ps;k&`1AMRUcKWzWmf@g+>vkv~R!{L5(5ADm`{E5ngd-mUC z?}TLEX%o3Ec`cl+4U2dQ!@mW`xMH=J~h74V!D zzA0s4A#<^#I(fr(A~x)*_{b`Zg%_-}M4~t+jDay^t;cOF_+%y#cDaWOZ=)LqKU~ zx^8|vM~V6G)HYtY_vfD1|My;I8z&D`Mj9$MzQ92#6=WCwoT1@@L2_i! z*%qMmlJHFZ$y2Az-|Y=ozvOE_i=yw?mie}Q!v%_Ki6jLcyS#!|y==vx9VKwioaYot zwlLT(D&noUyPxyz^uv9FeDMwWhw9*WpHu@YIHbrVeukbc>bf$;Zl_Q%fZx7O&xrPL z!pE3Wu+C@&(nxr6F9LJDkK7U4z_XX9M`58FTBckgVpm$=ML76qv7u?s#XrN#`LC?6 z$~tp(w=#)005q8^9bA zbG>1mcPdV~R?J4(mjB;MHE|H=D0w&;ftOGDH~jlIAwGL=oCnZQzELcZw*cQ?{xjZ( zG|)sb1G-{1&Dri({~tq@=7o{ys0NaRSrIBxQdwe?3!+x49rfY=2W0cX~$H%Am;hvj1^(b)8a#@i}pAR1W`!k)^ zEedg%=JGQ}tWK%e{mf*#b^O5!Rgg%)5!1fSad-}>%m1f=>#Z)4r9HCEV@HaoM8-1b9uV)p zIlP>D2NXWT{+^~__U3y}P)Z~)dAE(~%u=^&O8R^7>9IPb4&CrZ`YMbJ2Hj_+2a|ci zp-Q?Qx)2Th-t|0^KYr%+-`pU&OAV_XKH-bg=1eYJ3}`K8X1*LDy}2HRL9Up@6gsHs z+kSef_+VpGUC)T}6mI9B>3@{z%lhbnp0U3sr0mSob%E?zXqW4Z#kZ0P>Ct4T`Uqo4 zTvI4J^Km0E2LVN(Sl8p-y+UmGBW^)MR81qXK5yM;qDXFVL1bgPIRu}Kb8Ho|+1!+7 z3qMbr=Cb=!DQ4_A_aJboWVRxZlbuDD^{CKreDqyMRm;b1 zU?!^mmUsdLV5iOcSjW)$vDd6!YYh#-K*K6)tK;{LEFu#io){K!)*?ZF90k z@0p$JX?@Yz3XTbU!L?(HpOuW?e(C0#7KK<9{`_GpDMn>o-a2J1I^*4TJWyrOI*%8b zQ3s~NJO75|h3wA@@K-8+z@;P?+eNP} zHf@_@z$b|RUXiC=0qunoXX8#A3(sWD@H z%-kLX^nuJoOw z@s#k=Z<$s5eUJ*`6a_)K2e5 zdcE=gUmIW=$K~P0SZ(mZMt-c=&-4Zjfpk3#GM?~Ehi*GU$Z=quLw>>saNfm?(}&0g8vaS=>#)QOYO!?Lj{}TLzeMooCKXAShFm^xr+v 
zyJZQ1iC>RSsCPN+zlI1>>hsB)mS^=G!s#_$1=3NJ6T`Ep96kp>6ghB|(R(={R7qj3 zm&&!$7bYuV#Gnb7mnUE5Dsev(u`6h3s((@Y(@hh%e?O$qm+aOI(v5N_{}<)-sCjo~ z2D!PwVP=}1B@n{EN&ZOj(x8{35XuSC`!>t8q#+Ttwi&yA>i>3nr{@vpd0P=!at9G5 zTc|^JkB)Oc)?Xj!Mvis$iIRG?UPFF!Fn}26nUeg4Z2i7{=JD${koC=D+7TlaK*dT- zNK};?JN4$et2FPIg?_pSIBiQsHHs^Z5-;nKK!)eA12TL%DUTN;@Qm**nbtnh?DIM1 zU3o4zFT4cJ_aNF*pyNkil|WB<-Yq6kDmAF8{)FO3wZT^crjK^Nz2;7>{@2UZ&;6h# z$CHBUpU>Z~LT=n`T+dRE(iXnp(NKD$g%ZlPXIHsO5%k8dS|p$EX8wA@XY?R##_e)s z4^6~;<%9G~h*fEM1S(p+!7z#%##z9&dYCJN6?_ddoc=Ji((OL@oo+BxV}^9ykKeAP z*QWTk(d75MT>r8DwDkA~YPZdTSKI1cS>F*ffkt9Bly{epp`8t*Cucj$6T(&Tjm*v? z(W7f?za&_$AqJ~bXl3VV*FKbA<0igTiij7{I*#AazK0(!4$6GjsyOE`iad#Pduy)J zT))-$K)KoW5h!ZXaX9d+uK9bOYSSYYqO**4tnuQ-izg-H%F* zSIj0zD;)yKgCEPX@xlD$>&s3Y&%vNTQ&SS9fwzGW(CgE0=CfQpejipHM4SJF%{P88 zN#VEov@y<~U%;Z9{F3oXUB?HGLoreC&pZ6Cj5Ay`jx%EJHrL0C1DIPJxJ|Uyu5Qp) zJ}S44cNZH#ZpGBq1}2o`TbUO&7K=+qoi2!(HeUU_$1|$s^4@#5c~`8*dfMv|h@<_> z^sv!;(3L0TYESH|Jng!ueu#?zU6RPz?X}TfA&bSp;8q@XFGLa?Del=xgVLH^etz8Q zyu(0szH*z5tgYy4WldklmD=oX>ToX6ApSb;F$rdU${}8)_t`n0-G>mY3b@4E{&Imx zq(?q}AoAdg$Q1>j2Cp@H8+<;c3DN9OZYH0zK`tUef~OAjC|JRk5&-5z@~J_ zmMU2MF8@aIFpbJJWui}*EH63y{kC3|3E?c{LITL}^ZNZj2hB~hJf1JbZdC_w7LP|4 z#WtCV3va9cuQ@3Lyi8Bz8v0|p4hYJDE9eWkn??xqcdp2{-z~Znze!3W!$H@YnG?wA zo)j12TdKyNQXyaYd`NrnC??524>kAAp1bp4*6jQD+h-V6_Q%p*_J@@{>0?J<)F{$7 zx6S_W@U&Q_T;prxKKaIWqQCMgd|JtRqCoPR(ttc%yFwx4W#x?ev5M7L>q6g@bd|*T z!_H5)gSu93A3*|5B1ou;%-}2uiKBwYz_!ayi$A1w!^gL#de)$9Y+7S+EcG}O{#Sz5 zA4~yv|E{uarA_-dIfLMV znM|tladKcm%rCBmw`5scO}5rj@Q?ACu2m)%ZBhOsQWvj=O^NIOy@I2;9I2rB9ar1+ zUlq`PirlTyG@4G%>0`LyHBmB;=8xf^pbIppUu2x&8n zu0WfjB2XK2bG?2{0Xo~8{&?J}>C{8nQ~$x-wupP-GH-wHrTTNn!?20&{7~1Fd=nVM zXq5K8A%Ao+@aP@T9Mf(hwCjr#>kORlV9l|B$dSM4>&Xc^u<67mXZ&m=?zN8fGwO|Y zpQ*j&O?r8^clw8M8e8*9<~t>moP3j>W*M33>$1RJC0Ks~8Jn9*qmT$E&`ReYFY@h$ zib6h$ffCcE+VJxkLVII;CSK_48n^Z~7pOalRgxK8Om5Fm-LGUWEOOVb7G8YBf3@f0 zQd$D3hZF{sVKs}lYnOo^Amd%{MN6f*!nXr`d5nfxE43B}lJmC~3Fj9C?Key2aV3!! 
z{u?bb#o+j>%VhB)Ayf@vwr67N)i?#w)#X^~@EexIfh+~P!9PW=ZGdB^=j7D*}$ zyje2(2J`l2$GckBJ-yD1b%v@4m0ZBn&0@B*!g*RTakBabk<`EuL}ofWo)^<&9`EJ~ zG)YJz&n<(L*>u{-epHWb|puKZbhr1*p3v#Nw8*8)f-O*L1Xw87y_AZTaM z*@V7+=#iNoKSt2(JH67c>dN=6l@;r6T{_q;n5CV=bemysQWakiq#}<-}J2S7J6F2H_ zRyo29r>k>>r`vC!j{9&n(ABOV%K97fPv#6n4IfzIiW zMB{brq3pEL+9_gd%M|K$-uOB;?us4$_?t*d0q^J-yf@%bz?vGr$5ThX$@93gfV`y)glOAWm5Ynz&RX0rd~$1!)(LQ# zARaxYipNRqtORxiR5QT0?BD+n(dHxFP?_hjec3Tm-F)T-QTIg9RH5 zi>eZtG+mJWkLtfW)~y6fvVI$RGAOto)Z8_sg?%VV{WCP8lEHOQ?kAmwau=z5yw`Y5v_V;ENsw&jEgM@i2>_l`1F*W!q z%+9OA-Sr{kg_0y+CNR3Qt-qpjppP*xxTgfXv1|d2XHYVQ`Xnnbh2UWje8>e9j;*eD zcOuR+c3-LTaXNgDo<8BhVi6uPwv0o%A+<`Kg$K&S<(j!-R3!>LB%2)XiCEu7{p=r+lht=Pwvt-3=31CH7$#g{6cC3+WKr!2f0 z)@{e}LM&+l2dco z1IPJIn>g~EfXhH>mpSN~H|Iddre`t`n-9zcf8MQ(ieZo2?rZE<4;0}QJ?xX4jL<+n zO+G+h2)whl`Dq*2b^LN{IYcR>{Zz?%W6N54kDutgGe7IKvb;C@YfYD70XCI=9A`Q4 zG_sUEj|b-*k$@{6`Kl;VV^__yRbi4-wU#L|Vu~tZ@7yT&^asu0!&0S2D*4h!emOk7 zp4e~zP8|rgfO8h%`WucW#X8WFE7;+o=m-rlsm@bjOJPZl2t5YH+9wsO^%F9Ey-5Lj)>eef`8a?4D1w8diyQ7ux?iC{El1goM zQMz_AL?~^0a#%u7ft@HCxo))b`}K)d*H^!vT2AT&tq4)dXSRfdp7w)_nS*p<1;1}w zC}ZP;%Wz&M6}S*Pp_f0qV<1G5NR*46pQbdN%*8D>G0-Hot%^_5=vlkn!htgCeIV21 z1yWrMV`~hzC%1VR~|bk<^&d=iYqIxICSkWhS-^R@E_#7 zV6d=l6FR^e-*p+=m_ngwlcyPrkhQ_&Iby?<8k426;wcG_gGHAd@|={T0(}sk;{z3s zaeqcMsZeL<(I&NHr*&KSSc$?yOelvXZnuTFT2{8;K6-QTZUmyb$9RJdX!**Q-Z)Z| z5~l$zpzQ3ziKd6^>qN1}15?cg%L%g!AT;m438EE0<6i|)zW?*!%DXIT@9pO*!jY>_ zvTTy_{Nc>^#5@QEsgd|6G1P_Gt7}!h4rM-nQd9gUz?|Ry&D2vD9KbPs$sQ%E>+Rf zuwSo}Gt8gb=;XaJ>=*23yJg1Ye9^d&`E8+2eK6+=_8|&RuJ&43MJ(#|FkSD zpd=y#kDstzYTL%0x|M!8kTEjV32Fa&xIej?StG)na`vfdNOAWUeUmvnUgZ?tXTkM{83WxQcc?8Ikm7cIv~8);jtqo!@uQaQs<%B6!E4S;zg?Y zB}5oqjW}|DB7f!T3oZa4#8mf^IX|qCmnS~?1{UN)Nzg8}F29^;X+sjd2MgXn7b6FM z!vk!F-zxQ@3G0suy2i4*mVAoI16%fu11qzHA77{|`m}OFF3h92F+=61s&TPFvt4}1}tJv~~cLA>g- zaz2CjnU;dJAa%cs2QhkC=dpqmEYZ0(?^^5!D#7^I?o-XeiEyncov`4>!hy8{6Mb3A zR%eKakTYS~GOHiasTys7{HpR(V5h&^&&s0C5Sd?Z=#MV&rD~%injanT+k_JtAPC^N 
z?%3TBnCp!Nkl|_STW4?c2mM6J2l-pkX}q~MXTY_f;k1H46kpuVU>1gD)|&xMk5;-kv|K#^(iqSVoDIao1TxK z6LYxB9^4$tJO?uj%V2AARM0;!?7Wuc zPpbL(v>gI%O%m#Gn=)~cja=z%7L7c>7wp`jY8`w4^dUW|)>73@UUw{aumGLQ;O z9AH_tu~QVX*I_w8a9Et7a5BK&MIa42Y(cn3ddW6IudrX3kO6tFJ^1NN_vy@YkEKmfg8i6(aMQlI_(HjB|I9@o7DZIxkOKXnab_ zMY0+`Zpo)16VJ5q?UTD(}<@2>X?W+LBqd>aE>3kr#v zb3_~>OO-yAu6>h`>3uym-E1D{dd`kUd@I5aj%+vutmE;EzlTRF)nSil^zg9pVN_xv zNMCBTm-^{#QOb>V(snW=QWU(otL5i$J}M3b zcPCu6Llww(l>MdfEQ;jAkpGS}@@Ye;f}jzoer&T5W=n)5%@Nr_(t-a1(f#%Z2T24l zO6wx{3Td3G{37ubPJUQGgX~9d+qmo@A4J>dN4&3n5~~Cs%p*7m+hq%{1^eT*YI1DW z5;nV+KMHesvcWqOV+h}r5DDUVQ!${-oNXA_XUf_cDXW)AQnZWHjrGvm1OIUj(N5+( z7aCD9_Y>YNa{n1Irg5*H{3%bWj_K|ysa8zzVUd$V*pIe1E>#)8vNp}mpb@f45s|`` z@-pSx)x#GOPF1ecZ=49MxBxx&qj6Xea(6ALz&NCNzmuuCTSpx?ncmDnSx-cZ)Nr zA4T#98U-b$U*hF`0re{cIrR?+1pqQ1-QOkV2rWi^UXD#5I*_Vnmv9fN`$9R9tBV8R zE0%YxFV;@-B_!(@W&GUSH)2@b3MGqvV3*=#j4(xdjLmsWGnsrY&bsZ9+(}8PS|UG3 z4QU50^|(Eq)c4C6@hsI!sUhNzAVeF`mcc{8SiH;lIBk03@BZ9`$aGKTpE6?Q9qs;W z6@GXaL8p1$uCb71!M{&XRbgKzuWW`m6v^8k6>x&x4s#%=FZ5O^bp{DSUvfWIZlNm3 zJ8-qix4Llh+c+(XUMYv?=gYOUnAJLb$MrF)RNF$!X^?3L2X@|iE2r$$p7h??(Vnco znR{Eg5p#$sZ4OR7c9;v5f_W6p_X&JJxJ#F!=62)hX6kvb&zhIqLQu@e?aT84qUgc9 z?T16n4=>&WCs(X{mH+c=0oYDw5{@BHZlcR~-{7$5+8o)NI(t)l=v}bntCK2pSHUqf z=bxc_!0PnhVu?BGu&?lc6<~pwUDfN0$Jd=&jWFqjAkEbsm>+c79xLUXV0jMn>Kv}( zT1$nyy;*IU)??_kNziE?$&$_n$H2U~uBnXZa&6$YGy3bZlWuA9q!ZvjS&-SZ*^x4q z3u%w~9UfQeDc}WH&F#xK3OeA9;1FY-f|G?)u|4VcNU8l>K}86d(|OY{VT4kYtyWfi z+tv4cV}Ev0J>*o(*^JG3LdyDtod!oBe2zdWuv3UuZX~%TQ>_^28TQbla9YLgCE-`- z(~&5rt(4mNQ1FYtm*Rht2ETv!xuqIL}y%L1qQNOLO ztn*Ihf_lH@ZaP=0`UWxpYPwNFd>Od4*;Hw9A~kwM4j|vVZ~*w|X>I|ZtZDmliE>{T zqgG?{e~%QHd!1vF`TSHY6Hv+R+Ba}Fa=F8q#(RWGMQ+iH?Y_>Z5)=NJ-4j&xn}S_% z>g+k;#1y>UcoY@_I~%m`03Vm3lEJnG<^K+?#V*bdezp8MjJ)7NWy-L3d(QI?!Fy7` zkWQLsYKWrU(e)04L=!CBFc=@dsNnC!%y;;VeZ+Ib&RwX054k=0&1mUP*XSGI^>bi^wyWF&y# zBm>}!LVo=cz7zoXntbW|GIzzfxTdivXS}BA>%Au^CG7x5p?I9S?L?@C z7qhsqsd~l;$OAW3##PDsbz5%a7etP}*<&2fGDT{;*jV1^y!O1sVZIFq3kk+cb))+2 
zC^WE|?S80K4*?M{{qFbIJEL}7@GQKFF{<<_TSpF(-I@W+$m5Q@yTR_U-jzU_KR zIlEHVT(*08YQJmAM^{!STM=u>McUI(PxeytuwcYs`LdhZ*LWxQfO^XlRB>uHvsm|J zL1MN^EX?8PSv}{txFmn}VVme!p^dq)`q&WEhFf=G^`a^_9;fA3uVkr&uJQ2i3eN2~ zg{YQc#PQN6H4#d7Yt{t2A4B;b{w6%J!J~L_MFrr#bioIHCBZ~I6`h5hR>lDmBaXGm z5@HIWY+MDeeBBI=b;2kyJ2G<@P7_?Mggdi4bdq-!Q7N_uiM`Y;NhRFdkWNF@kF;_V zORaX5NOnBt8juJML+sb^iT`#iOa!o^ch>Pp`)yxCfF_x z*{|RJPkn`%SQymyy$G7gm_O_Ik_!M`;@R>1a#Ta?K8@=SO4c^#aoV!7AAbdWq%-_4%mFO)p zWqi|kVX|$A9IhHZwu``QcxXqsPU>e52b#4+FYfZ(BKQvS^oO!XC=HYhx)n*%%;9mJ z4!_Qfk#SI{e(h!>w+q9GI9M0X?BFtwLSwpppoNwMqhiBLf*Li^vve&hi zu;Yk3E?}3dNX|D>mh7?<$IQRgOG&D?Fm% zR7rctc9W;omauH^^4&BJIx0SJ2$*NtBgu2W3MYL=&SCgQq;a)Yd*pYqbgm~c^XC}^ zKM_Uie=Thhf0X5jRB3yPu_gcfO^)<5KaR{FJf;i+KuQ?^sq>eo!5a30uJbV+?!)1_Ox(SgDYkP*-WIKsi1;Y~8t#xD5g;nRk`JM@_7l9&EBd3uWhx)fT3R1h73 zCG>)EY{oT_vbCONaAE*}WUm0NfmEPyVrFa(wbbj`#|0o*_s_i#ERAVLuu20O{>_Ay%Uj!s0G5>cINtuP2a)^U^;IVK-ubosuQ*DojB@R($ z<_ds~diK0%J!kKyvHD7EJCZWy+R)x^9;ES0{%{}_S#$JCl>6WlOws|jH}N?# zDd!}qaqZz?<@MSmldc&B$8`Joj;5 z8P2Ce0l~pav5)o^$yfas9l(hBVtjhBY)F{mnEuP5P{;Xst4~}xwszO?sb`XP**@L9 zvU*7;Y{~tHuDL1)_zgeA-h#=ASHRxDga^9>jKv@4b;If_4)fJ4neXYSC>u+sV0?e1i&!MrmB6XC!$U6MlC&Ng zfLYhWlX_Fik6gQV{;pHHY|griWhf*0WI?rkVMkP{sL2NMNZoXyt&ZCT+0IdoyDkrA<7 zUSEJLDlpXPK>ErFDy!=VVI7q^I&9n?$Z2yE5d9lx3BZ)b^Ci-@S>W(?Zb+|UrjN+f zPtSaxv>~?vFT-Nuhi8X~wJ62dP|}(2$pzHlkj*Cll;XW7cqa$3{;_Hd)GSvIU1}i_ z4K77Rq6*$@VrOacBtBLct_lqF^;Ag}4X^$^rZotS6ARl55gJhArkLqrdg0Qhp-yVP zT<=asUY{tSu$Y~g=Qa%=!R@;d=m!B9L4@p%_}#2%Qx|~A>`#{t*#6Vj!paW{h|TGd zOd-jTOXls(SSmDniS#C*hXv571v&Mik!fd6Yv;3&FNa2Lrmi`zcB_p5akK}pMj@Ru zFPJW0X?RK{c>FZz@O)0rd%zZcfA@da&EhHpkw>ZZ9dHb;cRn8zbtcbbRPikCyh}Qh z9;yxI*0@x7ER6Dvup&m5MMc8{MMSg&2|8-8xfFsM`dvb%Twn)Q}s zO<|@BgwFg+RC&EUb4SL;Xg|aWge|OGbw`(1=AhI|j6sPAOa`Gq$@#*PZo2(Ju$}@% zL2zW_?kfT23Ap{%?({_||M9Ej+kyYVZ3=0^svMU9We5Pb8Z*BInRJT;`mDhL>+ zHtK$hQ@WE=@;de7vl?8E5f67 zcI*kiPj*Gc=rN*EdhCC5co->4 z9NQx!53+?CHU5^Jgb3j6mA|CX^>H1hFk`J70Op@(mppPnyFKHmPU(%<2=o#r=w3Lv zr2#1nNd7q^&S_M=+yRJXUs`20}6yrnzkKh 
z;2a-ccUGF_wB%U;vs8eEM8(#u0l8$)>14X^7~&a%(=x`{yD|6;_U#BC`644!2AaP4 z4n43S^ikYlQ-*zY&_k+VIusQ6+qvEB2sd_f-O)8h3dXVbkeP#7^-36bJh`OaLV;%h zl1{I|Qu%~h85$2z?YnsoLiJGA)&NaR4@}QHUMTy~mSJj!>emoKAG-B_4!=~X+lHWB zpMD1`fR*E6co+wZ)5d9ChmYq*#p1NXGIMeifX=bO44lW0ecZr=5Q`}8s#l#Ji!Xd*K_Q#uM^U zVu?czB1Wmn+IMYM-oTVZrL1eVA|l^vBr@T4!2p*YKBFYJF8`xddCX%@m`do}<>EM*!3_98N@+4)ak2Fk1}tkFrq^ zcP=_ld&4XKN24tOmUOQ<+6W*_3)J38nVUDNtKOwGtWa7O>MxOrMi{{f6AVo~>OL^A zRO(H%oU?^xi&GtTvC-WUL*S-`Pj5B_!N7M1hwU}#a%BXsPUNw7V4?epubC8b>a`1b z#b&xi7E~3)*7Xz~{w=>A)k{(`>M|*p>pNk}p?4R|6z*W5`EDRK<#}y>Oh!$9+r!-l z4Q8lB3v=P$28popA?7yh%}k3&8`l?`+e@BLtFQ(TZLdzw?vK7CRyZ-751HgUru~rY%ec0?> zW)=hgD3+(RZ?$C2Cza$nQGRS^B>Gh6xAF!ku@t*lHJI188P4RX&q{UvsRB7Z&dPQ; z@cPIBX%*n)*X#_T3&m*(WN4?;lHtBqZbO|0N?e0nMs8NxI8q|NJ3+oGWBN=_-(OzDGwFSg`MG)9V6#wz)n<(h^CM_FzjhA@Pqw(e+OD~HRjeg9VPiJ?Sam)qkv zfP$Omr(XCK`H#OyKbf+-&GffA@HFxY?Cf6qmn0xW9?|sq5?$gZG&NMsj+6X%O>v8U z{a7<;=WqfUyiNthJ-D&!F;)?2jQFLV<0*TPJy^+_CLO^vSv3aKr;=P*L`=7iq*G}D z5A9p+_|=hMm`pKFDFnjuHiL{Gci`q>k-xb7l^M(@+mui^B=l7SioQ^*topvSg` z`wJ$3O>bxgC{#>z>&QI&;2+n_jqY;xrQcW#z{*P}9J(8$G=VTFIIRR*jOM*4%M3iz zH{bcXMMwJ|9X1dEg5&+Wmn?sQMTl4wgYV>`pH#2h#7l|AyVg0Uz;+$f%KQvWzH3j8 zrb%J3rwN~yG&CwSo9(gA?()+Li;m=Pkr|}%47+Z-TWnT2OrDqrS{3zq$ML}w4mo93 zbdC5m$d||umuSlt(rIZQ|J!GL>f?ffBd@~B2gb7emdP^J)tX?olO-wBFDbSo#%w&7 z1n$tjO@>F)y^z@m6!2xV#>yR^}J+C^ol2t3f5iR_?WLtOX-e*OM&m6BtP{UfT#MHV-79wVbVeWnt833Q^2= z#daz9hnwo?25v5=NI+_Q7;v#kL;(dxOxIhkjg2rDWx)5eursu_QFGcbu|*y2)ozV5 z=PO;h37MA4+q`P*koKFv*BzfSyO!or6B>qO4?u7%HqNuZ&N{mMKPP`=u-MFspFp+e z;F@|DeJ$iz2xw4y6&o|#o1>64-eTvG z;2fBD%zjV(TE)F`(K?s&YDpIkbFGye9j;<*rL*P+4uG}$-pS&d;5`d{e==6lcG(( zIthC#tL7@t3ehE~X?W4@g{<@wR`F#v&L;AQ_2Kf0)sZ8WOrzvGDyWGweH(qU{^2YpPh&n{3mkZdlv+9Kfs?U5Q@U_+iIS|h zk}PLsnWU`-sHngsf_2KkJ~Kbbzv{qdJL~w4Ia3G&zNSU`Nl{V%-O(|CBtd0MCs=ve zWG`)|g9s?Y2sc~7aZv|6e~NHWbIZyMMa)3xDQqcvZE#Z*Wv~EJ9C-0}@sP5g_3;Q~M;su`O^ex{n9dgfrI(|2lMzuIV>b(m+JHOhO~kiw&c$-oRbWDXvc!D{{o zgByNS8Ub<0DYiMln{bobQXuXfdmjS~A^l>F|MUw{Xn6jAQrLl%VY|`6E|1~r9Rp4h 
z_2WimU-KO5N#66#2w8j50x%@j{>I>R;!BKnnZS8!sV7~P4kxl|Hhnm>^fz^R*?>4| z<-*dvFKs06#Zs>FD;YIcXyJjK(!2D3O?qGywx_nc7?xwq>Pu!bg~4gazszDsgT^PR zQyb46v-%?+>McBzN^~wb2zK_cQgeaxc_<3>7pXPl2sVLqybH|^mmy<-tk}lMVn-ax zQ5jI?CaofXHX)RnLe%Q{&sn{Cxy$#_Yte0IK#IgYgwR!XU)IVU4D`4wj{BO-P+Uvl z{SM3Ae@pz28f-+>^WMxU`);fzC^nZlOb5v>Az#(I^uYAliIpqR5!9M${u~oW+vx}A z)548UQ%jJ$3@$jrYCcR1}6St#K8wf5ja+m0ZCIdlHl&- z4Qt~DJ!1m2L}Ju&_7gEU4veJs01LGz2HVCiu_M{LN{|Y!hYKk8jc2!v-5sS&@faJ& zuX9JGk6_J%J>ftJ<#9tO%Au$OefXmM2}OaKXk-_2FTgJ>0j3C$D%VW1y0Bi@G6!G` z(WT?=#8w+#f1Z;ur^w!^tQqq98YWz!)r@_`W2M&9`of8sHlMZ5%a+K5*j>;(T@n8b z0#wOm%?P+JowE_QfNZIFHR*t8_~n$gp3h#=YobrXIjP6Q*^R5y(*px$w*!1@n`Q&g zxT|)m@Ps~rGlKel$Uou9XWOC@S%+sU1z6=tBZO&94`$VYr*9;Ho?I){YJ4cgYHeT} z=!$!wD$uwqxMTmDM~3=o=@5- z{CkHk|2w8>-UP3Q;$l1S#?=?V%+(90%(s!$74^#fp|@M@bNd|Z@Qu9sy!OoKX&49G zdOI&km)$aUL9ZPsqYSfxdR6(?bv_zeHb8l zUwlB){f{*8f5vNud{;A#cmtQb;vpq%sy95Z$+8rYb(-co()6t;!&s_5gDuMM==gMh z&A;5*!F>r5k{`cvnA#IM`k+?_E>aG+SvaoQu&Un$tXa%!mIkNfpVTck9Oy$lM-L8K z#8Qi*6$LJvg)l$U{Ex&heWKf-R7=C(@?+vHeIv2vw>QDObzd*NdQ3kpP_g%C{xqaD z0C>)r^|gy6|09ii8)Y{7YWenh{uR<9dp+1xz7vyTaCq-<7Qx8}H4EewT9A1>!y;Hk zIoYBTJh(`_m^^1A;^G5-d!g$c|H{;2E8qe=x~5DC00ry2C%fj8T{X$hld1Z_|BCfs zf9FY2nvg3nlEiM0pw6wUR};yu2d^bOmhxUsuQgaTS~d+F!42aOe871&|A+YPPHY?9;S~sq(0q4OA56NdvZK|so-ji>^f66_5<2# z34Ib~?ETIn4y@cGi|%P99dkO*RKR@@Yt@0Sf@B@rmq_a?2bj>-7T5O`fNdUhG z@qpQS9+DsC-De%L9tlp~P1;R{6Jz#c-LPD7cUx|Z@7+DoLm@NJFY4*F^`V927p53M z6EHMC_o#?dftt;|7*3DBPLI@H9l|X(kHp%&kn8H(6BT;(x=qqJIZ9Q5@H09JZNl;r zkWWSZq%Z9sd-eiSk`)g~8mt)J944|@?Ce6*{t$v+QX9g{Uudni~we)764FkO#bsd)GV`E7oHgG?6m?1SZ8G2|nD|GUumhL2S33uE8C>uHQMmTypS^`uQf+eig!jv8%Z@d_E5QYfn7Pkk zH?pDC9U3pD9dhf<*z{ z?lc9iY{m1o2hJ^0R7YiNe%<9Z>i4L2*@EphQds{SCghQySr7Vg^?q{a5>;Q}3%35A zd#vLW9^LNC5+Hn{hZpd9Dak9VZ>6eK*wk)B>oUwMXP|cjjc< zt}UxmqIxd;=?IWVruf=of4dWpgjxrkrLQB;hl6>FUjK^74^QPwPqBps z+dc|zC|vjb)N9b{YVY!=$=qi}M-HXJ@;8;q&xixmFe*kC51hfvo4?K)mEISzh9zMq zHuL6JIdYv_QcFCt4X5uG2l~3Bq-R_>q%Kku5iB#3oJqUGoj3fg$?iEuT9P&lCoFd{ 
zkY~u1V(am-g>v!H#o)0dLfVnP49$o{pt;3PR2)!}(DYl{%>T>y04xVooX5HW@@b{a z_L%RSFSU8COEj3KgykV41~42kQ`e|NM2n)|^v>zX%+0dxJ=W7x*d9wcG_3`M^y84r z4n?8hN0hrMVV&fFtv~Q04Ln`^^Rl_&%GHr8fSQARCQZ7O0#R?f01NE^#QuzN*MUx5 ztRI@#g@DI6BCVaA_nb)9*3u7(lUXP56K~iHacV(f6^5wHXyW|hbn;c4J~klpwPk2( zqJ%~l!tU2hl_B};kt1_n^z(8Bg^hgxB;Q9?=9eCU0ZF&=cwiR)p3?M2hQbLC~ zf;1^AO`53m1c(rd)L;Wqiu5K#h=34^2%#y(p?9gF_Z}fYsDUKEJK%iQcdh3YJZp`A z;iBZ6bMJEPYhQaGx9Eh)Y^=)q^3TeXof{r5+AJt@RFS~rRbM}Q(wHxOu({$;7wmQP zoo;-kt*BYER@OI( z6LDwd2ii0zTV8sz>|BYv%C|@W{qsjpc#yX5$oeOeP7FKJ0%^Ag0e$ROUNEstEDHM~ zrC^HAZ99$YfD@cWpYr%tGCEt>w#Ri>&_^bRw(STyMg!NBo3EI1Ipub^_~MDc=Zl@b zm0t*3HamzR^RgkTlN@V=;X6 z=A%uNidrF&hFkqw;U^11C0q6Uq85khcFV#NB01@4dmHTs-($)*yDf%%@j2o-Q1K9M zowYj%60Iws{pe?dXt?k%s;J4s-wJp~v@t!}NY1xy4dtrtxUci~#!wkL*@~22@w+j) zXx;S9vZ82rVFCI8L0S{8M(^sF9|?!Sw2iUFhFDv{q8TIRsM7a?y_lYXr+DcWgW2t4 zt(V>WQip)VJVlK`{bgL@t0><#A^K>$eWfWN^7_0MZ3KJ2-d|2OxF4H5`yQ@73U9%L zDTLm(zqBOD<~G$YBe{w+o-eq76JeC@TiF4*_f3t$+I-I@dRdIys^M|i|?#hXnl@>ZgbFkecgN`bLAHo4x3B2 z8Pxyyv`wJl>tVaQk9+#pHf!uMW-5o`GT5nPy_Cqy?NR~hwa#PRTet(o`;;15>6BrA!h*n*vX2PwAIPHM3zNw;8D$fU zVr!tta@KBp*%@SD!iNy$nbErNoE?IxOnGiiGeS^Sca$&v;8#!YD9}r1l&PTk4+(UM z1I^thGP!rs>F)6~+rq`^eJ!e)C1^CELxf(<08a(gmOWi;nl8lgHUs*xKYo>~i{GB;G7OHN*_=)7y(;_$g?knt4+I?vdaUKId}ncB4nOnm4|( z*KVS~S%tUIjeJ@t6WBW@?DLo2_wIPJPu%^Ou(BNS`2IrY+p|z{!&WF93c8VX09T0# z@IAz;cW0l?OhL5Oh39v^c~hP{QJxtaf^D_Li#B^wXqWETrz@!?G~uUGcJJ9=su1L( z4H#TIX<;6gi%Z@NB-}NJ_fj`Wyt{X_(2>QA#f8cbOr*=FC+a3NO4K@eAU_U=khkQgNF)97oCXj4qz_Mik@z@#{;)DyXCX`n zCwEa*-ilT|l~}gB89?mQSC*zHwS_5%h-VoiBpF`UJJcs*WtbhH^(qAGnmV=O8FE{Z zFs2R;xNzo+c7_fUliX7cmA~~%p7cb9m84%4S;1%5+;A~PND>mkNQ2@M?fi*&=MX@R z`(T2vW9$$P(c|6gygONJA3(>_s{nFM1e+6DBpBN5FHLYVFiIq6DNJ;UJWh*g{h_&X zyGuQHc$C0D4?Cn{;?1lhf{HmNL&^o#*u-$dP%Tb%7H+tv^+N`V${fEZQE}8{@ zIbTAzYgg&;n(=UIee2VqXJOLX@vM_Qo@c9o?t@)vW}+W8`4UKKEeFj_gv|oXanL2W zDQAgS$c|UM!OxD6GBa;g2!-c5w)3ZKkZO51|AJi8z*A@o{Ig!##5u^Rm1*%Ca3J8Y z^$ZF2&SR8Z)x{p++dp5iHw#*G+K5_O#}r9|iF4T|3CBgbTl?^O=)8z`$fZz6ZEBcO z#l8C1GzY2ZfwDqEOa>_m519vkRgg2l=j 
ziQAwCxv&0kXIR1T&`)t-1&zQ8Fv5+PPU1i6a$@uz)_iVTgPE4$45i)Oa#JoM{xP!U zf%WxN9&t`FO_N2GqsXGt0C-n+z?08%P)JBw0X8adJQ3ws!dwV(xm9E)w=KaEvEC+E z-y2;=z}-{5Yq@~3Z6=QcvNdk-q;Norv067r8f)eu1sa?%-Ue2TSCQeiy>1I$j+FGh8 zI=!h+)PhIVvQnM__9g3Aa#DFY7?^Ok`?i|-Q+rS@;kH7>67oTCQ^~bOKBvq7%*EYs zGBEF#oEBe{HYOM2dS|_gY_;FFeouUAARp=Fge_<7o>@pz&2+Zg?f2 zJUik8?x<8KNR@>^8L}|HLxJM%4G?qxFh@4!^jtz+I~x(^!$VD#Y+9G1DuhbSwptrl z7(s;cv!j%}%`?j`6=z3P-^FcQ2G7FtjG+LaA{Ooqd!dh89@Yi=N3{$~-;jiMQJxHV zK=FJ}rdm3B2N3b2!Sn35_KGUQK#=2bks>C4f~X(luS$bi+^0Xx(~W!o{nN?e5=+d7 zQ+#?W8ESHFW(Gyr@Pd*|3wg%WW>E3)Y4nU9GV-33eSe2EUWBW5&@giG^n`<;?&4R7 zT!mV-u3lR@4R_m6x%ypZzzZNm>+bF0un^BOQI)P9)0<s=oAkd%-~wz@K8}U|lU6 z(i`PV?fq;dH1ey>o@NkDj5~5P z2z2@rEVT>T8k)#%gBskZMEY1vHtd`NSN+Kr^ps5*hp@Va+`NyvJg19cI`%W@B}jz| zOj-J9L(lNhqC5*_Pb-_|Ryii>W97ouU|sy;NGH9b6#g|sfgpj9Jkd}U<$TkNj1E&& z^hi7CH-ILDV{I(F#z2l82gv*oxzLy60~WB7Z}iR4jWCS!x3%BwKrQ3mu0EDL_xs*V z(>6@KeA_-{9XE$&Wty&JX|;VV>t`pEf08qLdEJ+~pCwhw7taX^ycn3Zfkf$<)vM?c zzQ8S_aEe7{-_qr;YeE-g4QAE)?Kz_+S@Vj%g61y720Hc3$D6}({;C5{dG>pP(tRZ# z^LH(3QZ66FgFo1|bF24anrBTe#&2}2-m46opft9iagXKQ3){ICYW&}UElO;Tj%Dt+ z^Ea&nN7_Mkc1M=fvCO0IH48LhJAiR`4I$`pLjGVj9N)kO;XLzUyqrEsh9#72Q&4-9z>(BDi*_%bB0XEFR*x{ck8b6@vG6 z-(W%0MCfz&No~6L*!{}SFqnyb(=`88hZzX7+lSVER@!Q;vs{cGWGQ{!!G|=VnJBjC zUc_G+9EO!x+tM#M4b&&Lg+Qmo-b$C(zAQ_iq1?_!ln&O(MR?S*^V9U0Q zq!WDd68mFBuxG~gp47mWGuY0{1Q-xw%t9%AfU&Uq=Yfh+6nRR%w}7Q8nv*AT%6q6* zikiC>!HO~u`eqxg(4gZP0zj0Zwbqc`B~aDQ2+)-%x9y{dj&qYh(6m-l*HM9yC#`He zQCtEDUSh~rBb%5vurs#wgYOY#uw~^K<6z!$fP;C)Y_9|khMO65vm!e0tU-VVrhvWEA<2zDd021-CkO0EG7vG&8b4)hSCg~S_E3bTFJ}F)Y zF1_E(%)SIPwRv_Ec2a=dQ8*;INJpjZa(NBQ^)qYdu^)s;uT8dU)sDt^-sQWA8p=@I z{bx9lt5um+CyYmb(nA)2DMDgpI-C^qoKHb#VZh{lh(ZPVPN9-X}%%YrW(Xs+Ce^+ead27Mhv__{7xBJ+2E8 z+iMwN6fsazb~QM_g-g|S_xLfYxzuzoe68l(_A*2RkaeKV%Q$GDT+zUe9>%rcb?31G$qfdR?Ir0=G;0E>5q=+%iH4$UpEyr$r;@TwXSPf z!*0D+Ojegrvt#=37ftdw^wbjI9)~u{7A(`){1*o7SI3R*F$DF(m(foOAx#vs?DWYl zIc6FnCdDc+E>26B0gf zgFdO9&abLWqRhRF9n7SnayddXYx_;6ILU~XN86Tb%8tjdCk|u3vEr9Tqv{Kt%jKFm 
z1~}9L1X`SJ5gF-RA>z}eS71#8%ixWdnav2h59?WZSQ6pVn=6K%3|!ZuChs^T-aS2u z$~|UmdM5BmF``AvPo;J4yO z?bhf!X`4+NtyWX8;(C9(^>t8E#b$r8;;)1#lM1h}DVEYV=*w?2U;>5viG7SB=)Y2_MAp;?w{;2- zOE6OxWCwBRl|%M)vedzzZc@nldNYQyOg=aqTwI?n4PNkPnPWvQT_}{M=US%b+|es* zerk=EGPAiV5!8ERBd+*$4y&&A=is=O2gkn|Iya>C@6q3S!%cy8F|(b^Gcr*6<4ckF z@pmnOj3&wPYq%)yUJX59b=FDjYR!RRmf?`MzSj-=%h2W0T|`X+pxQ=Wz7@?B@ojgy zQ|VeZH|#h69FW~UgCdyoJaPM6_fsAaOD*afg(N|$@C1k)W(=>)B4lW1HFax;9S%j7 zsnrilJtV@e1f8tO>iYPrn{~(4w^gIjYpi9LC`b_N=ru^E>3EnlkUu4zDtgH;>8fL( zds1WKJzu_C<51t~&%4#x0Utv6{?=Yt2x^yzEdf0Zfv{@V(`WR*+8JUq$}i)t@=Xw- z3Giwih7zjW4_Bh1;nhS%%Gy}Wj9-dQe3fkry?HhIP%1jExz^t`$EcC7kkTNze zc0KP2g{S{*G1yeGp3?K)ncKr8c)<=`zZuZH(>ac)3AL(=DEl4& z*MN3E%nQ%kDITB#L6%3(>58Kwi+c5;7U*2xUj2ow6}-`c6M8LflIMUJAwx$CAd#e= z1{=4}>4lbRi+JZbZg3l-_W_0hwBTMFF3D5F=e9s&sf!&n>hmMF1<;YDZ!G3{&3G&z ze+TvFAgCihT5(+23+m!@i>r7{H&IV5OWZYDBmVn1Jt{DXJ{Q!w9vB=7+fCve7&=m? z?(kKoFL8Lt!Hb+%RI8rd^HT}&+0Dl$)YsEaa8&y`xTP5>`s3gN=#nQ(pd%7ukq4%z z+SNLP@~eE_o+DHx-#FKGZ2*@Q@U9%8394@@@~lwh&f<`2pLQ-*FYa-G>`yAoS{ z(ANN?x@|!%GV-10VuzV-F`x(2A*a*8qPT>*YL0?JCJE42%0d(lFGb|7p`091y_NP4 zLnhv9wFR5=hO~cbRu(ZJ6@9C|dXh6KBKx+EszP=`OJzcqc!D2e-CY-sWH zz?s8Ch?Y_*v%fL}TH<=%7li=FC{4t{aS@}?KZR!>cZc3DF3tBd)}sX`av@^fhCX8- zxoYnA%6av_aLV|*Kd;k4?&~$eO0-|5IwajPwLcozIYjPqSKKIfV%jN3?=Ji}IU&Dg z_m1sjhK6fb&TG0lp(#7J<%lY`S3jO2R}bH7-lc?tnJe#{x|4G542^*FWUM3YVJ}`RmL$WogNA`E5eF-A%=-4@?afS**=p&o|R>d!) 
zP<`vUti-Kj8O2MUH95A(4l(Pq{(gi}HKZJCYMW;(8UuQZM_5C4O(R7o?V6R9^Gee% zH(vlVOuYg19IZJ4AN$%^;>B>`_G;X3J3ym1P0LHpJW6>mAM4Z7^U!{<TuI3uq|;@H{hsywT$hpt;v}v>O-J(yc(Z}_V|=;y@`iOQz1D4$3NIc+ch|PF8r$I~+n$@V+>79>o%jHf3aw=<{gF&(z>9Sy4vdGf*A?TQH?!}xI zOB#Csg0H8W#^<}U>DSM${<{-);Rd*LKJ3^Y^VTeIfy}2%qVtwl;0%VtGl^Q;)K7un z-EEnzA;xYN9j0|QnOodhDp^3F6=?sDRpS6%hf%r))%D0xpp1VU83J*%d z=b1-zi`3YI*(^|R7v*!@(X z>YJPnomO-^g*;S334O&YV5e=qtjYT9$qV427;^H4NdVj_fqex}h~}h}iIXi>!xd1E z+Cr0(x4ZywYRJX#vNmJCtAUXnh9b=%)^~37X^FeBMb~D41wC2R_jbo5V;mV=Y1N6U zEv`$a888=!Y~tw;+Ubz;rn$=F8-1TY$d;CF#07rfl2%jwsbkT5K%5uMgS6%Ay_vq{ zXgo zCV%3eOg0c#WsSV`42_j)2w$7dQ!;Y}e8`UnGFm zMzG7XUNjXgxIt2P$P()p6U*c$BcWE@j-_qdY8q~BC1flPJS|n4vhv}!+`*Xh1vb*s zxg!?>0)RB~lyc5wj(yv4Z-xj#$${J0JkvdreNO|GDXViFG>DfBm4PBukw-pxJRHbk z6B*H^9RdCr4^vF^pQP`k&Jp(SlXweW3y!A!NPZ$t18gJUILWei)x3aQQf^dRlj!|;sB-j#0ZWGDsNt*f=yxgnq5<@plx(ieVJorx=DHs=q*-X zfF@cGZ+szt{KpRMARcC@{LR5zq9Ai}-F!ufEnWo|4(YDh4FtI~1dM!wTner;+$YRC zcieb=N*ViWMF08?zEauH(BrmniKU=zBOa8;FK&LmZLQeaX$87_2%uf)iZ6dG(^#&P zs13-94H3?1LT%OlV3!zAj;ey7ZQXQ*o%#N(i~^kp zbfuw)%X8v%aIRmw3Jk`JRO!=+i0aD{HpI^SBD4BUyM|7=kSTdm&tJF{-XMKtW<%w) zqTJ*y7uKYyi3aF*cQlC4ihC<_7+vK*L3mzjKG&Cz-RjgiCsxcO-xRQz-5?hhni}q2 zsO|QGwYQh|d!hI8QgH>PIXP|P>t3}9rg&7q6px_82QYKF5Aykh@aSuLo3AOpGm}U~ z)Mjgj?vPynihe4#vjiE3x8Rb8j2w_rwt#RJr$-Wi-4r(Th)@KLQcE zWI)EnatWX4vxTR;Me2mqXo&zt0#G&XZ2?fbK^N2eF~it6W9;NTaqJ}t8X*(|75_p`z%eWqK+UB!8cTb{am$Ka#)Lf>Ko7P{?3jtt0KiZg@HXOIQla`gN48Ix=7ozL2Tg%hvzK6T3*ZyS zCxMi8BKHB$@j$-eVfif!G_}Wq*0}<0+nbF7S6F!fT<$0nZw6!1UWSc1jItn26YZo9 zn6@P;tT><3F{-<7qa@ab|M6x6^e(lEA7YSKTx|+D$fDayD2GXPR`?UCcXjLZpRi`l znM(2KaB|D;Ij|r_SB~kc&su_IfdbavsMm9f^_dM#fNkE06!P<~_cf8lui0KBe8;-t zBfGPqa*Wt^HQry9&L^z5Ie^M2PTo9P{Gq&1 z%-Y4u@Ry`VKQExW7Q5|Vr{O6Bh+vcS6nEnghXJ&m`af_xOwOIaL6`Fbk_?{$PqTX( zs;`lLGGHJ<6HiFctc{H(@&{LR2@#gv(q<}=Jk+5XnGm3c_MN@g8Ggh5MeXd%~A~;9T`S<$XAezNlM2P_Q$K3nBVdHuUH&cgFgrH`03R{k`#J& zm#|dD=E>q@YE5J!*EwOX@blpD5QpOGkOcYWVB>;B`%g2lmSkG9Ku$rVV@Rd7YfgZ> zv1M`sJ#~EY{ojQUasnEExx%ltF5FFNt&mV>BpG4LHU&vGU^o`Js8|?1$Vk-sbCwpj 
z>UDH|HX!$uIs>2B5Za^det}gA&ZM+*jMpr?&om+R8FV}{J5Wfv95AJmrgOTSctf$v3J3$Pv*^2feN2< z?##5tyt6H%nbq~w@h2jPpga!MQoGJruJKTQqJcaP`pkJDqLQwmt)-wxv9Et)OrWF; znS=;%l0h`?>sWV^KRna(lRzFWFYs7yM*1!qeScu`FNDBp0>?L4xi7o#PFrhv-d*55 zIj_6gJLc%s!cmrjqY$BfO@xe|#HeMRIBUE%E`KbLCIlJLx0$hM>2w=(88Z}+&V48^ zDURX>LN%9Y$nvL`DIK4Q_ezJd!z_R7(by;NDr1W znBjZ6zo%FRLZ35YS^)^1aP4{fZke?2jB*=>7F1x$WpU%r`!=nkwoDwUVg67#x=$+8N%6uxx&*}DI}%Tl zqa6V?c066MLV*qFzD_6Rh`iHv=#mi}gRTyFElzo|g|mYV4%y|NKAzl>5KzU9q91LJ zoe|$98JPcjvT5K_MR{ajFfKgi&KT<9GO6M&mX4J1a3L2c8~3(`ZP;L|#JA-##Os9K zT2VdI!$T!wLtg>80|TfsxT?$a?tzV@5b zB)?&llac9u(ZIx9uL?}iVeTX-HW^2Zp|Zlr-N!O?zc2j?Y8AGsGrQKAU{g6_JlnV4 zwxIFlFdn#zgjl^Y&RGTZ!EQ^&>VzY=_?x2dUo=6ZXLk_L=8ooyE8AXzxv1R={c407 zp!yg|62I~qZ1n_s`%(S~*I%H68_7O8p9(yMA;9vS5#kGU{`HHUhOI~-rBza2w%fe2 zaQI!=Y%w%qFqT5#&kU8=ZW3}UKEpx*wc3+WER}k2?9;i;ba5ydz=UBF9EqUc?BKp|wN*n+Tp)_V|Ro7@mB4d{XR}EKPUkZm(EN6!MHNO zo*u0_H|jD|L)3M2QZSs^^~qVxup>TzP0M1Kla(IawE;wE-cIbv=Ja?Zj!MoM)R}m{ ztqZlfn-ie*IYgOd(4{8=en~YiygRKr>?44xAjW{1j+)*&e$LCdx&C`22vlJPr5f`ZL~&E2lqT|=r;Dj=*G&iZs01i^aDK@$e%AA4|AVTEgSHx4CS zObg*<-tzn^gGC;&8=T6hB+9@Nh9yqnP}!wTt?ANq_yaAjVnh}mta5vXxMGb4v<=`r ze4NM@54OQf31A9V`Zv;7*5S_pP5pi2FLHQ1J@w?MN{N)OZASAPPt`YV3Ni%vedjPecfUE9izAB^ChoG_RD@^qhzA)eAMPOZU{fAj z;`mQ~(@)7LfB0=mAU+`(zQw5afS}lsTdgs0%f#@>95`y@$8x? 
z;+Y2m`Mn=ETF&-`k8AaIi2h1Jd(GTT-^*>zpJpmu1)m+fKAG@k^{|hXRO^{BV#jm2 zdeR-Or@Bg^J78A=(2H+3-uL|)#IQ%Y>AxCWq>BzXkhFgM_W zmjJ`rU?xu>k@q?o1Ald}I6GpV(*O0KK_sH))1kz$R~j)H0*bFXxSrw(Tx5sBhX>}k z5vv7lXZ}b+TrIH_ZmCHANk2yN8AgqKO#c_J8b&bHp@q6s_Yrn%701J^8MS2D{B0OU zzH}SBz?W!xzw*W~bH5p~3T4TA=Z{|r@OU)ReO4M!rT9rnE@hYk=L1yhuH@hbN5%)11qQ#dpg-QD zp;tmdxul2uiDC(^S~V2yAP7bGIOmk`zRY+gH^B4`T%$<`${Ryoc6& z4k_)tM$#(Y+3NWG05nQQVNPF+XB3}=>b3a809{5cxelNKqhN0M({3%UK7Twh6(1Z( zTl_p(t@QndbZ+2bx3A?uCJXv7PVg#Wt*1uS2x44v;0>>XBV8k<~fn_L)rNyG*8c76?mYQqq{hqT`E+ z?bEk%8rxec{|ORq*?}$*m2vz9_E3EzLtcSNh7Fr;ilP$0>K`0G*qC&8Gbrbd<9dB; z;0=mSY?MY;c%cRuURrdW2cS%OTOBxlj%u9|i+4y&PepP|Wx+Q(U(qH@9=aVy&yo+F zcYJYm-@_iwgHU-#E%X8dgK@pA;;39{wRq-*Ae*k=tjch7%!XyZl{+8Q<2--=K)g0A zH7R>17P*G4>hFFUL{9b?21NX`JG=~}mS9|-$oXjGgKOiZhc)(6pTz38yh;RH zs0ftKqU_5$Ix@B~{PKsX-%cR?n};ER|BtADa zAa$QtSDz&IEIUtaFV9$%wqs;ZP*7KgPnb5}C2o+02Km0zN1DBnUhPE8P8SD(YkwaZ zpwQJB6uS80AqKStRQjJe#*Vco_dQFg;9Z`d)bXz=fu|NryT@;bWo+iLQ{^Zs%7WRd zVX`rG34{ma!wM3g`u$oCO0r zT@SeeLrFExH#oney*J0&-C3$JJ_Oio&~6urhqk{(>;xxZ5yI4$_xn_#(f+*0w=@#| zSU#nAZp_&uxbV$jy`=i5@#SYl8(I`hr0T@>+-^kp33MOeK!@-LD9JZZ;?v(_) z^;+?vN~A~2Gcd_0b})f1mu_AkPOuMLo@&n9o!}X-ZORMrGLFmx z(925ppOZlI0hJUa4JO)2XGgTX6$H!Sfv8Z3jFdMAFpWFDD7fMI})0@=djZ^Rxx|;`mtU7jsrE)cw z0NZ5k@|@YO zGnIDP?cWU1McaIAfqWfrXn`IZY)YwLP@UbB+Dk+Dsh*Lq>ocX6L3eFA3y_*);VR^|u&+o%G(9h>H_ef@U ze$=;`Oe>ha&A%TLnV5~DK{U#mNORdwYQ0B%M&6CCW}!ehx}^8b&jm$2xo~{TO1nn1 zfGN~2wA{Jv__K|x%x;2UTHA!}?v@by*{E$`1QPTt5qbdgVi!#lX5CpQ{1UB!-9lHu zIDH;mr01<~vVievzm;;>1~hILu{e>TxDQ{e!S1!uFPXyow#Rw$*#!9XhLCl9r3V}} zDWQvWw8K}`P4=9B`oz+bdJk*4=53m{-P@73Gf#qS7wfGm1Ccn~ZfiJUw|iG~IRyXKzf3az`;r@D$ZLY8f5Oqbua}Y!c_JX}df_HZ?x-AsvdB<(9Z8zBI_>kaOlbiTInccNrEWEcjp z)w+x7I|h4sa||M|Tspdn3hO2xs#vI@3@8V35yNE4OT{>$&In6tkcrOITM|dsqSp|y zlJ6dUFT4=iQK1-(;pBl)l5%%+RcUS`)^1E6IvaM2UMYR7r#r@!^%8Vg4_9yU9S0He z4fXPQiZ_0Q2Ah8XwWbnAsg~_KRqB!d^G7ob2m%D#he3bt6&8l?s|NjMVf38Db_^x; zUf255@2%{#+H!ikZaI-&VX1$%vIn;u4%-cyt?xYeKbN|2BNE)z#lP70+AE 
z-TwQR_Pps&aISezSM9*^7RICfKfn3k2kkw@{~jH~F8(uy|BT^(G37sF_|F*rGlu`0 xMwt%%mnQy86aT|c{~5!7#_<1tV|Y&Iv%S{%hfenT1t#!M Date: Mon, 11 Mar 2024 17:12:54 +0800 Subject: [PATCH 06/59] [BUG][Zeta]job name Display error #6470 (#6471) --- .../seatunnel/engine/e2e/JobExecutionIT.java | 20 ++++++++ .../src/test/resources/valid_job_name.conf | 49 +++++++++++++++++++ .../job/ClientJobExecutionEnvironment.java | 3 +- .../parse/MultipleTableJobConfigParser.java | 3 +- 4 files changed, 73 insertions(+), 2 deletions(-) create mode 100644 seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/resources/valid_job_name.conf diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/JobExecutionIT.java b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/JobExecutionIT.java index f67dd22385b..0d1a647ded8 100644 --- a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/JobExecutionIT.java +++ b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/JobExecutionIT.java @@ -153,6 +153,26 @@ public void testGetErrorInfo() throws ExecutionException, InterruptedException { } } + @Test + public void testValidJobNameInJobConfig() throws ExecutionException, InterruptedException { + Common.setDeployMode(DeployMode.CLIENT); + String filePath = TestUtils.getResource("valid_job_name.conf"); + JobConfig jobConfig = new JobConfig(); + ClientConfig clientConfig = ConfigProvider.locateAndGetClientConfig(); + clientConfig.setClusterName(TestUtils.getClusterName("JobExecutionIT")); + try (SeaTunnelClient engineClient = new SeaTunnelClient(clientConfig)) { + ClientJobExecutionEnvironment jobExecutionEnv = + engineClient.createExecutionContext(filePath, jobConfig, SEATUNNEL_CONFIG); + final ClientJobProxy clientJobProxy = jobExecutionEnv.execute(); + 
CompletableFuture completableFuture = + CompletableFuture.supplyAsync(clientJobProxy::waitForJobComplete); + await().atMost(600000, TimeUnit.MILLISECONDS) + .untilAsserted(() -> Assertions.assertTrue(completableFuture.isDone())); + String value = engineClient.getJobClient().listJobStatus(false); + Assertions.assertTrue(value.contains("\"jobName\":\"valid_job_name\"")); + } + } + @Test public void testGetUnKnownJobID() { diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/resources/valid_job_name.conf b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/resources/valid_job_name.conf new file mode 100644 index 00000000000..11e25cb0d72 --- /dev/null +++ b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/resources/valid_job_name.conf @@ -0,0 +1,49 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + # You can set engine configuration here + job.mode = "BATCH" + job.name = "valid_job_name" + #execution.checkpoint.data-uri = "hdfs://localhost:9000/checkpoint" +} + +source { + # This is a example source plugin **only for test and demonstrate the feature source plugin** + FakeSource { + result_table_name = "fake" + parallelism = 4 + schema = { + fields { + name = "string" + age = "int" + } + } + } +} + +transform { +} + +sink { + console { + source_table_name="fake" + } +} \ No newline at end of file diff --git a/seatunnel-engine/seatunnel-engine-client/src/main/java/org/apache/seatunnel/engine/client/job/ClientJobExecutionEnvironment.java b/seatunnel-engine/seatunnel-engine-client/src/main/java/org/apache/seatunnel/engine/client/job/ClientJobExecutionEnvironment.java index 7d9561edd0f..8e0f0c689bd 100644 --- a/seatunnel-engine/seatunnel-engine-client/src/main/java/org/apache/seatunnel/engine/client/job/ClientJobExecutionEnvironment.java +++ b/seatunnel-engine/seatunnel-engine-client/src/main/java/org/apache/seatunnel/engine/client/job/ClientJobExecutionEnvironment.java @@ -153,12 +153,13 @@ private void uploadActionPluginJar(List actions, Set(jarUrls), new ArrayList<>(connectorJarIdentifiers)); diff --git a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java index 137da2e0a17..0a32b0cf00c 100644 --- a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java +++ b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java @@ -251,7 +251,8 @@ void addCommonPluginJarsToAction(Action action) { private void 
fillJobConfig() { jobConfig.getJobContext().setJobMode(envOptions.get(EnvCommonOptions.JOB_MODE)); if (StringUtils.isEmpty(jobConfig.getName()) - || jobConfig.getName().equals(Constants.LOGO)) { + || jobConfig.getName().equals(Constants.LOGO) + || jobConfig.getName().equals(EnvCommonOptions.JOB_NAME.defaultValue())) { jobConfig.setName(envOptions.get(EnvCommonOptions.JOB_NAME)); } envOptions From ca4a65fc00e9d7ada2c9840b250dae8204880d2d Mon Sep 17 00:00:00 2001 From: MoSence Date: Mon, 11 Mar 2024 18:04:07 +0800 Subject: [PATCH 07/59] feat: jdbc support copy in statement. (#6443) --- docs/en/connector-v2/sink/Jdbc.md | 7 + pom.xml | 6 + seatunnel-common/pom.xml | 5 +- .../connector-jdbc/pom.xml | 1 - .../seatunnel/jdbc/config/JdbcOptions.java | 6 + .../seatunnel/jdbc/config/JdbcSinkConfig.java | 2 + .../exception/JdbcConnectorErrorCode.java | 3 +- .../internal/JdbcOutputFormatBuilder.java | 25 ++- .../CopyManagerBatchStatementExecutor.java | 195 ++++++++++++++++++ .../internal/executor/CopyManagerProxy.java | 92 +++++++++ .../seatunnel/jdbc/JdbcPostgresIT.java | 18 +- ...bc_postgres_source_and_sink_copy_stmt.conf | 50 +++++ tools/dependencies/known-dependencies.txt | 1 + 13 files changed, 403 insertions(+), 8 deletions(-) create mode 100644 seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/executor/CopyManagerBatchStatementExecutor.java create mode 100644 seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/executor/CopyManagerProxy.java create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-3/src/test/resources/jdbc_postgres_source_and_sink_copy_stmt.conf diff --git a/docs/en/connector-v2/sink/Jdbc.md b/docs/en/connector-v2/sink/Jdbc.md index bfe49277eaa..f0b74414a4e 100644 --- a/docs/en/connector-v2/sink/Jdbc.md +++ b/docs/en/connector-v2/sink/Jdbc.md @@ -56,6 +56,7 @@ support `Xa transactions`. 
You can set `is_exactly_once=true` to enable it. | data_save_mode | Enum | No | APPEND_DATA | | custom_sql | String | No | - | | enable_upsert | Boolean | No | true | +| use_copy_statement | Boolean | No | false | ### driver [string] @@ -197,6 +198,12 @@ When data_save_mode selects CUSTOM_PROCESSING, you should fill in the CUSTOM_SQL Enable upsert by primary_keys exist, If the task has no key duplicate data, setting this parameter to `false` can speed up data import +### use_copy_statement [boolean] + +Use `COPY ${table} FROM STDIN` statement to import data. Only drivers with `getCopyAPI()` method connections are supported. e.g.: Postgresql driver `org.postgresql.Driver`. + +NOTICE: `MAP`, `ARRAY`, `ROW` types are not supported. + ## tips In the case of is_exactly_once = "true", Xa transactions are used. This requires database support, and some databases require some setup : diff --git a/pom.xml b/pom.xml index b10e63164a6..6c0bb7e719b 100644 --- a/pom.xml +++ b/pom.xml @@ -104,6 +104,7 @@ 3.5 2.11.0 4.4 + 1.10.0 3.3.0 1.8.0 provided @@ -329,6 +330,11 @@ commons-collections4 ${commons-collections4.version} + + org.apache.commons + commons-csv + ${commons-csv.version} + com.beust diff --git a/seatunnel-common/pom.xml b/seatunnel-common/pom.xml index d462be78ac0..218ec7dd9d5 100644 --- a/seatunnel-common/pom.xml +++ b/seatunnel-common/pom.xml @@ -44,6 +44,10 @@ org.apache.commons commons-collections4 + + org.apache.commons + commons-csv + org.apache.seatunnel @@ -63,7 +67,6 @@ commons-codec commons-codec - diff --git a/seatunnel-connectors-v2/connector-jdbc/pom.xml b/seatunnel-connectors-v2/connector-jdbc/pom.xml index 2f91ef60853..5880036c90f 100644 --- a/seatunnel-connectors-v2/connector-jdbc/pom.xml +++ b/seatunnel-connectors-v2/connector-jdbc/pom.xml @@ -192,7 +192,6 @@ - org.apache.seatunnel connector-common diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/config/JdbcOptions.java 
b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/config/JdbcOptions.java index 0493eb8e4f3..7f0ec48f365 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/config/JdbcOptions.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/config/JdbcOptions.java @@ -151,6 +151,12 @@ public interface JdbcOptions { .defaultValue(false) .withDescription("support upsert by insert only"); + Option USE_COPY_STATEMENT = + Options.key("use_copy_statement") + .booleanType() + .defaultValue(false) + .withDescription("support copy in statement (postgresql)"); + /** source config */ Option PARTITION_COLUMN = Options.key("partition_column") diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/config/JdbcSinkConfig.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/config/JdbcSinkConfig.java index 874eb807f33..8860703ca43 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/config/JdbcSinkConfig.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/config/JdbcSinkConfig.java @@ -43,6 +43,7 @@ public class JdbcSinkConfig implements Serializable { private boolean enableUpsert; @Builder.Default private boolean isPrimaryKeyUpdated = true; private boolean supportUpsertByInsertOnly; + private boolean useCopyStatement; public static JdbcSinkConfig of(ReadonlyConfig config) { JdbcSinkConfigBuilder builder = JdbcSinkConfig.builder(); @@ -55,6 +56,7 @@ public static JdbcSinkConfig of(ReadonlyConfig config) { builder.isPrimaryKeyUpdated(config.get(IS_PRIMARY_KEY_UPDATED)); builder.supportUpsertByInsertOnly(config.get(SUPPORT_UPSERT_BY_INSERT_ONLY)); builder.simpleSql(config.get(JdbcOptions.QUERY)); + 
builder.useCopyStatement(config.get(JdbcOptions.USE_COPY_STATEMENT)); return builder.build(); } } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/exception/JdbcConnectorErrorCode.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/exception/JdbcConnectorErrorCode.java index 22438de84c3..90c1ff30023 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/exception/JdbcConnectorErrorCode.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/exception/JdbcConnectorErrorCode.java @@ -29,7 +29,8 @@ public enum JdbcConnectorErrorCode implements SeaTunnelErrorCode { "JDBC-05", "transaction operation failed, such as (commit, rollback) etc.."), NO_SUITABLE_DIALECT_FACTORY("JDBC-06", "No suitable dialect factory found"), DONT_SUPPORT_SINK("JDBC-07", "The jdbc type don't support sink"), - KERBEROS_AUTHENTICATION_FAILED("JDBC-08", "Kerberos authentication failed"); + KERBEROS_AUTHENTICATION_FAILED("JDBC-08", "Kerberos authentication failed"), + NO_SUPPORT_OPERATION_FAILED("JDBC-09", "The jdbc driver not support operation."); private final String code; private final String description; diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/JdbcOutputFormatBuilder.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/JdbcOutputFormatBuilder.java index 4a296f9b4d8..dee1b58e0ea 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/JdbcOutputFormatBuilder.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/JdbcOutputFormatBuilder.java @@ -27,6 +27,7 @@ import 
org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.executor.BufferReducedBatchStatementExecutor; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.executor.BufferedBatchStatementExecutor; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.executor.CopyManagerBatchStatementExecutor; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.executor.FieldNamedPreparedStatement; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.executor.InsertOrUpdateBatchStatementExecutor; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.executor.JdbcBatchStatementExecutor; @@ -63,7 +64,13 @@ public JdbcOutputFormat build() { jdbcSinkConfig.getDatabase() + "." + jdbcSinkConfig.getTable())); final List primaryKeys = jdbcSinkConfig.getPrimaryKeys(); - if (StringUtils.isNotBlank(jdbcSinkConfig.getSimpleSql())) { + if (jdbcSinkConfig.isUseCopyStatement()) { + statementExecutorFactory = + () -> + createCopyInBufferStatementExecutor( + createCopyInBatchStatementExecutor( + dialect, table, tableSchema)); + } else if (StringUtils.isNotBlank(jdbcSinkConfig.getSimpleSql())) { statementExecutorFactory = () -> createSimpleBufferedExecutor( @@ -185,6 +192,22 @@ private static JdbcBatchStatementExecutor createUpsertExecutor( dialect, database, table, tableSchema, pkNames, isPrimaryKeyUpdated); } + private static JdbcBatchStatementExecutor createCopyInBufferStatementExecutor( + CopyManagerBatchStatementExecutor copyManagerBatchStatementExecutor) { + return new BufferedBatchStatementExecutor( + copyManagerBatchStatementExecutor, Function.identity()); + } + + private static CopyManagerBatchStatementExecutor createCopyInBatchStatementExecutor( + JdbcDialect dialect, String table, TableSchema tableSchema) { + String columns = + Arrays.stream(tableSchema.getFieldNames()) + .map(dialect::quoteIdentifier) + .collect(Collectors.joining(",", "(", ")")); + String 
copyInSql = String.format("COPY %s %s FROM STDIN WITH CSV", table, columns); + return new CopyManagerBatchStatementExecutor(copyInSql, tableSchema); + } + private static JdbcBatchStatementExecutor createInsertOnlyExecutor( JdbcDialect dialect, String database, String table, TableSchema tableSchema) { diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/executor/CopyManagerBatchStatementExecutor.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/executor/CopyManagerBatchStatementExecutor.java new file mode 100644 index 00000000000..b485d39de1e --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/executor/CopyManagerBatchStatementExecutor.java @@ -0,0 +1,195 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.executor; + +import org.apache.seatunnel.api.table.catalog.TableSchema; +import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.table.type.SeaTunnelRow; +import org.apache.seatunnel.api.table.type.SeaTunnelRowType; +import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; +import org.apache.seatunnel.connectors.seatunnel.jdbc.exception.JdbcConnectorErrorCode; +import org.apache.seatunnel.connectors.seatunnel.jdbc.exception.JdbcConnectorException; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.connection.JdbcConnectionProvider; + +import org.apache.commons.csv.CSVFormat; +import org.apache.commons.csv.CSVPrinter; + +import java.io.IOException; +import java.io.StringReader; +import java.lang.reflect.InvocationTargetException; +import java.math.BigDecimal; +import java.sql.Connection; +import java.sql.SQLException; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.util.ArrayList; +import java.util.List; + +public class CopyManagerBatchStatementExecutor implements JdbcBatchStatementExecutor { + + private final String copySql; + private final TableSchema tableSchema; + CopyManagerProxy copyManagerProxy; + CSVFormat csvFormat = CSVFormat.POSTGRESQL_CSV; + CSVPrinter csvPrinter; + + public CopyManagerBatchStatementExecutor(String copySql, TableSchema tableSchema) { + this.copySql = copySql; + this.tableSchema = tableSchema; + } + + public static void copyManagerProxyChecked(JdbcConnectionProvider connectionProvider) { + try (Connection connection = connectionProvider.getConnection()) { + new CopyManagerProxy(connection); + } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { + throw new JdbcConnectorException( + JdbcConnectorErrorCode.NO_SUPPORT_OPERATION_FAILED, + "unable to open CopyManager Operation in this JDBC writer. 
Please configure option use_copy_statement = false.", + e); + } catch (SQLException e) { + throw new JdbcConnectorException( + JdbcConnectorErrorCode.CREATE_DRIVER_FAILED, "unable to open JDBC writer", e); + } + } + + @Override + public void prepareStatements(Connection connection) throws SQLException { + try { + this.copyManagerProxy = new CopyManagerProxy(connection); + this.csvPrinter = new CSVPrinter(new StringBuilder(), csvFormat); + } catch (NoSuchMethodException + | IllegalAccessException + | InvocationTargetException + | IOException e) { + throw new JdbcConnectorException( + JdbcConnectorErrorCode.NO_SUPPORT_OPERATION_FAILED, + "unable to open CopyManager Operation in this JDBC writer. Please configure option use_copy_statement = false.", + e); + } catch (SQLException e) { + throw new JdbcConnectorException( + JdbcConnectorErrorCode.CREATE_DRIVER_FAILED, "unable to open JDBC writer", e); + } + } + + @Override + public void addToBatch(SeaTunnelRow record) throws SQLException { + try { + this.csvPrinter.printRecord(toExtract(record)); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private List toExtract(SeaTunnelRow record) { + SeaTunnelRowType rowType = tableSchema.toPhysicalRowDataType(); + List csvRecord = new ArrayList<>(); + for (int fieldIndex = 0; fieldIndex < rowType.getTotalFields(); fieldIndex++) { + SeaTunnelDataType seaTunnelDataType = rowType.getFieldType(fieldIndex); + Object fieldValue = record.getField(fieldIndex); + if (fieldValue == null) { + csvRecord.add(null); + continue; + } + switch (seaTunnelDataType.getSqlType()) { + case STRING: + csvRecord.add((String) record.getField(fieldIndex)); + break; + case BOOLEAN: + csvRecord.add((Boolean) record.getField(fieldIndex)); + break; + case TINYINT: + csvRecord.add((Byte) record.getField(fieldIndex)); + break; + case SMALLINT: + csvRecord.add((Short) record.getField(fieldIndex)); + break; + case INT: + csvRecord.add((Integer) record.getField(fieldIndex)); + break; + case 
BIGINT: + csvRecord.add((Long) record.getField(fieldIndex)); + break; + case FLOAT: + csvRecord.add((Float) record.getField(fieldIndex)); + break; + case DOUBLE: + csvRecord.add((Double) record.getField(fieldIndex)); + break; + case DECIMAL: + csvRecord.add((BigDecimal) record.getField(fieldIndex)); + break; + case DATE: + LocalDate localDate = (LocalDate) record.getField(fieldIndex); + csvRecord.add((java.sql.Date) java.sql.Date.valueOf(localDate)); + break; + case TIME: + LocalTime localTime = (LocalTime) record.getField(fieldIndex); + csvRecord.add((java.sql.Time) java.sql.Time.valueOf(localTime)); + break; + case TIMESTAMP: + LocalDateTime localDateTime = (LocalDateTime) record.getField(fieldIndex); + csvRecord.add((java.sql.Timestamp) java.sql.Timestamp.valueOf(localDateTime)); + break; + case BYTES: + csvRecord.add( + org.apache.commons.codec.binary.Base64.encodeBase64String( + (byte[]) record.getField(fieldIndex))); + break; + case NULL: + csvRecord.add(null); + break; + case MAP: + case ARRAY: + case ROW: + default: + throw new JdbcConnectorException( + CommonErrorCodeDeprecated.UNSUPPORTED_DATA_TYPE, + "Unexpected value: " + seaTunnelDataType); + } + } + return csvRecord; + } + + @Override + public void executeBatch() throws SQLException { + try { + this.csvPrinter.flush(); + this.copyManagerProxy.doCopy( + copySql, new StringReader(this.csvPrinter.getOut().toString())); + } catch (InvocationTargetException | IllegalAccessException | IOException e) { + throw new JdbcConnectorException( + CommonErrorCodeDeprecated.SQL_OPERATION_FAILED, "Sql command: " + copySql); + } finally { + try { + this.csvPrinter.close(); + this.csvPrinter = new CSVPrinter(new StringBuilder(), csvFormat); + } catch (Exception ignore) { + } + } + } + + @Override + public void closeStatements() throws SQLException { + this.copyManagerProxy = null; + try { + this.csvPrinter.close(); + this.csvPrinter = null; + } catch (Exception ignore) { + } + } +} diff --git 
a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/executor/CopyManagerProxy.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/executor/CopyManagerProxy.java new file mode 100644 index 00000000000..54d99e345d3 --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/executor/CopyManagerProxy.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.executor; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Reader; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.sql.Connection; +import java.sql.SQLException; + +class CopyManagerProxy { + private static final Logger LOG = LoggerFactory.getLogger(CopyManagerProxy.class); + Object connection; + Object copyManager; + Class connectionClazz; + Class copyManagerClazz; + Method getCopyAPIMethod; + Method copyInMethod; + + CopyManagerProxy(Connection connection) + throws NoSuchMethodException, InvocationTargetException, IllegalAccessException, + SQLException { + LOG.info("Proxy connection class: {}", connection.getClass().getName()); + this.connection = connection.unwrap(Connection.class); + LOG.info("Proxy unwrap connection class: {}", this.connection.getClass().getName()); + if (Proxy.isProxyClass(this.connection.getClass())) { + InvocationHandler handler = Proxy.getInvocationHandler(this.connection); + this.connection = getConnectionFromInvocationHandler(handler); + if (null == this.connection) { + throw new InvocationTargetException( + new NullPointerException("Proxy Connection is null.")); + } + LOG.info("Proxy connection class: {}", this.connection.getClass().getName()); + this.connectionClazz = this.connection.getClass(); + } else { + this.connectionClazz = this.connection.getClass(); + } + this.getCopyAPIMethod = this.connectionClazz.getMethod("getCopyAPI"); + this.copyManager = this.getCopyAPIMethod.invoke(this.connection); + this.copyManagerClazz = this.copyManager.getClass(); + this.copyInMethod = this.copyManagerClazz.getMethod("copyIn", String.class, Reader.class); + } + + long doCopy(String sql, Reader reader) + throws InvocationTargetException, IllegalAccessException { + return (long) 
this.copyInMethod.invoke(this.copyManager, sql, reader); + } + + private static Object getConnectionFromInvocationHandler(InvocationHandler handler) + throws IllegalAccessException { + Class handlerClass = handler.getClass(); + LOG.info("InvocationHandler class: {}", handlerClass.getName()); + for (Field declaredField : handlerClass.getDeclaredFields()) { + boolean tempAccessible = declaredField.isAccessible(); + if (!tempAccessible) { + declaredField.setAccessible(true); + } + Object handlerObject = declaredField.get(handler); + if (handlerObject instanceof Connection) { + if (!tempAccessible) { + declaredField.setAccessible(tempAccessible); + } + return handlerObject; + } else { + if (!tempAccessible) { + declaredField.setAccessible(tempAccessible); + } + } + } + return null; + } +} diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-3/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/JdbcPostgresIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-3/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/JdbcPostgresIT.java index a0181fbf816..7961bcb4740 100644 --- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-3/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/JdbcPostgresIT.java +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-3/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/JdbcPostgresIT.java @@ -69,6 +69,7 @@ public class JdbcPostgresIT extends TestSuiteBase implements TestResource { private static final List PG_CONFIG_FILE_LIST = Lists.newArrayList( "/jdbc_postgres_source_and_sink.conf", + "/jdbc_postgres_source_and_sink_copy_stmt.conf", "/jdbc_postgres_source_and_sink_parallel.conf", "/jdbc_postgres_source_and_sink_parallel_upper_lower.conf", "/jdbc_postgres_source_and_sink_xa.conf"); @@ -259,10 +260,19 @@ public void startUp() throws Exception { 
public void testAutoGenerateSQL(TestContainer container) throws IOException, InterruptedException { for (String CONFIG_FILE : PG_CONFIG_FILE_LIST) { - Container.ExecResult execResult = container.executeJob(CONFIG_FILE); - Assertions.assertEquals(0, execResult.getExitCode()); - Assertions.assertIterableEquals(querySql(SOURCE_SQL), querySql(SINK_SQL)); - executeSQL("truncate table pg_e2e_sink_table"); + try { + Container.ExecResult execResult = container.executeJob(CONFIG_FILE); + Assertions.assertEquals( + 0, + execResult.getExitCode(), + CONFIG_FILE + + " job run failed in " + + container.getClass().getSimpleName() + + "."); + Assertions.assertIterableEquals(querySql(SOURCE_SQL), querySql(SINK_SQL)); + } finally { + executeSQL("truncate table pg_e2e_sink_table"); + } log.info(CONFIG_FILE + " e2e test completed"); } } diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-3/src/test/resources/jdbc_postgres_source_and_sink_copy_stmt.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-3/src/test/resources/jdbc_postgres_source_and_sink_copy_stmt.conf new file mode 100644 index 00000000000..fabfdce9ca3 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-3/src/test/resources/jdbc_postgres_source_and_sink_copy_stmt.conf @@ -0,0 +1,50 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +env { + parallelism = 1 + job.mode = "BATCH" +} + +source{ + jdbc{ + url = "jdbc:postgresql://postgresql:5432/test?loggerLevel=OFF" + driver = "org.postgresql.Driver" + user = "test" + password = "test" + query ="""select gid, uuid_col, text_col, varchar_col, char_col, boolean_col, smallint_col, integer_col, bigint_col, decimal_col, numeric_col, real_col, double_precision_col, + smallserial_col, serial_col, bigserial_col, date_col, timestamp_col, bpchar_col, age, name, point, linestring, polygon_colums, multipoint, + multilinestring, multipolygon, geometrycollection, geog, json_col, jsonb_col,xml_col from pg_e2e_source_table""" + partition_column = "varchar_col" + partition_num = 2 + } +} + + +sink { + Jdbc { + driver = org.postgresql.Driver + url = "jdbc:postgresql://postgresql:5432/test?loggerLevel=OFF&stringtype=unspecified" + user = test + password = test + generate_sink_sql = true + database = test + table = public.pg_e2e_sink_table + use_copy_statement = true + primary_keys = ["gid"] + } +} \ No newline at end of file diff --git a/tools/dependencies/known-dependencies.txt b/tools/dependencies/known-dependencies.txt index 9ceb7f18463..988543d53ab 100755 --- a/tools/dependencies/known-dependencies.txt +++ b/tools/dependencies/known-dependencies.txt @@ -3,6 +3,7 @@ commons-collections4-4.4.jar commons-compress-1.20.jar commons-io-2.11.0.jar commons-lang3-3.5.jar +commons-csv-1.10.0.jar config-1.3.3.jar disruptor-3.4.4.jar guava-27.0-jre.jar From 1a02c571a98d02d95895e598b958b31afb832439 Mon Sep 17 00:00:00 2001 From: xiaochen <598457447@qq.com> Date: 
Tue, 12 Mar 2024 11:08:09 +0800 Subject: [PATCH 08/59] [Improve] StarRocksSourceReader use the existing client (#6480) --- .../client/source/StarRocksBeReadClient.java | 25 ++++------- .../source/StarRocksSourceReader.java | 42 +++++++++++++++---- 2 files changed, 41 insertions(+), 26 deletions(-) diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/client/source/StarRocksBeReadClient.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/client/source/StarRocksBeReadClient.java index fdd240b4c5d..07a5a03eba7 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/client/source/StarRocksBeReadClient.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/client/source/StarRocksBeReadClient.java @@ -40,7 +40,7 @@ import java.io.Serializable; import java.util.ArrayList; -import java.util.List; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import static org.apache.seatunnel.connectors.seatunnel.starrocks.exception.StarRocksConnectorErrorCode.CLOSE_BE_READER_FAILED; @@ -55,21 +55,12 @@ public class StarRocksBeReadClient implements Serializable { private String contextId; private int readerOffset = 0; private final SourceConfig sourceConfig; - private final SeaTunnelRowType seaTunnelRowType; + private SeaTunnelRowType seaTunnelRowType; private StarRocksRowBatchReader rowBatch; - - private final List tabletIds; - - private final String queryPlan; protected AtomicBoolean eos = new AtomicBoolean(false); - public StarRocksBeReadClient( - QueryPartition queryPartition, - SourceConfig sourceConfig, - SeaTunnelRowType seaTunnelRowType) { + public StarRocksBeReadClient(String beNodeInfo, SourceConfig sourceConfig) { this.sourceConfig = sourceConfig; - this.seaTunnelRowType = seaTunnelRowType; 
- String beNodeInfo = queryPartition.getBeAddress(); log.debug("Parse StarRocks BE address: '{}'.", beNodeInfo); String[] hostPort = beNodeInfo.split(":"); if (hostPort.length != 2) { @@ -79,8 +70,6 @@ public StarRocksBeReadClient( } this.ip = hostPort[0].trim(); this.port = Integer.parseInt(hostPort[1].trim()); - this.queryPlan = queryPartition.getQueryPlan(); - this.tabletIds = new ArrayList<>(queryPartition.getTabletIds()); TBinaryProtocol.Factory factory = new TBinaryProtocol.Factory(); TSocket socket = new TSocket( @@ -101,10 +90,12 @@ public StarRocksBeReadClient( client = new TStarrocksExternalService.Client(protocol); } - public void openScanner() { + public void openScanner(QueryPartition partition, SeaTunnelRowType seaTunnelRowType) { + this.seaTunnelRowType = seaTunnelRowType; + Set tabletIds = partition.getTabletIds(); TScanOpenParams params = new TScanOpenParams(); - params.setTablet_ids(tabletIds); - params.setOpaqued_query_plan(queryPlan); + params.setTablet_ids(new ArrayList<>(tabletIds)); + params.setOpaqued_query_plan(partition.getQueryPlan()); params.setCluster(DEFAULT_CLUSTER_NAME); params.setDatabase(sourceConfig.getDatabase()); params.setTable(sourceConfig.getTable()); diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSourceReader.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSourceReader.java index 9ccd02b5544..7f68d4e3218 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSourceReader.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSourceReader.java @@ -23,14 +23,18 @@ import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import 
org.apache.seatunnel.connectors.seatunnel.starrocks.client.source.StarRocksBeReadClient; +import org.apache.seatunnel.connectors.seatunnel.starrocks.client.source.model.QueryPartition; import org.apache.seatunnel.connectors.seatunnel.starrocks.config.SourceConfig; +import org.apache.seatunnel.connectors.seatunnel.starrocks.exception.StarRocksConnectorException; import lombok.extern.slf4j.Slf4j; import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Queue; @Slf4j @@ -40,6 +44,7 @@ public class StarRocksSourceReader implements SourceReader clientsPools; private volatile boolean noMoreSplitsAssignment; public StarRocksSourceReader( @@ -87,26 +92,45 @@ public void handleNoMoreSplits() { } private void read(StarRocksSourceSplit split, Collector output) { - StarRocksBeReadClient client = - new StarRocksBeReadClient(split.getPartition(), sourceConfig, seaTunnelRowType); + + QueryPartition partition = split.getPartition(); + String beAddress = partition.getBeAddress(); + StarRocksBeReadClient client = null; + if (clientsPools.containsKey(beAddress)) { + client = clientsPools.get(beAddress); + } else { + client = new StarRocksBeReadClient(beAddress, sourceConfig); + clientsPools.put(beAddress, client); + } // open scanner to be - client.openScanner(); + client.openScanner(partition, seaTunnelRowType); while (client.hasNext()) { SeaTunnelRow seaTunnelRow = client.getNext(); output.collect(seaTunnelRow); } - // close client to be - if (client != null) { - client.close(); - } } @Override - public void open() throws Exception {} + public void open() throws Exception { + clientsPools = new HashMap<>(); + } @Override public void close() throws IOException { - // nothing to do + if (!clientsPools.isEmpty()) { + clientsPools + .values() + .forEach( + client -> { + if (client != null) { + try { + client.close(); + } catch (StarRocksConnectorException e) { + 
log.error("Failed to close reader: ", e); + } + } + }); + } } @Override From bccf9a1845ece1a1fe676f85a635b161e6ce1757 Mon Sep 17 00:00:00 2001 From: hailin0 Date: Tue, 12 Mar 2024 11:40:49 +0800 Subject: [PATCH 09/59] [Hotfix] Fix spark example (#6486) * [Hotfix] Fix spark example * fix codestyle --- .../seatunnel-spark-connector-v2-example/pom.xml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/seatunnel-examples/seatunnel-spark-connector-v2-example/pom.xml b/seatunnel-examples/seatunnel-spark-connector-v2-example/pom.xml index 2b920850a9f..16f974585e9 100644 --- a/seatunnel-examples/seatunnel-spark-connector-v2-example/pom.xml +++ b/seatunnel-examples/seatunnel-spark-connector-v2-example/pom.xml @@ -80,6 +80,17 @@ spark-core_${scala.binary.version} ${spark.2.4.0.version} ${spark.scope} + + + com.thoughtworks.paranamer + paranamer + + + + + com.thoughtworks.paranamer + paranamer + 2.8 From 7c0ea2ec1f6bc5aa95c9dafecc3373e67d84e2c7 Mon Sep 17 00:00:00 2001 From: Guangdong Liu <804167098@qq.com> Date: Tue, 12 Mar 2024 13:21:58 +0800 Subject: [PATCH 10/59] [bugfix] [Zeta] Fix the problem of class loader not releasing when using REST API to submit jobs (#6477) --- .../e2e/classloader/ClassLoaderITBase.java | 119 +++++++++++++++++- .../job/ClientJobExecutionEnvironment.java | 2 +- .../client/LogicalDagGeneratorTest.java | 2 +- .../MultipleTableJobConfigParserTest.java | 10 +- .../core}/classloader/ClassLoaderService.java | 2 +- .../DefaultClassLoaderService.java | 2 +- .../parse/MultipleTableJobConfigParser.java | 20 ++- .../AbstractClassLoaderServiceTest.java | 6 +- .../ClassLoaderServiceCacheModeTest.java | 6 +- .../classloader/ClassLoaderServiceTest.java | 6 +- .../engine/server/SeaTunnelServer.java | 4 +- .../engine/server/TaskExecutionService.java | 2 +- .../rest/RestHttpGetCommandProcessor.java | 2 +- .../rest/RestHttpPostCommandProcessor.java | 3 +- .../rest/RestJobExecutionEnvironment.java | 8 +- .../server/ConnectorPackageServiceTest.java | 5 +- 
.../seatunnel/engine/server/TestUtils.java | 2 +- 17 files changed, 168 insertions(+), 33 deletions(-) rename seatunnel-engine/{seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/service => seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core}/classloader/ClassLoaderService.java (96%) rename seatunnel-engine/{seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/service => seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core}/classloader/DefaultClassLoaderService.java (98%) rename seatunnel-engine/{seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/service => seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core}/classloader/AbstractClassLoaderServiceTest.java (97%) rename seatunnel-engine/{seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/service => seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core}/classloader/ClassLoaderServiceCacheModeTest.java (95%) rename seatunnel-engine/{seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/service => seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core}/classloader/ClassLoaderServiceTest.java (95%) diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/classloader/ClassLoaderITBase.java b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/classloader/ClassLoaderITBase.java index d9b0ad23706..bdc6163c3be 100644 --- a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/classloader/ClassLoaderITBase.java +++ b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/classloader/ClassLoaderITBase.java @@ -20,9 +20,12 @@ import org.apache.seatunnel.common.utils.FileUtils; import 
org.apache.seatunnel.e2e.common.util.ContainerUtil; import org.apache.seatunnel.engine.e2e.SeaTunnelContainer; +import org.apache.seatunnel.engine.server.rest.RestConstant; +import org.awaitility.Awaitility; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.testcontainers.containers.Container; import org.testcontainers.containers.GenericContainer; @@ -31,22 +34,36 @@ import org.testcontainers.utility.DockerLoggerFactory; import org.testcontainers.utility.MountableFile; +import io.restassured.response.Response; + import java.io.File; import java.io.IOException; import java.net.URL; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; +import static io.restassured.RestAssured.given; import static org.apache.seatunnel.e2e.common.util.ContainerUtil.PROJECT_ROOT_PATH; +import static org.hamcrest.Matchers.equalTo; public abstract class ClassLoaderITBase extends SeaTunnelContainer { private static final String CONF_FILE = "/classloader/fake_to_inmemory.conf"; + private static final String http = "http://"; + + private static final String colon = ":"; + abstract boolean cacheMode(); + private static final Path config = Paths.get(SEATUNNEL_HOME, "config"); + + private static final Path binPath = Paths.get(SEATUNNEL_HOME, "bin", SERVER_SHELL); + abstract String seatunnelConfigFileName(); @Test @@ -65,6 +82,96 @@ public void testFakeSourceToInMemorySink() throws IOException, InterruptedExcept } } + @Test + public void testFakeSourceToInMemorySinkForRestApi() throws IOException, InterruptedException { + LOG.info("test classloader with cache mode: {}", cacheMode()); + ContainerUtil.copyConnectorJarToContainer( + server, + CONF_FILE, + getConnectorModulePath(), + getConnectorNamePrefix(), + 
getConnectorType(), + SEATUNNEL_HOME); + Awaitility.await() + .atMost(2, TimeUnit.MINUTES) + .untilAsserted( + () -> { + Response response = + given().get( + http + + server.getHost() + + colon + + server.getFirstMappedPort() + + "/hazelcast/rest/cluster"); + response.then().statusCode(200); + Thread.sleep(10000); + Assertions.assertEquals( + 1, response.jsonPath().getList("members").size()); + }); + for (int i = 0; i < 10; i++) { + // load in memory sink which already leak thread with classloader + given().body( + "{\n" + + "\t\"env\": {\n" + + "\t\t\"parallelism\": 10,\n" + + "\t\t\"job.mode\": \"BATCH\"\n" + + "\t},\n" + + "\t\"source\": [\n" + + "\t\t{\n" + + "\t\t\t\"plugin_name\": \"FakeSource\",\n" + + "\t\t\t\"result_table_name\": \"fake\",\n" + + "\t\t\t\"parallelism\": 10,\n" + + "\t\t\t\"schema\": {\n" + + "\t\t\t\t\"fields\": {\n" + + "\t\t\t\t\t\"name\": \"string\",\n" + + "\t\t\t\t\t\"age\": \"int\",\n" + + "\t\t\t\t\t\"score\": \"double\"\n" + + "\t\t\t\t}\n" + + "\t\t\t}\n" + + "\t\t}\n" + + "\t],\n" + + "\t\"transform\": [],\n" + + "\t\"sink\": [\n" + + "\t\t{\n" + + "\t\t\t\"plugin_name\": \"InMemory\",\n" + + "\t\t\t\"source_table_name\": \"fake\"\n" + + "\t\t}\n" + + "\t]\n" + + "}") + .header("Content-Type", "application/json; charset=utf-8") + .post( + http + + server.getHost() + + colon + + server.getFirstMappedPort() + + RestConstant.SUBMIT_JOB_URL) + .then() + .statusCode(200); + + Awaitility.await() + .atMost(2, TimeUnit.MINUTES) + .untilAsserted( + () -> + given().get( + http + + server.getHost() + + colon + + server.getFirstMappedPort() + + RestConstant.FINISHED_JOBS_INFO + + "/FINISHED") + .then() + .statusCode(200) + .body("[0].jobStatus", equalTo("FINISHED"))); + Thread.sleep(5000); + Assertions.assertTrue(containsDaemonThread()); + if (cacheMode()) { + Assertions.assertEquals(3, getClassLoaderCount()); + } else { + Assertions.assertEquals(2 + i, getClassLoaderCount()); + } + } + } + private int getClassLoaderCount() throws 
IOException, InterruptedException { Map objects = ContainerUtil.getJVMLiveObject(server); String className = @@ -79,7 +186,7 @@ private boolean containsDaemonThread() throws IOException, InterruptedException } @Override - @BeforeAll + @BeforeEach public void startUp() throws Exception { server = new GenericContainer<>(getDockerImage()) @@ -96,7 +203,7 @@ public void startUp() throws Exception { "seatunnel-engine:" + JDK_DOCKER_IMAGE))) .waitingFor(Wait.forListeningPort()); copySeaTunnelStarterToContainer(server); - server.setPortBindings(Collections.singletonList("5801:5801")); + server.setExposedPorts(Collections.singletonList(5801)); server.withCopyFileToContainer( MountableFile.forHostPath( @@ -148,4 +255,10 @@ public void startUp() throws Exception { + "/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/resources/classloader/plugin-mapping.properties"), Paths.get(SEATUNNEL_HOME, "connectors", "plugin-mapping.properties").toString()); } + + @AfterEach + @Override + public void tearDown() throws Exception { + super.tearDown(); + } } diff --git a/seatunnel-engine/seatunnel-engine-client/src/main/java/org/apache/seatunnel/engine/client/job/ClientJobExecutionEnvironment.java b/seatunnel-engine/seatunnel-engine-client/src/main/java/org/apache/seatunnel/engine/client/job/ClientJobExecutionEnvironment.java index 8e0f0c689bd..d92f9722dc3 100644 --- a/seatunnel-engine/seatunnel-engine-client/src/main/java/org/apache/seatunnel/engine/client/job/ClientJobExecutionEnvironment.java +++ b/seatunnel-engine/seatunnel-engine-client/src/main/java/org/apache/seatunnel/engine/client/job/ClientJobExecutionEnvironment.java @@ -85,7 +85,7 @@ protected MultipleTableJobConfigParser getJobConfigParser() { @Override protected LogicalDag getLogicalDag() { - ImmutablePair, Set> immutablePair = getJobConfigParser().parse(); + ImmutablePair, Set> immutablePair = getJobConfigParser().parse(null); actions.addAll(immutablePair.getLeft()); // Enable upload connector jar 
package to engine server, automatically upload connector Jar // packages and dependent third-party Jar packages to the server before job execution. diff --git a/seatunnel-engine/seatunnel-engine-client/src/test/java/org/apache/seatunnel/engine/client/LogicalDagGeneratorTest.java b/seatunnel-engine/seatunnel-engine-client/src/test/java/org/apache/seatunnel/engine/client/LogicalDagGeneratorTest.java index fc9f2cb72f7..3cd9ed7604c 100644 --- a/seatunnel-engine/seatunnel-engine-client/src/test/java/org/apache/seatunnel/engine/client/LogicalDagGeneratorTest.java +++ b/seatunnel-engine/seatunnel-engine-client/src/test/java/org/apache/seatunnel/engine/client/LogicalDagGeneratorTest.java @@ -49,7 +49,7 @@ public void testLogicalGenerator() { IdGenerator idGenerator = new IdGenerator(); ImmutablePair, Set> immutablePair = - new MultipleTableJobConfigParser(filePath, idGenerator, jobConfig).parse(); + new MultipleTableJobConfigParser(filePath, idGenerator, jobConfig).parse(null); LogicalDagGenerator logicalDagGenerator = new LogicalDagGenerator(immutablePair.getLeft(), jobConfig, idGenerator); diff --git a/seatunnel-engine/seatunnel-engine-client/src/test/java/org/apache/seatunnel/engine/client/MultipleTableJobConfigParserTest.java b/seatunnel-engine/seatunnel-engine-client/src/test/java/org/apache/seatunnel/engine/client/MultipleTableJobConfigParserTest.java index 319e7515496..abc81903dee 100644 --- a/seatunnel-engine/seatunnel-engine-client/src/test/java/org/apache/seatunnel/engine/client/MultipleTableJobConfigParserTest.java +++ b/seatunnel-engine/seatunnel-engine-client/src/test/java/org/apache/seatunnel/engine/client/MultipleTableJobConfigParserTest.java @@ -51,7 +51,7 @@ public void testSimpleJobParse() { jobConfig.setJobContext(new JobContext()); MultipleTableJobConfigParser jobConfigParser = new MultipleTableJobConfigParser(filePath, new IdGenerator(), jobConfig); - ImmutablePair, Set> parse = jobConfigParser.parse(); + ImmutablePair, Set> parse = 
jobConfigParser.parse(null); List actions = parse.getLeft(); Assertions.assertEquals(1, actions.size()); Assertions.assertEquals("Sink[0]-LocalFile-MultiTableSink", actions.get(0).getName()); @@ -71,7 +71,7 @@ public void testComplexJobParse() { jobConfig.setJobContext(new JobContext()); MultipleTableJobConfigParser jobConfigParser = new MultipleTableJobConfigParser(filePath, new IdGenerator(), jobConfig); - ImmutablePair, Set> parse = jobConfigParser.parse(); + ImmutablePair, Set> parse = jobConfigParser.parse(null); List actions = parse.getLeft(); Assertions.assertEquals(1, actions.size()); @@ -102,7 +102,7 @@ public void testMultipleSinkName() { jobConfig.setJobContext(new JobContext()); MultipleTableJobConfigParser jobConfigParser = new MultipleTableJobConfigParser(filePath, new IdGenerator(), jobConfig); - ImmutablePair, Set> parse = jobConfigParser.parse(); + ImmutablePair, Set> parse = jobConfigParser.parse(null); List actions = parse.getLeft(); Assertions.assertEquals(2, actions.size()); @@ -122,7 +122,7 @@ public void testMultipleTableSourceWithMultiTableSinkParse() throws IOException Config config = ConfigBuilder.of(Paths.get(filePath)); MultipleTableJobConfigParser jobConfigParser = new MultipleTableJobConfigParser(config, new IdGenerator(), jobConfig); - ImmutablePair, Set> parse = jobConfigParser.parse(); + ImmutablePair, Set> parse = jobConfigParser.parse(null); List actions = parse.getLeft(); Assertions.assertEquals(1, actions.size()); Assertions.assertEquals("Sink[0]-console-MultiTableSink", actions.get(0).getName()); @@ -142,7 +142,7 @@ public void testDuplicatedTransformInOnePipeline() { Config config = ConfigBuilder.of(Paths.get(filePath)); MultipleTableJobConfigParser jobConfigParser = new MultipleTableJobConfigParser(config, new IdGenerator(), jobConfig); - ImmutablePair, Set> parse = jobConfigParser.parse(); + ImmutablePair, Set> parse = jobConfigParser.parse(null); List actions = parse.getLeft(); Assertions.assertEquals("Transform[0]-sql", 
actions.get(0).getUpstream().get(0).getName()); Assertions.assertEquals("Transform[1]-sql", actions.get(1).getUpstream().get(0).getName()); diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/service/classloader/ClassLoaderService.java b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderService.java similarity index 96% rename from seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/service/classloader/ClassLoaderService.java rename to seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderService.java index 2a596c39769..b832882733c 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/service/classloader/ClassLoaderService.java +++ b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderService.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.seatunnel.engine.server.service.classloader; +package org.apache.seatunnel.engine.core.classloader; import java.net.URL; import java.util.Collection; diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/service/classloader/DefaultClassLoaderService.java b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/classloader/DefaultClassLoaderService.java similarity index 98% rename from seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/service/classloader/DefaultClassLoaderService.java rename to seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/classloader/DefaultClassLoaderService.java index 6c647a82d26..36c7ae2f029 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/service/classloader/DefaultClassLoaderService.java +++ b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/classloader/DefaultClassLoaderService.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.seatunnel.engine.server.service.classloader; +package org.apache.seatunnel.engine.core.classloader; import org.apache.seatunnel.engine.common.loader.SeaTunnelChildFirstClassLoader; diff --git a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java index 0a32b0cf00c..395f8b4a1ac 100644 --- a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java +++ b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java @@ -46,8 +46,10 @@ import org.apache.seatunnel.core.starter.utils.ConfigBuilder; import org.apache.seatunnel.engine.common.config.JobConfig; import org.apache.seatunnel.engine.common.exception.JobDefineCheckException; +import org.apache.seatunnel.engine.common.loader.ClassLoaderUtil; import org.apache.seatunnel.engine.common.loader.SeaTunnelChildFirstClassLoader; import org.apache.seatunnel.engine.common.utils.IdGenerator; +import org.apache.seatunnel.engine.core.classloader.ClassLoaderService; import org.apache.seatunnel.engine.core.dag.actions.Action; import org.apache.seatunnel.engine.core.dag.actions.SinkAction; import org.apache.seatunnel.engine.core.dag.actions.SinkConfig; @@ -149,7 +151,7 @@ public MultipleTableJobConfigParser( new JobConfigParser(idGenerator, commonPluginJars, isStartWithSavePoint); } - public ImmutablePair, Set> parse() { + public ImmutablePair, Set> parse(ClassLoaderService classLoaderService) { List sourceConfigs = TypesafeConfigUtils.getConfigList( seaTunnelJobConfig, "source", Collections.emptyList()); @@ -165,8 +167,15 @@ public ImmutablePair, Set> parse() { connectorJars.addAll(commonPluginJars); } ClassLoader parentClassLoader = 
Thread.currentThread().getContextClassLoader(); - ClassLoader classLoader = - new SeaTunnelChildFirstClassLoader(connectorJars, parentClassLoader); + + ClassLoader classLoader; + if (classLoaderService == null) { + classLoader = new SeaTunnelChildFirstClassLoader(connectorJars, parentClassLoader); + } else { + classLoader = + classLoaderService.getClassLoader( + Long.parseLong(jobConfig.getJobContext().getJobId()), connectorJars); + } try { Thread.currentThread().setContextClassLoader(classLoader); ConfigParserUtil.checkGraph(sourceConfigs, transformConfigs, sinkConfigs); @@ -196,6 +205,11 @@ public ImmutablePair, Set> parse() { return new ImmutablePair<>(sinkActions, factoryUrls); } finally { Thread.currentThread().setContextClassLoader(parentClassLoader); + if (classLoaderService != null) { + classLoaderService.releaseClassLoader( + Long.parseLong(jobConfig.getJobContext().getJobId()), connectorJars); + } + ClassLoaderUtil.recycleClassLoaderFromThread(classLoader); } } diff --git a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/service/classloader/AbstractClassLoaderServiceTest.java b/seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/AbstractClassLoaderServiceTest.java similarity index 97% rename from seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/service/classloader/AbstractClassLoaderServiceTest.java rename to seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/AbstractClassLoaderServiceTest.java index 87360951c23..779ab63c5a5 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/service/classloader/AbstractClassLoaderServiceTest.java +++ b/seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/AbstractClassLoaderServiceTest.java @@ -15,17 +15,17 @@ * limitations under the License. 
*/ -package org.apache.seatunnel.engine.server.service.classloader; +package org.apache.seatunnel.engine.core.classloader; import org.apache.seatunnel.engine.common.loader.SeaTunnelChildFirstClassLoader; -import org.apache.curator.shaded.com.google.common.collect.Lists; - import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import com.google.common.collect.Lists; + import java.net.MalformedURLException; import java.net.URL; import java.util.Collections; diff --git a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/service/classloader/ClassLoaderServiceCacheModeTest.java b/seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderServiceCacheModeTest.java similarity index 95% rename from seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/service/classloader/ClassLoaderServiceCacheModeTest.java rename to seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderServiceCacheModeTest.java index 60f249b950c..7c6c569c84e 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/service/classloader/ClassLoaderServiceCacheModeTest.java +++ b/seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderServiceCacheModeTest.java @@ -15,13 +15,13 @@ * limitations under the License. 
*/ -package org.apache.seatunnel.engine.server.service.classloader; - -import org.apache.curator.shaded.com.google.common.collect.Lists; +package org.apache.seatunnel.engine.core.classloader; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import com.google.common.collect.Lists; + import java.net.MalformedURLException; import java.net.URL; diff --git a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/service/classloader/ClassLoaderServiceTest.java b/seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderServiceTest.java similarity index 95% rename from seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/service/classloader/ClassLoaderServiceTest.java rename to seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderServiceTest.java index af72f6ee9bd..0e2fe90af2b 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/service/classloader/ClassLoaderServiceTest.java +++ b/seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderServiceTest.java @@ -15,13 +15,13 @@ * limitations under the License. 
*/ -package org.apache.seatunnel.engine.server.service.classloader; - -import org.apache.curator.shaded.com.google.common.collect.Lists; +package org.apache.seatunnel.engine.core.classloader; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import com.google.common.collect.Lists; + import java.net.MalformedURLException; import java.net.URL; diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/SeaTunnelServer.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/SeaTunnelServer.java index 4bb041e211c..e9dcdca779f 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/SeaTunnelServer.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/SeaTunnelServer.java @@ -21,10 +21,10 @@ import org.apache.seatunnel.engine.common.Constant; import org.apache.seatunnel.engine.common.config.SeaTunnelConfig; import org.apache.seatunnel.engine.common.exception.SeaTunnelEngineException; +import org.apache.seatunnel.engine.core.classloader.ClassLoaderService; +import org.apache.seatunnel.engine.core.classloader.DefaultClassLoaderService; import org.apache.seatunnel.engine.server.execution.ExecutionState; import org.apache.seatunnel.engine.server.execution.TaskGroupLocation; -import org.apache.seatunnel.engine.server.service.classloader.ClassLoaderService; -import org.apache.seatunnel.engine.server.service.classloader.DefaultClassLoaderService; import org.apache.seatunnel.engine.server.service.jar.ConnectorPackageService; import org.apache.seatunnel.engine.server.service.slot.DefaultSlotService; import org.apache.seatunnel.engine.server.service.slot.SlotService; diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java 
b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java index 68fa111db99..e4ff187bb22 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java @@ -27,6 +27,7 @@ import org.apache.seatunnel.engine.common.exception.JobNotFoundException; import org.apache.seatunnel.engine.common.loader.ClassLoaderUtil; import org.apache.seatunnel.engine.common.utils.PassiveCompletableFuture; +import org.apache.seatunnel.engine.core.classloader.ClassLoaderService; import org.apache.seatunnel.engine.core.job.ConnectorJarIdentifier; import org.apache.seatunnel.engine.server.exception.TaskGroupContextNotFoundException; import org.apache.seatunnel.engine.server.execution.ExecutionState; @@ -42,7 +43,6 @@ import org.apache.seatunnel.engine.server.execution.TaskLocation; import org.apache.seatunnel.engine.server.execution.TaskTracker; import org.apache.seatunnel.engine.server.metrics.SeaTunnelMetricsContext; -import org.apache.seatunnel.engine.server.service.classloader.ClassLoaderService; import org.apache.seatunnel.engine.server.service.jar.ServerConnectorPackageClient; import org.apache.seatunnel.engine.server.task.SeaTunnelTask; import org.apache.seatunnel.engine.server.task.TaskGroupImmutableInformation; diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpGetCommandProcessor.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpGetCommandProcessor.java index 08d75891201..81a1047c749 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpGetCommandProcessor.java +++ 
b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpGetCommandProcessor.java @@ -24,6 +24,7 @@ import org.apache.seatunnel.api.common.metrics.JobMetrics; import org.apache.seatunnel.common.utils.JsonUtils; import org.apache.seatunnel.engine.common.Constant; +import org.apache.seatunnel.engine.core.classloader.ClassLoaderService; import org.apache.seatunnel.engine.core.dag.logical.LogicalDag; import org.apache.seatunnel.engine.core.job.JobDAGInfo; import org.apache.seatunnel.engine.core.job.JobImmutableInformation; @@ -35,7 +36,6 @@ import org.apache.seatunnel.engine.server.operation.GetClusterHealthMetricsOperation; import org.apache.seatunnel.engine.server.operation.GetJobMetricsOperation; import org.apache.seatunnel.engine.server.operation.GetJobStatusOperation; -import org.apache.seatunnel.engine.server.service.classloader.ClassLoaderService; import org.apache.seatunnel.engine.server.utils.NodeEngineUtil; import com.hazelcast.cluster.Address; diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpPostCommandProcessor.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpPostCommandProcessor.java index 6f09e0aadf1..777cb609a02 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpPostCommandProcessor.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpPostCommandProcessor.java @@ -124,8 +124,10 @@ private void handleSubmitJob(HttpPostCommand httpPostCommand, String uri) boolean startWithSavePoint = Boolean.parseBoolean(requestParams.get(RestConstant.IS_START_WITH_SAVE_POINT)); + SeaTunnelServer seaTunnelServer = getSeaTunnelServer(); RestJobExecutionEnvironment restJobExecutionEnvironment = new RestJobExecutionEnvironment( + seaTunnelServer, jobConfig, config, 
textCommandService.getNode(), @@ -135,7 +137,6 @@ private void handleSubmitJob(HttpPostCommand httpPostCommand, String uri) : null); JobImmutableInformation jobImmutableInformation = restJobExecutionEnvironment.build(); Long jobId = jobImmutableInformation.getJobId(); - SeaTunnelServer seaTunnelServer = getSeaTunnelServer(); if (seaTunnelServer == null) { NodeEngineUtil.sendOperationToMasterNode( diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestJobExecutionEnvironment.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestJobExecutionEnvironment.java index c1aa84dd6d2..a166d0a4d5a 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestJobExecutionEnvironment.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestJobExecutionEnvironment.java @@ -27,6 +27,7 @@ import org.apache.seatunnel.engine.core.job.AbstractJobEnvironment; import org.apache.seatunnel.engine.core.job.JobImmutableInformation; import org.apache.seatunnel.engine.core.parse.MultipleTableJobConfigParser; +import org.apache.seatunnel.engine.server.SeaTunnelServer; import org.apache.commons.lang3.tuple.ImmutablePair; @@ -47,13 +48,17 @@ public class RestJobExecutionEnvironment extends AbstractJobEnvironment { private final Long jobId; + private final SeaTunnelServer seaTunnelServer; + public RestJobExecutionEnvironment( + SeaTunnelServer seaTunnelServer, JobConfig jobConfig, Config seaTunnelJobConfig, Node node, boolean isStartWithSavePoint, Long jobId) { super(jobConfig, isStartWithSavePoint); + this.seaTunnelServer = seaTunnelServer; this.seaTunnelJobConfig = seaTunnelJobConfig; this.nodeEngine = node.getNodeEngine(); this.jobConfig.setJobContext( @@ -73,7 +78,8 @@ public Long getJobId() { @Override protected LogicalDag getLogicalDag() { - ImmutablePair, Set> immutablePair = 
getJobConfigParser().parse(); + ImmutablePair, Set> immutablePair = + getJobConfigParser().parse(seaTunnelServer.getClassLoaderService()); actions.addAll(immutablePair.getLeft()); jarUrls.addAll(commonPluginJars); jarUrls.addAll(immutablePair.getRight()); diff --git a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/ConnectorPackageServiceTest.java b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/ConnectorPackageServiceTest.java index a5c0569d608..a61715aeafc 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/ConnectorPackageServiceTest.java +++ b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/ConnectorPackageServiceTest.java @@ -199,7 +199,7 @@ public void testRestoreWhenMasterNodeSwitch() throws InterruptedException, IOExc fillJobConfig(jobConfig, envOptions); List commonPluginJars = new ArrayList<>(searchPluginJars()); commonPluginJars.addAll( - new ArrayList( + new ArrayList<>( Common.getThirdPartyJars( jobConfig .getEnvOptions() @@ -220,7 +220,8 @@ public void testRestoreWhenMasterNodeSwitch() throws InterruptedException, IOExc MultipleTableJobConfigParser multipleTableJobConfigParser = new MultipleTableJobConfigParser( filePath, new IdGenerator(), jobConfig, commonPluginJars, false); - ImmutablePair, Set> immutablePair = multipleTableJobConfigParser.parse(); + ImmutablePair, Set> immutablePair = + multipleTableJobConfigParser.parse(null); Set commonJarIdentifiers = new HashSet<>(); // Upload commonPluginJar diff --git a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/TestUtils.java b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/TestUtils.java index 6dac20a34d0..79d487d50a7 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/TestUtils.java +++ 
b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/TestUtils.java @@ -119,7 +119,7 @@ public static LogicalDag createTestLogicalPlan( IdGenerator idGenerator = new IdGenerator(); ImmutablePair, Set> immutablePair = - new MultipleTableJobConfigParser(filePath, idGenerator, jobConfig).parse(); + new MultipleTableJobConfigParser(filePath, idGenerator, jobConfig).parse(null); LogicalDagGenerator logicalDagGenerator = new LogicalDagGenerator(immutablePair.getLeft(), jobConfig, idGenerator); From c15340ce7c09a334f5909391e65d77b14ef88e31 Mon Sep 17 00:00:00 2001 From: Guangdong Liu <804167098@qq.com> Date: Tue, 12 Mar 2024 21:20:49 +0800 Subject: [PATCH 11/59] [Bug] [zeta] Fix null pointer exception when submitting jobs (#6492) --- .../server/rest/RestHttpPostCommandProcessor.java | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpPostCommandProcessor.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpPostCommandProcessor.java index 777cb609a02..6b822520c32 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpPostCommandProcessor.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpPostCommandProcessor.java @@ -100,12 +100,7 @@ public void handle(HttpPostCommand httpPostCommand) { private SeaTunnelServer getSeaTunnelServer() { Map extensionServices = this.textCommandService.getNode().getNodeExtension().createExtensionServices(); - SeaTunnelServer seaTunnelServer = - (SeaTunnelServer) extensionServices.get(Constant.SEATUNNEL_SERVICE_NAME); - if (!seaTunnelServer.isMasterNode()) { - return null; - } - return seaTunnelServer; + return (SeaTunnelServer) extensionServices.get(Constant.SEATUNNEL_SERVICE_NAME); } private void 
handleSubmitJob(HttpPostCommand httpPostCommand, String uri) @@ -137,7 +132,7 @@ private void handleSubmitJob(HttpPostCommand httpPostCommand, String uri) : null); JobImmutableInformation jobImmutableInformation = restJobExecutionEnvironment.build(); Long jobId = jobImmutableInformation.getJobId(); - if (seaTunnelServer == null) { + if (!seaTunnelServer.isMasterNode()) { NodeEngineUtil.sendOperationToMasterNode( getNode().nodeEngine, @@ -171,7 +166,7 @@ private void handleStopJob(HttpPostCommand httpPostCommand, String uri) { } SeaTunnelServer seaTunnelServer = getSeaTunnelServer(); - if (seaTunnelServer == null) { + if (!seaTunnelServer.isMasterNode()) { if (isStopWithSavePoint) { NodeEngineUtil.sendOperationToMasterNode( getNode().nodeEngine, new SavePointJobOperation(jobId)) @@ -183,7 +178,7 @@ private void handleStopJob(HttpPostCommand httpPostCommand, String uri) { } } else { - CoordinatorService coordinatorService = getSeaTunnelServer().getCoordinatorService(); + CoordinatorService coordinatorService = seaTunnelServer.getCoordinatorService(); if (isStopWithSavePoint) { coordinatorService.savePoint(jobId); From aded56299c7f348fdf30047bbac0ba8a7f923a14 Mon Sep 17 00:00:00 2001 From: xiaochen <598457447@qq.com> Date: Thu, 14 Mar 2024 10:04:24 +0800 Subject: [PATCH 12/59] [Improve][Connector-V2] Support TableSourceFactory on StarRocks (#6498) --- docs/en/connector-v2/source/StarRocks.md | 7 ++ .../common/config/TypesafeConfigUtils.java | 33 ------- .../config/TypesafeConfigUtilsTest.java | 17 ---- .../catalog/StarRocksCatalogFactory.java | 3 +- .../starrocks/config/CommonConfig.java | 12 +++ .../starrocks/config/SourceConfig.java | 87 +++++-------------- .../starrocks/sink/StarRocksSinkFactory.java | 3 +- .../starrocks/source/StarRocksSource.java | 68 ++++----------- .../source/StarRocksSourceFactory.java | 22 ++++- .../starrocks/StarRocksFactoryTest.java | 2 + ...rrocks-thrift-to-starrocks-streamload.conf | 1 + 11 files changed, 88 insertions(+), 167 
deletions(-) diff --git a/docs/en/connector-v2/source/StarRocks.md b/docs/en/connector-v2/source/StarRocks.md index ef00d4d7d54..df814105aa5 100644 --- a/docs/en/connector-v2/source/StarRocks.md +++ b/docs/en/connector-v2/source/StarRocks.md @@ -35,6 +35,7 @@ delivers the query plan as a parameter to BE nodes, and then obtains data result | scan_batch_rows | int | no | 1024 | | scan_mem_limit | long | no | 2147483648 | | max_retries | int | no | 3 | +| scan.params.* | string | no | - | ### node_urls [list] @@ -136,6 +137,10 @@ The maximum memory space allowed for a single query in the BE node, in bytes. Th number of retry requests sent to StarRocks +### scan.params. [string] + +The parameter of the scan data from be + ## Example ``` @@ -164,6 +169,8 @@ source { DATETIME_COL = TIMESTAMP DATE_COL = DATE } + scan.params.scanner_thread_pool_thread_num = "3" + } } ``` diff --git a/seatunnel-common/src/main/java/org/apache/seatunnel/common/config/TypesafeConfigUtils.java b/seatunnel-common/src/main/java/org/apache/seatunnel/common/config/TypesafeConfigUtils.java index c931535e186..d80273ece0e 100644 --- a/seatunnel-common/src/main/java/org/apache/seatunnel/common/config/TypesafeConfigUtils.java +++ b/seatunnel-common/src/main/java/org/apache/seatunnel/common/config/TypesafeConfigUtils.java @@ -18,13 +18,11 @@ package org.apache.seatunnel.common.config; import org.apache.seatunnel.shade.com.typesafe.config.Config; -import org.apache.seatunnel.shade.com.typesafe.config.ConfigFactory; import org.apache.seatunnel.shade.com.typesafe.config.ConfigValue; import lombok.NonNull; import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -32,37 +30,6 @@ public final class TypesafeConfigUtils { private TypesafeConfigUtils() {} - /** - * Extract sub config with fixed prefix - * - * @param source config source - * @param prefix config prefix - * @param keepPrefix true if keep prefix - * @deprecated use 
org.apache.seatunnel.api.configuration.Option interface instead - */ - @Deprecated - public static Config extractSubConfig(Config source, String prefix, boolean keepPrefix) { - - // use LinkedHashMap to keep insertion order - Map values = new LinkedHashMap<>(); - - for (Map.Entry entry : source.entrySet()) { - final String key = entry.getKey(); - final String value = String.valueOf(entry.getValue().unwrapped()); - - if (key.startsWith(prefix)) { - - if (keepPrefix) { - values.put(key, value); - } else { - values.put(key.substring(prefix.length()), value); - } - } - } - - return ConfigFactory.parseMap(values); - } - /** * Check if config with specific prefix exists * diff --git a/seatunnel-common/src/test/java/org/apache/seatunnel/common/config/TypesafeConfigUtilsTest.java b/seatunnel-common/src/test/java/org/apache/seatunnel/common/config/TypesafeConfigUtilsTest.java index 3b297a731a8..d9f5d505999 100644 --- a/seatunnel-common/src/test/java/org/apache/seatunnel/common/config/TypesafeConfigUtilsTest.java +++ b/seatunnel-common/src/test/java/org/apache/seatunnel/common/config/TypesafeConfigUtilsTest.java @@ -26,27 +26,10 @@ import java.util.HashMap; import java.util.Map; -import static org.apache.seatunnel.common.config.TypesafeConfigUtils.extractSubConfig; import static org.apache.seatunnel.common.config.TypesafeConfigUtils.hasSubConfig; public class TypesafeConfigUtilsTest { - @Test - public void testExtractSubConfig() { - Config config = getConfig(); - Config subConfig = extractSubConfig(config, "test.", true); - Map configMap = new HashMap<>(); - configMap.put("test.t0", "v0"); - configMap.put("test.t1", "v1"); - Assertions.assertEquals(ConfigFactory.parseMap(configMap), subConfig); - - subConfig = extractSubConfig(config, "test.", false); - configMap = new HashMap<>(); - configMap.put("t0", "v0"); - configMap.put("t1", "v1"); - Assertions.assertEquals(ConfigFactory.parseMap(configMap), subConfig); - } - @Test public void testHasSubConfig() { Config config = 
getConfig(); diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCatalogFactory.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCatalogFactory.java index 94a93b3f56f..124e0257198 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCatalogFactory.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCatalogFactory.java @@ -22,6 +22,7 @@ import org.apache.seatunnel.api.table.catalog.Catalog; import org.apache.seatunnel.api.table.factory.CatalogFactory; import org.apache.seatunnel.api.table.factory.Factory; +import org.apache.seatunnel.connectors.seatunnel.starrocks.config.CommonConfig; import org.apache.seatunnel.connectors.seatunnel.starrocks.config.StarRocksOptions; import org.apache.seatunnel.connectors.seatunnel.starrocks.config.StarRocksSinkOptions; @@ -29,7 +30,7 @@ @AutoService(Factory.class) public class StarRocksCatalogFactory implements CatalogFactory { - public static final String IDENTIFIER = "StarRocks"; + public static final String IDENTIFIER = CommonConfig.CONNECTOR_IDENTITY; @Override public Catalog createCatalog(String catalogName, ReadonlyConfig options) { diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/config/CommonConfig.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/config/CommonConfig.java index ffb9b115940..c8a4775fcff 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/config/CommonConfig.java +++ 
b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/config/CommonConfig.java @@ -19,6 +19,7 @@ import org.apache.seatunnel.api.configuration.Option; import org.apache.seatunnel.api.configuration.Options; +import org.apache.seatunnel.api.configuration.ReadonlyConfig; import lombok.AllArgsConstructor; import lombok.Getter; @@ -31,6 +32,9 @@ @ToString @AllArgsConstructor public class CommonConfig implements Serializable { + + public static final String CONNECTOR_IDENTITY = "StarRocks"; + public static final Option> NODE_URLS = Options.key("nodeUrls") .listType() @@ -67,4 +71,12 @@ public class CommonConfig implements Serializable { private String password; private String database; private String table; + + public CommonConfig(ReadonlyConfig config) { + this.nodeUrls = config.get(NODE_URLS); + this.username = config.get(USERNAME); + this.password = config.get(PASSWORD); + this.database = config.get(DATABASE); + this.table = config.get(TABLE); + } } diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/config/SourceConfig.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/config/SourceConfig.java index 10d0358a8f1..d0698638430 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/config/SourceConfig.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/config/SourceConfig.java @@ -17,18 +17,14 @@ package org.apache.seatunnel.connectors.seatunnel.starrocks.config; -import org.apache.seatunnel.shade.com.typesafe.config.Config; - import org.apache.seatunnel.api.configuration.Option; import org.apache.seatunnel.api.configuration.Options; -import org.apache.seatunnel.common.config.TypesafeConfigUtils; +import 
org.apache.seatunnel.api.configuration.ReadonlyConfig; import lombok.Getter; -import lombok.NonNull; import lombok.Setter; import java.util.HashMap; -import java.util.List; import java.util.Map; @Setter @@ -37,13 +33,26 @@ public class SourceConfig extends CommonConfig { private static final long DEFAULT_SCAN_MEM_LIMIT = 1024 * 1024 * 1024L; - public SourceConfig( - @NonNull List nodeUrls, - @NonNull String username, - @NonNull String password, - @NonNull String database, - @NonNull String table) { - super(nodeUrls, username, password, database, table); + public SourceConfig(ReadonlyConfig config) { + super(config); + this.maxRetries = config.get(MAX_RETRIES); + this.requestTabletSize = config.get(QUERY_TABLET_SIZE); + this.scanFilter = config.get(SCAN_FILTER); + this.connectTimeoutMs = config.get(SCAN_CONNECT_TIMEOUT); + this.batchRows = config.get(SCAN_BATCH_ROWS); + this.keepAliveMin = config.get(SCAN_KEEP_ALIVE_MIN); + this.queryTimeoutSec = config.get(SCAN_QUERY_TIMEOUT_SEC); + this.memLimit = config.get(SCAN_MEM_LIMIT); + + String prefix = STARROCKS_SCAN_CONFIG_PREFIX.key(); + config.toMap() + .forEach( + (key, value) -> { + if (key.startsWith(prefix)) { + this.sourceOptionProps.put( + key.substring(prefix.length()).toLowerCase(), value); + } + }); } public static final Option MAX_RETRIES = @@ -105,57 +114,5 @@ public SourceConfig( private int keepAliveMin = SCAN_KEEP_ALIVE_MIN.defaultValue(); private int batchRows = SCAN_BATCH_ROWS.defaultValue(); private int connectTimeoutMs = SCAN_CONNECT_TIMEOUT.defaultValue(); - private final Map sourceOptionProps = new HashMap<>(); - - public static SourceConfig loadConfig(Config pluginConfig) { - SourceConfig sourceConfig = - new SourceConfig( - pluginConfig.getStringList(NODE_URLS.key()), - pluginConfig.getString(USERNAME.key()), - pluginConfig.getString(PASSWORD.key()), - pluginConfig.getString(DATABASE.key()), - pluginConfig.getString(TABLE.key())); - - if (pluginConfig.hasPath(MAX_RETRIES.key())) { - 
sourceConfig.setMaxRetries(pluginConfig.getInt(MAX_RETRIES.key())); - } - if (pluginConfig.hasPath(QUERY_TABLET_SIZE.key())) { - sourceConfig.setRequestTabletSize(pluginConfig.getInt(QUERY_TABLET_SIZE.key())); - } - if (pluginConfig.hasPath(SCAN_FILTER.key())) { - sourceConfig.setScanFilter(pluginConfig.getString(SCAN_FILTER.key())); - } - if (pluginConfig.hasPath(SCAN_CONNECT_TIMEOUT.key())) { - sourceConfig.setConnectTimeoutMs(pluginConfig.getInt(SCAN_CONNECT_TIMEOUT.key())); - } - if (pluginConfig.hasPath(SCAN_BATCH_ROWS.key())) { - sourceConfig.setBatchRows(pluginConfig.getInt(SCAN_BATCH_ROWS.key())); - } - if (pluginConfig.hasPath(SCAN_KEEP_ALIVE_MIN.key())) { - sourceConfig.setKeepAliveMin(pluginConfig.getInt(SCAN_KEEP_ALIVE_MIN.key())); - } - if (pluginConfig.hasPath(SCAN_QUERY_TIMEOUT_SEC.key())) { - sourceConfig.setQueryTimeoutSec(pluginConfig.getInt(SCAN_QUERY_TIMEOUT_SEC.key())); - } - if (pluginConfig.hasPath(SCAN_MEM_LIMIT.key())) { - sourceConfig.setMemLimit(pluginConfig.getLong(SCAN_MEM_LIMIT.key())); - } - parseSourceOptionProperties(pluginConfig, sourceConfig); - return sourceConfig; - } - - private static void parseSourceOptionProperties( - Config pluginConfig, SourceConfig sourceConfig) { - Config sourceOptionConfig = - TypesafeConfigUtils.extractSubConfig( - pluginConfig, STARROCKS_SCAN_CONFIG_PREFIX.key(), false); - sourceOptionConfig - .entrySet() - .forEach( - entry -> { - final String configKey = entry.getKey().toLowerCase(); - sourceConfig.sourceOptionProps.put( - configKey, (String) entry.getValue().unwrapped()); - }); - } + private Map sourceOptionProps = new HashMap<>(); } diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSinkFactory.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSinkFactory.java index 081645270f9..08fc6906981 100644 --- 
a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSinkFactory.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSinkFactory.java @@ -25,6 +25,7 @@ import org.apache.seatunnel.api.table.factory.Factory; import org.apache.seatunnel.api.table.factory.TableSinkFactory; import org.apache.seatunnel.api.table.factory.TableSinkFactoryContext; +import org.apache.seatunnel.connectors.seatunnel.starrocks.config.CommonConfig; import org.apache.seatunnel.connectors.seatunnel.starrocks.config.SinkConfig; import org.apache.seatunnel.connectors.seatunnel.starrocks.config.StarRocksOptions; import org.apache.seatunnel.connectors.seatunnel.starrocks.config.StarRocksSinkOptions; @@ -42,7 +43,7 @@ public class StarRocksSinkFactory implements TableSinkFactory { @Override public String factoryIdentifier() { - return "StarRocks"; + return CommonConfig.CONNECTOR_IDENTITY; } @Override diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSource.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSource.java index 738400436a3..9bde1b22a38 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSource.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSource.java @@ -17,70 +17,35 @@ package org.apache.seatunnel.connectors.seatunnel.starrocks.source; -import org.apache.seatunnel.shade.com.typesafe.config.Config; - -import org.apache.seatunnel.api.common.PrepareFailException; -import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode; import org.apache.seatunnel.api.source.Boundedness; import 
org.apache.seatunnel.api.source.SeaTunnelSource; import org.apache.seatunnel.api.source.SourceReader; import org.apache.seatunnel.api.source.SourceSplitEnumerator; -import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; -import org.apache.seatunnel.api.table.catalog.schema.TableSchemaOptions; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; -import org.apache.seatunnel.common.config.CheckConfigUtil; -import org.apache.seatunnel.common.config.CheckResult; -import org.apache.seatunnel.common.constants.PluginType; +import org.apache.seatunnel.connectors.seatunnel.starrocks.config.CommonConfig; import org.apache.seatunnel.connectors.seatunnel.starrocks.config.SourceConfig; -import org.apache.seatunnel.connectors.seatunnel.starrocks.exception.StarRocksConnectorException; import com.google.auto.service.AutoService; -import static org.apache.seatunnel.connectors.seatunnel.starrocks.config.CommonConfig.DATABASE; -import static org.apache.seatunnel.connectors.seatunnel.starrocks.config.CommonConfig.NODE_URLS; -import static org.apache.seatunnel.connectors.seatunnel.starrocks.config.CommonConfig.PASSWORD; -import static org.apache.seatunnel.connectors.seatunnel.starrocks.config.CommonConfig.TABLE; -import static org.apache.seatunnel.connectors.seatunnel.starrocks.config.CommonConfig.USERNAME; +import java.util.Collections; +import java.util.List; @AutoService(SeaTunnelSource.class) public class StarRocksSource implements SeaTunnelSource { - private SeaTunnelRowType typeInfo; + private CatalogTable catalogTable; private SourceConfig sourceConfig; @Override public String getPluginName() { - return "StarRocks"; + return CommonConfig.CONNECTOR_IDENTITY; } - @Override - public void prepare(Config pluginConfig) throws PrepareFailException { - CheckResult checkResult = - 
CheckConfigUtil.checkAllExists( - pluginConfig, - NODE_URLS.key(), - DATABASE.key(), - TABLE.key(), - USERNAME.key(), - PASSWORD.key()); - - CheckResult schemaCheckResult = - CheckConfigUtil.checkAllExists(pluginConfig, TableSchemaOptions.SCHEMA.key()); - CheckResult mergedConfigCheck = - CheckConfigUtil.mergeCheckResults(checkResult, schemaCheckResult); - if (!mergedConfigCheck.isSuccess()) { - throw new StarRocksConnectorException( - SeaTunnelAPIErrorCode.CONFIG_VALIDATION_FAILED, - String.format( - "PluginName: %s, PluginType: %s, Message: %s", - getPluginName(), PluginType.SOURCE, mergedConfigCheck.getMsg())); - } - - this.typeInfo = CatalogTableUtil.buildWithConfig(pluginConfig).getSeaTunnelRowType(); - this.sourceConfig = SourceConfig.loadConfig(pluginConfig); + public StarRocksSource(SourceConfig sourceConfig, CatalogTable catalogTable) { + this.sourceConfig = sourceConfig; + this.catalogTable = catalogTable; } @Override @@ -89,13 +54,14 @@ public Boundedness getBoundedness() { } @Override - public SeaTunnelDataType getProducedType() { - return typeInfo; + public List getProducedCatalogTables() { + return Collections.singletonList(catalogTable); } @Override public SourceReader createReader(SourceReader.Context readerContext) { - return new StarRocksSourceReader(readerContext, typeInfo, sourceConfig); + return new StarRocksSourceReader( + readerContext, catalogTable.getSeaTunnelRowType(), sourceConfig); } @Override @@ -104,11 +70,15 @@ public SourceSplitEnumerator restore StarRocksSourceState checkpointState) throws Exception { return new StartRocksSourceSplitEnumerator( - enumeratorContext, sourceConfig, typeInfo, checkpointState); + enumeratorContext, + sourceConfig, + catalogTable.getSeaTunnelRowType(), + checkpointState); } @Override public SourceSplitEnumerator createEnumerator(SourceSplitEnumerator.Context enumeratorContext) { - return new StartRocksSourceSplitEnumerator(enumeratorContext, sourceConfig, typeInfo); + return new 
StartRocksSourceSplitEnumerator( + enumeratorContext, sourceConfig, catalogTable.getSeaTunnelRowType()); } } diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSourceFactory.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSourceFactory.java index fffb5a435ce..1f5e3c16905 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSourceFactory.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSourceFactory.java @@ -17,20 +17,29 @@ package org.apache.seatunnel.connectors.seatunnel.starrocks.source; +import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.configuration.util.OptionRule; import org.apache.seatunnel.api.source.SeaTunnelSource; +import org.apache.seatunnel.api.source.SourceSplit; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; import org.apache.seatunnel.api.table.catalog.schema.TableSchemaOptions; +import org.apache.seatunnel.api.table.connector.TableSource; import org.apache.seatunnel.api.table.factory.Factory; import org.apache.seatunnel.api.table.factory.TableSourceFactory; +import org.apache.seatunnel.api.table.factory.TableSourceFactoryContext; +import org.apache.seatunnel.connectors.seatunnel.starrocks.config.CommonConfig; import org.apache.seatunnel.connectors.seatunnel.starrocks.config.SourceConfig; import com.google.auto.service.AutoService; +import java.io.Serializable; + @AutoService(Factory.class) public class StarRocksSourceFactory implements TableSourceFactory { @Override public String factoryIdentifier() { - return "StarRocks"; + return CommonConfig.CONNECTOR_IDENTITY; } @Override @@ -59,4 
+68,15 @@ public OptionRule optionRule() { public Class getSourceClass() { return StarRocksSource.class; } + + @Override + public + TableSource createSource(TableSourceFactoryContext context) { + ReadonlyConfig config = context.getOptions(); + SourceConfig starRocksSourceConfig = new SourceConfig(config); + CatalogTable catalogTable = CatalogTableUtil.buildWithConfig(config); + return () -> + (SeaTunnelSource) + new StarRocksSource(starRocksSourceConfig, catalogTable); + } } diff --git a/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/StarRocksFactoryTest.java b/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/StarRocksFactoryTest.java index 9bc934aac95..0054f16fc9c 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/StarRocksFactoryTest.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/StarRocksFactoryTest.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.connectors.seatunnel.starrocks; import org.apache.seatunnel.connectors.seatunnel.starrocks.sink.StarRocksSinkFactory; +import org.apache.seatunnel.connectors.seatunnel.starrocks.source.StarRocksSourceFactory; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -27,5 +28,6 @@ class StarRocksFactoryTest { @Test void optionRule() { Assertions.assertNotNull((new StarRocksSinkFactory()).optionRule()); + Assertions.assertNotNull((new StarRocksSourceFactory()).optionRule()); } } diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/resources/starrocks-thrift-to-starrocks-streamload.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/resources/starrocks-thrift-to-starrocks-streamload.conf index 7b4c25af735..91f7b0402db 100644 --- 
a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/resources/starrocks-thrift-to-starrocks-streamload.conf +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/resources/starrocks-thrift-to-starrocks-streamload.conf @@ -46,6 +46,7 @@ source { DATE_COL = DATE } } + scan.params.scanner_thread_pool_thread_num = "3" } } From d159fbe086324b220748c2d0dc095420fe6a1017 Mon Sep 17 00:00:00 2001 From: Shaun Wu <554350076@qq.com> Date: Thu, 14 Mar 2024 12:56:44 +0800 Subject: [PATCH 13/59] [Feature][Connectors-V2][File]support assign encoding for file source/sink (#6489) --- docs/en/connector-v2/sink/CosFile.md | 6 + docs/en/connector-v2/sink/FtpFile.md | 6 + docs/en/connector-v2/sink/HdfsFile.md | 1 + docs/en/connector-v2/sink/LocalFile.md | 18 ++ docs/en/connector-v2/sink/OssFile.md | 6 + docs/en/connector-v2/sink/OssJindoFile.md | 6 + docs/en/connector-v2/sink/S3File.md | 6 + docs/en/connector-v2/sink/SftpFile.md | 6 + docs/en/connector-v2/source/CosFile.md | 6 + docs/en/connector-v2/source/FtpFile.md | 6 + docs/en/connector-v2/source/HdfsFile.md | 6 + docs/en/connector-v2/source/LocalFile.md | 18 ++ docs/en/connector-v2/source/OssFile.md | 6 + docs/en/connector-v2/source/OssJindoFile.md | 6 + docs/en/connector-v2/source/S3File.md | 6 + docs/en/connector-v2/source/SftpFile.md | 6 + release-note.md | 1 + .../common/exception/CommonError.java | 8 + .../common/exception/CommonErrorCode.java | 1 + .../seatunnel/common/utils/EncodingUtils.java | 45 ++++ .../seatunnel/file/config/BaseSinkConfig.java | 6 + .../file/config/BaseSourceConfigOptions.java | 7 + .../file/sink/config/FileSinkConfig.java | 6 + .../seatunnel/file/sink/util/XmlWriter.java | 5 +- .../file/sink/writer/JsonWriteStrategy.java | 8 +- .../file/sink/writer/TextWriteStrategy.java | 7 +- .../file/source/reader/JsonReadStrategy.java | 11 +- .../file/source/reader/OrcReadStrategy.java | 41 +++- .../file/source/reader/TextReadStrategy.java | 10 +- 
.../file/source/reader/XmlReadStrategy.java | 16 +- .../file/writer/ReadStrategyEncodingTest.java | 192 ++++++++++++++++++ .../src/test/resources/encoding/gbk.json | 1 + .../src/test/resources/encoding/gbk.txt | 1 + .../src/test/resources/encoding/gbk.xml | 36 ++++ .../encoding/gbk_use_attr_format.xml | 19 ++ .../resources/encoding/test_read_json.conf | 41 ++++ .../resources/encoding/test_read_text.conf | 41 ++++ .../resources/encoding/test_read_xml.conf | 44 ++++ .../test_read_xml_use_attr_format.conf | 44 ++++ .../local/source/LocalFileSourceFactory.java | 1 + .../e2e/connector/file/local/LocalFileIT.java | 20 ++ .../src/test/resources/json/e2e_gbk.json | 4 + ...fake_to_local_file_json_with_encoding.conf | 86 ++++++++ ...al_file_json_to_console_with_encoding.conf | 66 ++++++ .../src/test/resources/text/e2e_gbk.txt | 4 + .../fake_to_local_file_with_encoding.conf | 85 ++++++++ ...al_file_text_to_console_with_encoding.conf | 66 ++++++ .../format/json/JsonSerializationSchema.java | 12 +- .../text/TextDeserializationSchema.java | 24 ++- .../format/text/TextSerializationSchema.java | 28 ++- 50 files changed, 1078 insertions(+), 24 deletions(-) create mode 100644 seatunnel-common/src/main/java/org/apache/seatunnel/common/utils/EncodingUtils.java create mode 100644 seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ReadStrategyEncodingTest.java create mode 100644 seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk.json create mode 100644 seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk.txt create mode 100644 seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk.xml create mode 100644 seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk_use_attr_format.xml create mode 100644 
seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_json.conf create mode 100644 seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_text.conf create mode 100644 seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_xml.conf create mode 100644 seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_xml_use_attr_format.conf create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/json/e2e_gbk.json create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/json/fake_to_local_file_json_with_encoding.conf create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/json/local_file_json_to_console_with_encoding.conf create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/text/e2e_gbk.txt create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/text/fake_to_local_file_with_encoding.conf create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/text/local_file_text_to_console_with_encoding.conf diff --git a/docs/en/connector-v2/sink/CosFile.md b/docs/en/connector-v2/sink/CosFile.md index 6c88e922947..7d2733e9057 100644 --- a/docs/en/connector-v2/sink/CosFile.md +++ b/docs/en/connector-v2/sink/CosFile.md @@ -61,6 +61,7 @@ By default, we use 2PC commit to ensure `exactly-once` | xml_root_tag | string | no | RECORDS | Only used when file_format is xml. | | xml_row_tag | string | no | RECORD | Only used when file_format is xml. | | xml_use_attr_format | boolean | no | - | Only used when file_format is xml. | +| encoding | string | no | "UTF-8" | Only used when file_format_type is json,text,csv,xml. 
| ### path [string] @@ -205,6 +206,11 @@ Specifies the tag name of the data rows within the XML file. Specifies Whether to process data using the tag attribute format. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to write. This param will be parsed by `Charset.forName(encoding)`. + ## Example For text file format with `have_partition` and `custom_filename` and `sink_columns` diff --git a/docs/en/connector-v2/sink/FtpFile.md b/docs/en/connector-v2/sink/FtpFile.md index 9a3af0e744c..98080012f9f 100644 --- a/docs/en/connector-v2/sink/FtpFile.md +++ b/docs/en/connector-v2/sink/FtpFile.md @@ -60,6 +60,7 @@ By default, we use 2PC commit to ensure `exactly-once` | xml_root_tag | string | no | RECORDS | Only used when file_format is xml. | | xml_row_tag | string | no | RECORD | Only used when file_format is xml. | | xml_use_attr_format | boolean | no | - | Only used when file_format is xml. | +| encoding | string | no | "UTF-8" | Only used when file_format_type is json,text,csv,xml. | ### host [string] @@ -210,6 +211,11 @@ Specifies the tag name of the data rows within the XML file. Specifies Whether to process data using the tag attribute format. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to write. This param will be parsed by `Charset.forName(encoding)`. + ## Example For text file format simple config diff --git a/docs/en/connector-v2/sink/HdfsFile.md b/docs/en/connector-v2/sink/HdfsFile.md index 4df905ff439..319b9dde143 100644 --- a/docs/en/connector-v2/sink/HdfsFile.md +++ b/docs/en/connector-v2/sink/HdfsFile.md @@ -67,6 +67,7 @@ Output data to hdfs file | xml_root_tag | string | no | RECORDS | Only used when file_format is xml, specifies the tag name of the root element within the XML file. 
| | xml_row_tag | string | no | RECORD | Only used when file_format is xml, specifies the tag name of the data rows within the XML file | | xml_use_attr_format | boolean | no | - | Only used when file_format is xml, specifies Whether to process data using the tag attribute format. | +| encoding | string | no | "UTF-8" | Only used when file_format_type is json,text,csv,xml. | ### Tips diff --git a/docs/en/connector-v2/sink/LocalFile.md b/docs/en/connector-v2/sink/LocalFile.md index e16c81c3f3a..33aae02c66b 100644 --- a/docs/en/connector-v2/sink/LocalFile.md +++ b/docs/en/connector-v2/sink/LocalFile.md @@ -56,6 +56,7 @@ By default, we use 2PC commit to ensure `exactly-once` | xml_row_tag | string | no | RECORD | Only used when file_format is xml. | | xml_use_attr_format | boolean | no | - | Only used when file_format is xml. | | enable_header_write | boolean | no | false | Only used when file_format_type is text,csv.
false:don't write header,true:write header. | +| encoding | string | no | "UTF-8" | Only used when file_format_type is json,text,csv,xml. | ### path [string] @@ -188,6 +189,11 @@ Specifies Whether to process data using the tag attribute format. Only used when file_format_type is text,csv.false:don't write header,true:write header. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to write. This param will be parsed by `Charset.forName(encoding)`. + ## Example For orc file format simple config @@ -201,6 +207,18 @@ LocalFile { ``` +For json, text, csv or xml file format with `encoding` + +```hocon + +LocalFile { + path = "/tmp/hive/warehouse/test2" + file_format_type = "text" + encoding = "gbk" +} + +``` + For parquet file format with `sink_columns` ```bash diff --git a/docs/en/connector-v2/sink/OssFile.md b/docs/en/connector-v2/sink/OssFile.md index 4c85121c20c..182c138fba8 100644 --- a/docs/en/connector-v2/sink/OssFile.md +++ b/docs/en/connector-v2/sink/OssFile.md @@ -112,6 +112,7 @@ If write to `csv`, `text` file type, All column will be string. | xml_root_tag | string | no | RECORDS | Only used when file_format is xml. | | xml_row_tag | string | no | RECORD | Only used when file_format is xml. | | xml_use_attr_format | boolean | no | - | Only used when file_format is xml. | +| encoding | string | no | "UTF-8" | Only used when file_format_type is json,text,csv,xml. | ### path [string] @@ -256,6 +257,11 @@ Specifies the tag name of the data rows within the XML file. Specifies Whether to process data using the tag attribute format. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to write. This param will be parsed by `Charset.forName(encoding)`. 
+ ## How to Create an Oss Data Synchronization Jobs The following example demonstrates how to create a data synchronization job that reads data from Fake Source and writes it to the Oss: diff --git a/docs/en/connector-v2/sink/OssJindoFile.md b/docs/en/connector-v2/sink/OssJindoFile.md index 1a55c319704..f1ca20ab5c7 100644 --- a/docs/en/connector-v2/sink/OssJindoFile.md +++ b/docs/en/connector-v2/sink/OssJindoFile.md @@ -65,6 +65,7 @@ By default, we use 2PC commit to ensure `exactly-once` | xml_root_tag | string | no | RECORDS | Only used when file_format is xml. | | xml_row_tag | string | no | RECORD | Only used when file_format is xml. | | xml_use_attr_format | boolean | no | - | Only used when file_format is xml. | +| encoding | string | no | "UTF-8" | Only used when file_format_type is json,text,csv,xml. | ### path [string] @@ -209,6 +210,11 @@ Specifies the tag name of the data rows within the XML file. Specifies Whether to process data using the tag attribute format. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to write. This param will be parsed by `Charset.forName(encoding)`. + ## Example For text file format with `have_partition` and `custom_filename` and `sink_columns` diff --git a/docs/en/connector-v2/sink/S3File.md b/docs/en/connector-v2/sink/S3File.md index a3811ea34ac..afa2e91c194 100644 --- a/docs/en/connector-v2/sink/S3File.md +++ b/docs/en/connector-v2/sink/S3File.md @@ -123,6 +123,7 @@ If write to `csv`, `text` file type, All column will be string. 
| hadoop_s3_properties | map | no | | If you need to add a other option, you could add it here and refer to this [link](https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html) | | schema_save_mode | Enum | no | CREATE_SCHEMA_WHEN_NOT_EXIST | Before turning on the synchronous task, do different treatment of the target path | | data_save_mode | Enum | no | APPEND_DATA | Before opening the synchronous task, the data file in the target path is differently processed | +| encoding | string | no | "UTF-8" | Only used when file_format_type is json,text,csv,xml. | ### path [string] @@ -278,6 +279,11 @@ Option introduction: `APPEND_DATA`:use the path, and add new files in the path for write data. `ERROR_WHEN_DATA_EXISTS`:When there are some data files in the path, an error will is reported. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to write. This param will be parsed by `Charset.forName(encoding)`. + ## Example ### Simple: diff --git a/docs/en/connector-v2/sink/SftpFile.md b/docs/en/connector-v2/sink/SftpFile.md index 448d1dd050d..9169a79b2aa 100644 --- a/docs/en/connector-v2/sink/SftpFile.md +++ b/docs/en/connector-v2/sink/SftpFile.md @@ -59,6 +59,7 @@ By default, we use 2PC commit to ensure `exactly-once` | xml_root_tag | string | no | RECORDS | Only used when file_format is xml. | | xml_row_tag | string | no | RECORD | Only used when file_format is xml. | | xml_use_attr_format | boolean | no | - | Only used when file_format is xml. | +| encoding | string | no | "UTF-8" | Only used when file_format_type is json,text,csv,xml. | ### host [string] @@ -203,6 +204,11 @@ Specifies the tag name of the data rows within the XML file. Specifies Whether to process data using the tag attribute format. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to write. This param will be parsed by `Charset.forName(encoding)`. 
+ ## Example For text file format with `have_partition` and `custom_filename` and `sink_columns` diff --git a/docs/en/connector-v2/source/CosFile.md b/docs/en/connector-v2/source/CosFile.md index 7f0d6020800..973ad8b029c 100644 --- a/docs/en/connector-v2/source/CosFile.md +++ b/docs/en/connector-v2/source/CosFile.md @@ -65,6 +65,7 @@ To use this connector you need put hadoop-cos-{hadoop.version}-{version}.jar and | xml_use_attr_format | boolean | no | - | | file_filter_pattern | string | no | - | | compress_codec | string | no | none | +| encoding | string | no | UTF-8 | | common-options | | no | - | ### path [string] @@ -277,6 +278,11 @@ The compress codec of files and the details that supported as the following show - orc/parquet: automatically recognizes the compression type, no additional settings required. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to read. This param will be parsed by `Charset.forName(encoding)`. + ### common options Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details. diff --git a/docs/en/connector-v2/source/FtpFile.md b/docs/en/connector-v2/source/FtpFile.md index e103c14a9ae..e3df8d20594 100644 --- a/docs/en/connector-v2/source/FtpFile.md +++ b/docs/en/connector-v2/source/FtpFile.md @@ -59,6 +59,7 @@ If you use SeaTunnel Engine, It automatically integrated the hadoop jar when you | xml_use_attr_format | boolean | no | - | | file_filter_pattern | string | no | - | | compress_codec | string | no | none | +| encoding | string | no | UTF-8 | | common-options | | no | - | ### host [string] @@ -258,6 +259,11 @@ The compress codec of files and the details that supported as the following show - orc/parquet: automatically recognizes the compression type, no additional settings required. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to read. 
This param will be parsed by `Charset.forName(encoding)`. + ### common options Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details. diff --git a/docs/en/connector-v2/source/HdfsFile.md b/docs/en/connector-v2/source/HdfsFile.md index 5534dcd9653..06305e481ee 100644 --- a/docs/en/connector-v2/source/HdfsFile.md +++ b/docs/en/connector-v2/source/HdfsFile.md @@ -62,6 +62,7 @@ Read data from hdfs file system. | xml_row_tag | string | no | - | Specifies the tag name of the data rows within the XML file, only used when file_format is xml. | | xml_use_attr_format | boolean | no | - | Specifies whether to process data using the tag attribute format, only used when file_format is xml. | | compress_codec | string | no | none | The compress codec of files | +| encoding | string | no | UTF-8 | | common-options | | no | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details. | ### delimiter/field_delimiter [string] @@ -78,6 +79,11 @@ The compress codec of files and the details that supported as the following show - orc/parquet: automatically recognizes the compression type, no additional settings required. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to read. This param will be parsed by `Charset.forName(encoding)`. + ### Tips > If you use spark/flink, In order to use this connector, You must ensure your spark/flink cluster already integrated hadoop. The tested hadoop version is 2.x. If you use SeaTunnel Engine, It automatically integrated the hadoop jar when you download and install SeaTunnel Engine. You can check the jar package under ${SEATUNNEL_HOME}/lib to confirm this. 
diff --git a/docs/en/connector-v2/source/LocalFile.md b/docs/en/connector-v2/source/LocalFile.md index 172049498cc..dc7efc3c13f 100644 --- a/docs/en/connector-v2/source/LocalFile.md +++ b/docs/en/connector-v2/source/LocalFile.md @@ -59,6 +59,7 @@ If you use SeaTunnel Engine, It automatically integrated the hadoop jar when you | xml_use_attr_format | boolean | no | - | | file_filter_pattern | string | no | - | | compress_codec | string | no | none | +| encoding | string | no | UTF-8 | | common-options | | no | - | | tables_configs | list | no | used to define a multiple table task | @@ -256,6 +257,11 @@ The compress codec of files and the details that supported as the following show - orc/parquet: automatically recognizes the compression type, no additional settings required. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to read. This param will be parsed by `Charset.forName(encoding)`. + ### common options Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details @@ -292,6 +298,18 @@ LocalFile { ``` +For json, text or csv file format with `encoding` + +```hocon + +LocalFile { + path = "/tmp/hive/warehouse/test2" + file_format_type = "text" + encoding = "gbk" +} + +``` + ### Multiple Table ```hocon diff --git a/docs/en/connector-v2/source/OssFile.md b/docs/en/connector-v2/source/OssFile.md index 85d922644de..62ecf19c3b2 100644 --- a/docs/en/connector-v2/source/OssFile.md +++ b/docs/en/connector-v2/source/OssFile.md @@ -209,6 +209,7 @@ If you assign file type to `parquet` `orc`, schema option not required, connecto | xml_row_tag | string | no | - | Specifies the tag name of the data rows within the XML file, only used when file_format is xml. | | xml_use_attr_format | boolean | no | - | Specifies whether to process data using the tag attribute format, only used when file_format is xml. | | compress_codec | string | no | none | Which compress codec the files used. 
| +| encoding | string | no | UTF-8 | | file_filter_pattern | string | no | | `*.txt` means you only need read the files end with `.txt` | | common-options | config | no | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details. | @@ -222,6 +223,11 @@ The compress codec of files and the details that supported as the following show - orc/parquet: automatically recognizes the compression type, no additional settings required. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to read. This param will be parsed by `Charset.forName(encoding)`. + ### file_filter_pattern [string] Filter pattern, which used for filtering files. diff --git a/docs/en/connector-v2/source/OssJindoFile.md b/docs/en/connector-v2/source/OssJindoFile.md index d1a28265539..3c1847608c6 100644 --- a/docs/en/connector-v2/source/OssJindoFile.md +++ b/docs/en/connector-v2/source/OssJindoFile.md @@ -69,6 +69,7 @@ It only supports hadoop version **2.9.X+**. | xml_use_attr_format | boolean | no | - | | file_filter_pattern | string | no | - | | compress_codec | string | no | none | +| encoding | string | no | UTF-8 | | common-options | | no | - | ### path [string] @@ -269,6 +270,11 @@ The compress codec of files and the details that supported as the following show - orc/parquet: automatically recognizes the compression type, no additional settings required. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to read. This param will be parsed by `Charset.forName(encoding)`. + ### common options Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details. 
diff --git a/docs/en/connector-v2/source/S3File.md b/docs/en/connector-v2/source/S3File.md index 0387af044d6..1cf8d43fe03 100644 --- a/docs/en/connector-v2/source/S3File.md +++ b/docs/en/connector-v2/source/S3File.md @@ -217,6 +217,7 @@ If you assign file type to `parquet` `orc`, schema option not required, connecto | xml_row_tag | string | no | - | Specifies the tag name of the data rows within the XML file, only valid for XML files. | | xml_use_attr_format | boolean | no | - | Specifies whether to process data using the tag attribute format, only valid for XML files. | | compress_codec | string | no | none | +| encoding | string | no | UTF-8 | | common-options | | no | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details. | ### delimiter/field_delimiter [string] @@ -233,6 +234,11 @@ The compress codec of files and the details that supported as the following show - orc/parquet: automatically recognizes the compression type, no additional settings required. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to read. This param will be parsed by `Charset.forName(encoding)`. + ## Example 1. In this example, We read data from s3 path `s3a://seatunnel-test/seatunnel/text` and the file type is orc in this path. diff --git a/docs/en/connector-v2/source/SftpFile.md b/docs/en/connector-v2/source/SftpFile.md index 0f179749fbc..a5a4f221686 100644 --- a/docs/en/connector-v2/source/SftpFile.md +++ b/docs/en/connector-v2/source/SftpFile.md @@ -91,6 +91,7 @@ The File does not have a specific type list, and we can indicate which SeaTunnel | xml_use_attr_format | boolean | no | - | Specifies whether to process data using the tag attribute format, only used when file_format is xml. | | schema | Config | No | - | Please check #schema below | | compress_codec | String | No | None | The compress codec of files and the details that supported as the following shown:
- txt: `lzo` `None`
- json: `lzo` `None`
- csv: `lzo` `None`
- orc: `lzo` `snappy` `lz4` `zlib` `None`
- parquet: `lzo` `snappy` `lz4` `gzip` `brotli` `zstd` `None`
Tips: excel type does Not support any compression format | +| encoding | string | no | UTF-8 | | common-options | | No | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details. | ### file_format_type [string] @@ -169,6 +170,11 @@ The compress codec of files and the details that supported as the following show - orc/parquet: automatically recognizes the compression type, no additional settings required. +### encoding [string] + +Only used when file_format_type is json,text,csv,xml. +The encoding of the file to read. This param will be parsed by `Charset.forName(encoding)`. + ### schema [config] #### fields [Config] diff --git a/release-note.md b/release-note.md index b399c161e39..6c325864a83 100644 --- a/release-note.md +++ b/release-note.md @@ -52,6 +52,7 @@ - [Connector-v2] [Clickhouse] fix get clickhouse local table name with closing bracket from distributed table engineFull (#4710) - [Connector-v2] [CDC] Fix jdbc connection leak for mysql (#5037) - [Connector-v2] [File] Fix WriteStrategy parallel writing thread unsafe issue #5546 +- [Connector-v2] [File] Support assign encoding for file source/sink (#5973) ### Zeta(ST-Engine) diff --git a/seatunnel-common/src/main/java/org/apache/seatunnel/common/exception/CommonError.java b/seatunnel-common/src/main/java/org/apache/seatunnel/common/exception/CommonError.java index 65312d87460..c491666a355 100644 --- a/seatunnel-common/src/main/java/org/apache/seatunnel/common/exception/CommonError.java +++ b/seatunnel-common/src/main/java/org/apache/seatunnel/common/exception/CommonError.java @@ -22,6 +22,8 @@ import org.apache.seatunnel.common.constants.PluginType; +import org.apache.commons.collections4.map.SingletonMap; + import java.util.HashMap; import java.util.Map; @@ -35,6 +37,7 @@ import static org.apache.seatunnel.common.exception.CommonErrorCode.GET_CATALOG_TABLE_WITH_UNSUPPORTED_TYPE_ERROR; import static 
org.apache.seatunnel.common.exception.CommonErrorCode.JSON_OPERATION_FAILED; import static org.apache.seatunnel.common.exception.CommonErrorCode.UNSUPPORTED_DATA_TYPE; +import static org.apache.seatunnel.common.exception.CommonErrorCode.UNSUPPORTED_ENCODING; import static org.apache.seatunnel.common.exception.CommonErrorCode.WRITE_SEATUNNEL_ROW_ERROR; /** @@ -92,6 +95,11 @@ public static SeaTunnelRuntimeException unsupportedDataType( return new SeaTunnelRuntimeException(UNSUPPORTED_DATA_TYPE, params); } + public static SeaTunnelRuntimeException unsupportedEncoding(String encoding) { + Map params = new SingletonMap<>("encoding", encoding); + return new SeaTunnelRuntimeException(UNSUPPORTED_ENCODING, params); + } + public static SeaTunnelRuntimeException convertToSeaTunnelTypeError( String connector, PluginType pluginType, String dataType, String field) { Map params = new HashMap<>(); diff --git a/seatunnel-common/src/main/java/org/apache/seatunnel/common/exception/CommonErrorCode.java b/seatunnel-common/src/main/java/org/apache/seatunnel/common/exception/CommonErrorCode.java index 54710a14283..326187e6973 100644 --- a/seatunnel-common/src/main/java/org/apache/seatunnel/common/exception/CommonErrorCode.java +++ b/seatunnel-common/src/main/java/org/apache/seatunnel/common/exception/CommonErrorCode.java @@ -24,6 +24,7 @@ public enum CommonErrorCode implements SeaTunnelErrorCode { "COMMON-02", " JSON convert/parse '' operation failed."), UNSUPPORTED_DATA_TYPE( "COMMON-07", "'' unsupported data type '' of ''"), + UNSUPPORTED_ENCODING("COMMON-08", "unsupported encoding ''"), CONVERT_TO_SEATUNNEL_TYPE_ERROR( "COMMON-16", "'' unsupported convert type '' of '' to SeaTunnel data type."), diff --git a/seatunnel-common/src/main/java/org/apache/seatunnel/common/utils/EncodingUtils.java b/seatunnel-common/src/main/java/org/apache/seatunnel/common/utils/EncodingUtils.java new file mode 100644 index 00000000000..f15f8a0afb5 --- /dev/null +++ 
b/seatunnel-common/src/main/java/org/apache/seatunnel/common/utils/EncodingUtils.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.common.utils; + +import org.apache.seatunnel.common.exception.CommonError; + +import org.apache.commons.lang3.StringUtils; + +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; + +public class EncodingUtils { + + /** + * try to parse charset by encoding name. such as ISO-8859-1, GBK, UTF-8. 
If failed, will use + * UTF-8 as the default charset + * + * @param encoding the charset name + */ + public static Charset tryParseCharset(String encoding) { + if (StringUtils.isBlank(encoding)) { + return StandardCharsets.UTF_8; + } + try { + return Charset.forName(encoding); + } catch (Exception e) { + throw CommonError.unsupportedEncoding(encoding); + } + } +} diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseSinkConfig.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseSinkConfig.java index c7d4576f288..394ccc5ceb3 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseSinkConfig.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseSinkConfig.java @@ -185,6 +185,12 @@ public class BaseSinkConfig { .defaultValue(FileFormat.CSV) .withDescription("File format type, e.g. csv, orc, parquet, text"); + public static final Option ENCODING = + Options.key("encoding") + .stringType() + .defaultValue("UTF-8") + .withDescription("The encoding of output file, e.g. 
UTF-8, ISO-8859-1...."); + public static final Option> SINK_COLUMNS = Options.key("sink_columns") .listType() diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseSourceConfigOptions.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseSourceConfigOptions.java index 4e4c0bbef5f..a96fc4658c3 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseSourceConfigOptions.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseSourceConfigOptions.java @@ -51,6 +51,13 @@ public class BaseSourceConfigOptions { .withDescription( "The separator between columns in a row of data. Only needed by `text` file format"); + public static final Option ENCODING = + Options.key("encoding") + .stringType() + .defaultValue("UTF-8") + .withDescription( + "The encoding of the file to read, e.g. 
UTF-8, ISO-8859-1...."); + public static final Option DATE_FORMAT = Options.key("date_format") .enumType(DateUtils.Formatter.class) diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/config/FileSinkConfig.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/config/FileSinkConfig.java index 7fe10224b71..883a45d9cbf 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/config/FileSinkConfig.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/config/FileSinkConfig.java @@ -62,6 +62,8 @@ public class FileSinkConfig extends BaseFileSinkConfig implements PartitionConfi private boolean isEnableTransaction = BaseSinkConfig.IS_ENABLE_TRANSACTION.defaultValue(); + private String encoding = BaseSinkConfig.ENCODING.defaultValue(); + // ---------------------generator by config params------------------- private List sinkColumnsIndexInRow; @@ -131,6 +133,10 @@ public FileSinkConfig(@NonNull Config config, @NonNull SeaTunnelRowType seaTunne config.getBoolean(BaseSinkConfig.IS_ENABLE_TRANSACTION.key()); } + if (config.hasPath(BaseSinkConfig.ENCODING.key())) { + this.encoding = config.getString(BaseSinkConfig.ENCODING.key()); + } + if (this.isEnableTransaction && !this.fileNameExpression.contains(BaseSinkConfig.TRANSACTION_EXPRESSION)) { throw new FileConnectorException( diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/util/XmlWriter.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/util/XmlWriter.java index 2617817f7d7..b5483594426 100644 --- 
a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/util/XmlWriter.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/util/XmlWriter.java @@ -21,6 +21,7 @@ import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; +import org.apache.seatunnel.common.utils.EncodingUtils; import org.apache.seatunnel.common.utils.JsonUtils; import org.apache.seatunnel.connectors.seatunnel.file.exception.FileConnectorException; import org.apache.seatunnel.connectors.seatunnel.file.sink.config.FileSinkConfig; @@ -33,6 +34,7 @@ import java.io.IOException; import java.io.OutputStream; +import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.util.AbstractMap; import java.util.List; @@ -139,7 +141,8 @@ private void setXmlOutputFormat() { this.format.setNewLineAfterDeclaration(true); this.format.setSuppressDeclaration(false); this.format.setExpandEmptyElements(false); - this.format.setEncoding(StandardCharsets.UTF_8.name()); this.format.setIndent("\t"); + Charset charset = EncodingUtils.tryParseCharset(fileSinkConfig.getEncoding()); + this.format.setEncoding(charset.name()); } } diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/JsonWriteStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/JsonWriteStrategy.java index bb6ca9ec600..f95973f4cfc 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/JsonWriteStrategy.java +++ 
b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/JsonWriteStrategy.java @@ -22,6 +22,7 @@ import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.exception.CommonError; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; +import org.apache.seatunnel.common.utils.EncodingUtils; import org.apache.seatunnel.connectors.seatunnel.file.exception.FileConnectorException; import org.apache.seatunnel.connectors.seatunnel.file.sink.config.FileSinkConfig; import org.apache.seatunnel.format.json.JsonSerializationSchema; @@ -33,6 +34,7 @@ import java.io.IOException; import java.io.OutputStream; +import java.nio.charset.Charset; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -42,12 +44,14 @@ public class JsonWriteStrategy extends AbstractWriteStrategy { private SerializationSchema serializationSchema; private final LinkedHashMap beingWrittenOutputStream; private final Map isFirstWrite; + private final Charset charset; public JsonWriteStrategy(FileSinkConfig textFileSinkConfig) { super(textFileSinkConfig); this.beingWrittenOutputStream = new LinkedHashMap<>(); this.isFirstWrite = new HashMap<>(); - this.rowDelimiter = textFileSinkConfig.getRowDelimiter().getBytes(); + this.charset = EncodingUtils.tryParseCharset(textFileSinkConfig.getEncoding()); + this.rowDelimiter = textFileSinkConfig.getRowDelimiter().getBytes(charset); } @Override @@ -55,7 +59,7 @@ public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { super.setSeaTunnelRowTypeInfo(seaTunnelRowType); this.serializationSchema = new JsonSerializationSchema( - buildSchemaWithRowType(seaTunnelRowType, sinkColumnsIndexInRow)); + buildSchemaWithRowType(seaTunnelRowType, sinkColumnsIndexInRow), charset); } @Override diff --git 
a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/TextWriteStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/TextWriteStrategy.java index 03b6d65dedb..621048fb39a 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/TextWriteStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/sink/writer/TextWriteStrategy.java @@ -24,6 +24,7 @@ import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; import org.apache.seatunnel.common.utils.DateTimeUtils; import org.apache.seatunnel.common.utils.DateUtils; +import org.apache.seatunnel.common.utils.EncodingUtils; import org.apache.seatunnel.common.utils.TimeUtils; import org.apache.seatunnel.connectors.seatunnel.file.config.FileFormat; import org.apache.seatunnel.connectors.seatunnel.file.exception.FileConnectorException; @@ -37,6 +38,7 @@ import java.io.IOException; import java.io.OutputStream; +import java.nio.charset.Charset; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -51,6 +53,7 @@ public class TextWriteStrategy extends AbstractWriteStrategy { private final TimeUtils.Formatter timeFormat; private final FileFormat fileFormat; private final Boolean enableHeaderWriter; + private final Charset charset; private SerializationSchema serializationSchema; public TextWriteStrategy(FileSinkConfig fileSinkConfig) { @@ -64,6 +67,7 @@ public TextWriteStrategy(FileSinkConfig fileSinkConfig) { this.timeFormat = fileSinkConfig.getTimeFormat(); this.fileFormat = fileSinkConfig.getFileFormat(); this.enableHeaderWriter = fileSinkConfig.getEnableHeaderWriter(); + this.charset = EncodingUtils.tryParseCharset(fileSinkConfig.getEncoding()); } 
@Override @@ -77,6 +81,7 @@ public void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { .dateFormatter(dateFormat) .dateTimeFormatter(dateTimeFormat) .timeFormatter(timeFormat) + .charset(charset) .build(); } @@ -89,7 +94,7 @@ public void write(@NonNull SeaTunnelRow seaTunnelRow) { if (isFirstWrite.get(filePath)) { isFirstWrite.put(filePath, false); } else { - fsDataOutputStream.write(rowDelimiter.getBytes()); + fsDataOutputStream.write(rowDelimiter.getBytes(charset)); } fsDataOutputStream.write( serializationSchema.serialize( diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/JsonReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/JsonReadStrategy.java index 0535241af0f..6c58e368721 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/JsonReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/JsonReadStrategy.java @@ -17,6 +17,7 @@ package org.apache.seatunnel.connectors.seatunnel.file.source.reader; +import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.serialization.DeserializationSchema; import org.apache.seatunnel.api.source.Collector; import org.apache.seatunnel.api.table.type.SeaTunnelRow; @@ -43,6 +44,7 @@ public class JsonReadStrategy extends AbstractReadStrategy { private DeserializationSchema deserializationSchema; private CompressFormat compressFormat = BaseSourceConfigOptions.COMPRESS_CODEC.defaultValue(); + private String encoding = BaseSourceConfigOptions.ENCODING.defaultValue(); @Override public void init(HadoopConf conf) { @@ -52,6 +54,10 @@ public void init(HadoopConf conf) { 
pluginConfig.getString(BaseSourceConfigOptions.COMPRESS_CODEC.key()); compressFormat = CompressFormat.valueOf(compressCodec.toUpperCase()); } + encoding = + ReadonlyConfig.fromConfig(pluginConfig) + .getOptional(BaseSourceConfigOptions.ENCODING) + .orElse(StandardCharsets.UTF_8.name()); } @Override @@ -87,13 +93,14 @@ public void read(String path, String tableId, Collector output) break; } try (BufferedReader reader = - new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) { + new BufferedReader(new InputStreamReader(inputStream, encoding))) { reader.lines() .forEach( line -> { try { SeaTunnelRow seaTunnelRow = - deserializationSchema.deserialize(line.getBytes()); + deserializationSchema.deserialize( + line.getBytes(StandardCharsets.UTF_8)); if (isMergePartition) { int index = seaTunnelRowType.getTotalFields(); for (String value : partitionsMap.values()) { diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/OrcReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/OrcReadStrategy.java index 4f7be397f0e..79158a54232 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/OrcReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/OrcReadStrategy.java @@ -17,6 +17,7 @@ package org.apache.seatunnel.connectors.seatunnel.file.source.reader; +import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.source.Collector; import org.apache.seatunnel.api.table.type.ArrayType; import org.apache.seatunnel.api.table.type.BasicType; @@ -28,6 +29,7 @@ import org.apache.seatunnel.api.table.type.SeaTunnelRow; import 
org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; +import org.apache.seatunnel.connectors.seatunnel.file.config.BaseSourceConfigOptions; import org.apache.seatunnel.connectors.seatunnel.file.exception.FileConnectorErrorCode; import org.apache.seatunnel.connectors.seatunnel.file.exception.FileConnectorException; @@ -56,6 +58,8 @@ import java.io.IOException; import java.math.BigDecimal; import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.sql.Timestamp; import java.time.LocalDate; import java.util.ArrayList; @@ -354,17 +358,48 @@ private Object readLongVal(ColumnVector colVec, TypeDescription colType, int row } private Object readBytesVal(ColumnVector colVec, TypeDescription typeDescription, int rowNum) { + Charset charset = StandardCharsets.UTF_8; + if (pluginConfig != null) { + charset = + ReadonlyConfig.fromConfig(pluginConfig) + .getOptional(BaseSourceConfigOptions.ENCODING) + .map(Charset::forName) + .orElse(StandardCharsets.UTF_8); + } + Object bytesObj = null; if (!colVec.isNull[rowNum]) { BytesColumnVector bytesVector = (BytesColumnVector) colVec; - bytesObj = bytesVector.toString(rowNum); - if (typeDescription.getCategory() == TypeDescription.Category.BINARY) { - bytesObj = ((String) bytesObj).getBytes(); + bytesObj = this.bytesVectorToString(bytesVector, rowNum, charset); + if (typeDescription.getCategory() == TypeDescription.Category.BINARY + && bytesObj != null) { + bytesObj = ((String) bytesObj).getBytes(charset); } } return bytesObj; } + /** + * copied from {@link BytesColumnVector#toString(int)} + * + * @param bytesVector the BytesColumnVector + * @param row rowNum + * @param charset read charset + */ + private Object bytesVectorToString(BytesColumnVector bytesVector, int row, Charset charset) { + if (bytesVector.isRepeating) { + row = 0; + } + + return !bytesVector.noNulls && bytesVector.isNull[row] + ? 
null + : new String( + bytesVector.vector[row], + bytesVector.start[row], + bytesVector.length[row], + charset); + } + private Object readDecimalVal(ColumnVector colVec, int rowNum) { Object decimalObj = null; if (!colVec.isNull[rowNum]) { diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/TextReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/TextReadStrategy.java index c3a0315fe0b..0b8d1b7aa3d 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/TextReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/TextReadStrategy.java @@ -57,6 +57,7 @@ public class TextReadStrategy extends AbstractReadStrategy { private TimeUtils.Formatter timeFormat = BaseSourceConfigOptions.TIME_FORMAT.defaultValue(); private CompressFormat compressFormat = BaseSourceConfigOptions.COMPRESS_CODEC.defaultValue(); private int[] indexes; + private String encoding = BaseSourceConfigOptions.ENCODING.defaultValue(); @Override public void read(String path, String tableId, Collector output) @@ -80,14 +81,15 @@ public void read(String path, String tableId, Collector output) } try (BufferedReader reader = - new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) { + new BufferedReader(new InputStreamReader(inputStream, encoding))) { reader.lines() .skip(skipHeaderNumber) .forEach( line -> { try { SeaTunnelRow seaTunnelRow = - deserializationSchema.deserialize(line.getBytes()); + deserializationSchema.deserialize( + line.getBytes(StandardCharsets.UTF_8)); if (!readColumns.isEmpty()) { // need column projection Object[] fields; @@ -160,6 +162,10 @@ public void 
setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType) { Optional fieldDelimiterOptional = ReadonlyConfig.fromConfig(pluginConfig) .getOptional(BaseSourceConfigOptions.FIELD_DELIMITER); + encoding = + ReadonlyConfig.fromConfig(pluginConfig) + .getOptional(BaseSourceConfigOptions.ENCODING) + .orElse(StandardCharsets.UTF_8.name()); if (fieldDelimiterOptional.isPresent()) { fieldDelimiter = fieldDelimiterOptional.get(); } else { diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/XmlReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/XmlReadStrategy.java index 0752bf52a85..d09431f12b4 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/XmlReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/XmlReadStrategy.java @@ -21,6 +21,7 @@ import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode; import org.apache.seatunnel.api.configuration.Option; +import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.source.Collector; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRow; @@ -49,6 +50,7 @@ import lombok.extern.slf4j.Slf4j; import java.io.IOException; +import java.io.InputStreamReader; import java.math.BigDecimal; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -72,6 +74,7 @@ public class XmlReadStrategy extends AbstractReadStrategy { private DateUtils.Formatter dateFormat; private DateTimeUtils.Formatter datetimeFormat; private TimeUtils.Formatter timeFormat; + private String encoding = BaseSourceConfigOptions.ENCODING.defaultValue(); private final 
ObjectMapper objectMapper = new ObjectMapper(); @@ -88,7 +91,10 @@ public void read(String path, String tableId, Collector output) SAXReader saxReader = new SAXReader(); Document document; try { - document = saxReader.read(hadoopFileSystemProxy.getInputStream(path)); + document = + saxReader.read( + new InputStreamReader( + hadoopFileSystemProxy.getInputStream(path), encoding)); } catch (DocumentException e) { throw new FileConnectorException( FileConnectorErrorCode.FILE_READ_FAILED, "Failed to read xml file: " + path, e); @@ -210,13 +216,13 @@ private Object convert(String fieldValue, SeaTunnelDataType fieldType) { case INT: return (int) Double.parseDouble(fieldValue); case BIGINT: - return (long) Double.parseDouble(fieldValue); + return new BigDecimal(fieldValue).longValue(); case DOUBLE: return Double.parseDouble(fieldValue); case FLOAT: return (float) Double.parseDouble(fieldValue); case DECIMAL: - return BigDecimal.valueOf(Double.parseDouble(fieldValue)); + return new BigDecimal(fieldValue); case BOOLEAN: return Boolean.parseBoolean(fieldValue); case BYTES: @@ -270,6 +276,10 @@ private void preCheckAndInitializeConfiguration() { this.datetimeFormat = getComplexDateConfigValue( BaseSourceConfigOptions.DATETIME_FORMAT, DateTimeUtils.Formatter::parse); + this.encoding = + ReadonlyConfig.fromConfig(pluginConfig) + .getOptional(BaseSourceConfigOptions.ENCODING) + .orElse(StandardCharsets.UTF_8.name()); } /** diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ReadStrategyEncodingTest.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ReadStrategyEncodingTest.java new file mode 100644 index 00000000000..736ae590963 --- /dev/null +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/java/org/apache/seatunnel/connectors/seatunnel/file/writer/ReadStrategyEncodingTest.java @@ -0,0 
+1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.file.writer; + +import org.apache.seatunnel.shade.com.typesafe.config.Config; +import org.apache.seatunnel.shade.com.typesafe.config.ConfigFactory; + +import org.apache.seatunnel.api.source.Collector; +import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; +import org.apache.seatunnel.api.table.type.SeaTunnelRow; +import org.apache.seatunnel.api.table.type.SeaTunnelRowType; +import org.apache.seatunnel.connectors.seatunnel.file.config.HadoopConf; +import org.apache.seatunnel.connectors.seatunnel.file.source.reader.AbstractReadStrategy; +import org.apache.seatunnel.connectors.seatunnel.file.source.reader.JsonReadStrategy; +import org.apache.seatunnel.connectors.seatunnel.file.source.reader.TextReadStrategy; +import org.apache.seatunnel.connectors.seatunnel.file.source.reader.XmlReadStrategy; + +import org.apache.commons.lang3.StringUtils; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import lombok.extern.slf4j.Slf4j; + +import java.io.File; +import java.io.IOException; +import java.math.BigDecimal; +import 
java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.Paths; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT; + +@Slf4j +public class ReadStrategyEncodingTest { + + private static final Map cMap = new HashMap<>(); + private static final Integer[] cArray = {101}; + private static final String[] cArrayString = {"测试ABC123!@#"}; + private static final String cString = "你好,世界ABC123!@#"; + private static final Boolean cBoolean = true; + private static final Byte cTinyint = 117; + private static final Short cSmallint = 15987; + private static final Integer cInt = 56387395; + private static final Long cBigint = 7084913402530365000L; + private static final Float cFloat = 1.23f; + private static final Double cDouble = 1.23; + private static final BigDecimal cDecimal = new BigDecimal("2924137191386439303744.39292216"); + private static final byte[] cBytes = { + -28, -67, -96, -27, -91, -67, -28, -72, -106, -25, -107, -116, 65, 66, 67, 97, 98, 99, 49, + 50, 51, 33, 64, 35 + }; + private static final LocalDate cDate = LocalDate.of(2023, 4, 22); + private static final LocalDateTime cTimestamp = LocalDateTime.of(2023, 4, 22, 23, 20, 58); + + @BeforeAll + public static void before() { + cMap.put("a测试", "b测试"); + } + + @Test + public void testTextRead() throws Exception { + try (TextReadStrategy textReadStrategy = new TextReadStrategy()) { + testRead("/encoding/gbk.txt", "/encoding/test_read_text.conf", textReadStrategy); + } + } + + @Test + public void testJsonRead() throws Exception { + try (JsonReadStrategy jsonReadStrategy = new JsonReadStrategy()) { + testRead("/encoding/gbk.json", "/encoding/test_read_json.conf", jsonReadStrategy); + } + } + + @Test + public void testXmlRead() throws Exception { + try (XmlReadStrategy xmlReadStrategy = new 
XmlReadStrategy()) { + testRead("/encoding/gbk.xml", "/encoding/test_read_xml.conf", xmlReadStrategy); + testRead( + "/encoding/gbk_use_attr_format.xml", + "/encoding/test_read_xml_use_attr_format.conf", + xmlReadStrategy); + } + } + + private static void testRead( + String sourcePathStr, String confPathStr, AbstractReadStrategy readStrategy) + throws URISyntaxException, IOException { + URL sourceFile = ReadStrategyEncodingTest.class.getResource(sourcePathStr); + URL conf = ReadStrategyEncodingTest.class.getResource(confPathStr); + Assertions.assertNotNull(sourceFile); + Assertions.assertNotNull(conf); + String sourceFilePath = Paths.get(sourceFile.toURI()).toString(); + String confPath = Paths.get(conf.toURI()).toString(); + TestCollector testCollector; + LocalConf localConf = new LocalConf(FS_DEFAULT_NAME_DEFAULT); + Config pluginConfig = ConfigFactory.parseFile(new File(confPath)); + readStrategy.setPluginConfig(pluginConfig); + readStrategy.init(localConf); + readStrategy.getFileNamesByPath(sourceFilePath); + testCollector = new TestCollector(); + SeaTunnelRowType seaTunnelRowTypeInfo = + CatalogTableUtil.buildWithConfig(pluginConfig).getSeaTunnelRowType(); + Assertions.assertNotNull(seaTunnelRowTypeInfo); + readStrategy.setSeaTunnelRowTypeInfo(seaTunnelRowTypeInfo); + log.info(seaTunnelRowTypeInfo.toString()); + readStrategy.read(sourceFilePath, "", testCollector); + assertRows(testCollector); + } + + private static void assertRows(TestCollector testCollector) { + for (SeaTunnelRow row : testCollector.getRows()) { + Assertions.assertEquals(row.getField(0), cMap); + Assertions.assertArrayEquals(((Integer[]) row.getField(1)), cArray); + Assertions.assertArrayEquals(((String[]) row.getField(2)), cArrayString); + Assertions.assertEquals(row.getField(3), cString); + Assertions.assertEquals(row.getField(4), cBoolean); + Assertions.assertEquals(row.getField(5), cTinyint); + Assertions.assertEquals(row.getField(6), cSmallint); + 
Assertions.assertEquals(row.getField(7), cInt); + Assertions.assertEquals(row.getField(8), cBigint); + Assertions.assertEquals(row.getField(9), cFloat); + Assertions.assertEquals(row.getField(10), cDouble); + Assertions.assertEquals(row.getField(11), cDecimal); + Assertions.assertTrue(StringUtils.isBlank((String) row.getField(12))); + Assertions.assertArrayEquals((byte[]) row.getField(13), cBytes); + Assertions.assertEquals(row.getField(14), cDate); + Assertions.assertEquals(row.getField(15), cTimestamp); + } + } + + public static class TestCollector implements Collector { + + private final List rows = new ArrayList<>(); + + public List getRows() { + return rows; + } + + @Override + public void collect(SeaTunnelRow record) { + log.info(record.toString()); + rows.add(record); + } + + @Override + public Object getCheckpointLock() { + return null; + } + } + + public static class LocalConf extends HadoopConf { + private static final String HDFS_IMPL = "org.apache.hadoop.fs.LocalFileSystem"; + private static final String SCHEMA = "file"; + + public LocalConf(String hdfsNameKey) { + super(hdfsNameKey); + } + + @Override + public String getFsHdfsImpl() { + return HDFS_IMPL; + } + + @Override + public String getSchema() { + return SCHEMA; + } + } +} diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk.json b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk.json new file mode 100644 index 00000000000..5f94c3a993c --- /dev/null +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk.json @@ -0,0 +1 @@ 
+{"c_map":{"a":"b"},"c_array":[101],"c_array_string":["ABC123!@#"],"c_string":"ãABC123!@#","c_boolean":true,"c_tinyint":117,"c_smallint":15987,"c_int":56387395,"c_bigint":7084913402530365000,"c_float":1.23,"c_double":1.23,"c_decimal":2924137191386439303744.39292216,"c_null":null,"c_bytes":"5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj","c_date":"2023-04-22","c_timestamp":"2023-04-22T23:20:58"} \ No newline at end of file diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk.txt b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk.txt new file mode 100644 index 00000000000..7e576c02187 --- /dev/null +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk.txt @@ -0,0 +1 @@ +ab101ABC123!@#ãABC123!@#true117159875638739570849134025303650001.231.232924137191386439303744.39292216ABCabc123!@#2023-04-222023-04-22 23:20:58 \ No newline at end of file diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk.xml b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk.xml new file mode 100644 index 00000000000..a40d97d739a --- /dev/null +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk.xml @@ -0,0 +1,36 @@ + + + + + + {"a":"b"} + [101] + ["ABC123!@#"] + ãABC123!@# + true + 117 + 15987 + 56387395 + 7084913402530365000 + 1.23 + 1.23 + 2924137191386439303744.39292216 + + ABCabc123!@# + 2023-04-22 + 2023-04-22 23:20:58 + + diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk_use_attr_format.xml b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk_use_attr_format.xml new file mode 100644 index 00000000000..ebcfdffe28b --- /dev/null +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/gbk_use_attr_format.xml @@ 
-0,0 +1,19 @@ + + + + + + diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_json.conf b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_json.conf new file mode 100644 index 00000000000..68af2774049 --- /dev/null +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_json.conf @@ -0,0 +1,41 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +{ + file_format_type = "json" + encoding = "gbk" + schema = { + fields { + c_map = "map" + c_array = "array" + c_array_string = "array" + c_string = string + c_boolean = boolean + c_tinyint = tinyint + c_smallint = smallint + c_int = int + c_bigint = bigint + c_float = float + c_double = double + c_decimal = "decimal(30, 8)" + c_null = "null" + c_bytes = bytes + c_date = date + c_timestamp = timestamp + } + } +} \ No newline at end of file diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_text.conf b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_text.conf new file mode 100644 index 00000000000..2c113ce1b95 --- /dev/null +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_text.conf @@ -0,0 +1,41 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +{ + file_format_type = "text" + encoding = "gbk" + schema = { + fields { + c_map = "map" + c_array = "array" + c_array_string = "array" + c_string = string + c_boolean = boolean + c_tinyint = tinyint + c_smallint = smallint + c_int = int + c_bigint = bigint + c_float = float + c_double = double + c_decimal = "decimal(30, 8)" + c_null = "null" + c_bytes = bytes + c_date = date + c_timestamp = timestamp + } + } +} \ No newline at end of file diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_xml.conf b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_xml.conf new file mode 100644 index 00000000000..266862b5c81 --- /dev/null +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_xml.conf @@ -0,0 +1,44 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +{ + file_format_type = "text" + encoding = "gbk" + xml_root_tag = "RECORDS" + xml_row_tag = "RECORD" + xml_use_attr_format = false + schema = { + fields { + c_map = "map" + c_array = "array" + c_array_string = "array" + c_string = string + c_boolean = boolean + c_tinyint = tinyint + c_smallint = smallint + c_int = int + c_bigint = bigint + c_float = float + c_double = double + c_decimal = "decimal(30, 8)" + c_null = "null" + c_bytes = bytes + c_date = date + c_timestamp = timestamp + } + } +} \ No newline at end of file diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_xml_use_attr_format.conf b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_xml_use_attr_format.conf new file mode 100644 index 00000000000..46cab19d3dc --- /dev/null +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/test/resources/encoding/test_read_xml_use_attr_format.conf @@ -0,0 +1,44 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +{ + file_format_type = "text" + encoding = "gbk" + xml_root_tag = "RECORDS" + xml_row_tag = "RECORD" + xml_use_attr_format = true + schema = { + fields { + c_map = "map" + c_array = "array" + c_array_string = "array" + c_string = string + c_boolean = boolean + c_tinyint = tinyint + c_smallint = smallint + c_int = int + c_bigint = bigint + c_float = float + c_double = double + c_decimal = "decimal(30, 8)" + c_null = "null" + c_bytes = bytes + c_date = date + c_timestamp = timestamp + } + } +} \ No newline at end of file diff --git a/seatunnel-connectors-v2/connector-file/connector-file-local/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/local/source/LocalFileSourceFactory.java b/seatunnel-connectors-v2/connector-file/connector-file-local/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/local/source/LocalFileSourceFactory.java index 450561a6081..499ebf45f19 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-local/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/local/source/LocalFileSourceFactory.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-local/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/local/source/LocalFileSourceFactory.java @@ -53,6 +53,7 @@ public OptionRule optionRule() { .optional(BaseSourceConfigOptions.TABLE_CONFIGS) .optional(BaseSourceConfigOptions.FILE_PATH) .optional(BaseSourceConfigOptions.FILE_FORMAT_TYPE) + .optional(BaseSourceConfigOptions.ENCODING) .conditional( BaseSourceConfigOptions.FILE_FORMAT_TYPE, FileFormat.TEXT, diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/java/org/apache/seatunnel/e2e/connector/file/local/LocalFileIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/java/org/apache/seatunnel/e2e/connector/file/local/LocalFileIT.java index 403479d7bdf..d06dc9f890b 100644 --- 
a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/java/org/apache/seatunnel/e2e/connector/file/local/LocalFileIT.java +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/java/org/apache/seatunnel/e2e/connector/file/local/LocalFileIT.java @@ -52,6 +52,11 @@ public class LocalFileIT extends TestSuiteBase { "/seatunnel/read/json/name=tyrantlucifer/hobby=coding/e2e.json", container); + ContainerUtil.copyFileIntoContainers( + "/json/e2e_gbk.json", + "/seatunnel/read/encoding/json/e2e_gbk.json", + container); + Path jsonLzo = convertToLzoFile(ContainerUtil.getResourcesFile("/json/e2e.json")); ContainerUtil.copyFileIntoContainers( jsonLzo, "/seatunnel/read/lzo_json/e2e.json", container); @@ -61,6 +66,11 @@ public class LocalFileIT extends TestSuiteBase { "/seatunnel/read/text/name=tyrantlucifer/hobby=coding/e2e.txt", container); + ContainerUtil.copyFileIntoContainers( + "/text/e2e_gbk.txt", + "/seatunnel/read/encoding/text/e2e_gbk.txt", + container); + ContainerUtil.copyFileIntoContainers( "/text/e2e_delimiter.txt", "/seatunnel/read/text_delimiter/e2e.txt", @@ -119,11 +129,21 @@ public void testLocalFileReadAndWrite(TestContainer container) helper.execute("/text/local_file_text_to_assert.conf"); // test read local text file with projection helper.execute("/text/local_file_text_projection_to_assert.conf"); + // test write local text file with assigning encoding + helper.execute("/text/fake_to_local_file_with_encoding.conf"); + // test read local text file with assigning encoding + helper.execute("/text/local_file_text_to_console_with_encoding.conf"); + + // test write local json file helper.execute("/json/fake_to_local_file_json.conf"); // test read local json file helper.execute("/json/local_file_json_to_assert.conf"); helper.execute("/json/local_file_json_lzo_to_console.conf"); + // test write local json file with assigning encoding + helper.execute("/json/fake_to_local_file_json_with_encoding.conf"); + // test read 
local json file with assigning encoding + helper.execute("/json/local_file_json_to_console_with_encoding.conf"); + // test write local orc file helper.execute("/orc/fake_to_local_file_orc.conf"); // test read local orc file diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/json/e2e_gbk.json b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/json/e2e_gbk.json new file mode 100644 index 00000000000..0c8061072a0 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/json/e2e_gbk.json @@ -0,0 +1,4 @@ +{"c_map":{"a":"b"},"c_array":[101],"c_array_string":["ABC123!@#"],"c_string":"ãABC123!@#","c_boolean":true,"c_tinyint":117,"c_smallint":15987,"c_int":56387395,"c_bigint":7084913402530365000,"c_float":1.23,"c_double":1.23,"c_decimal":2924137191386439303744.39292216,"c_null":null,"c_bytes":"5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj","c_date":"2023-04-22","c_timestamp":"2023-04-22T23:20:58"} +{"c_map":{"a":"c"},"c_array":[102],"c_array_string":["ABC123!@#"],"c_string":"","c_boolean":true,"c_tinyint":117,"c_smallint":15987,"c_int":56387395,"c_bigint":7084913402530365000,"c_float":1.23,"c_double":1.23,"c_decimal":2924137191386439303744.39292216,"c_null":null,"c_bytes":"5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj","c_date":"2023-04-22","c_timestamp":"2023-04-22T23:20:58"} +{"c_map":{"a":"e"},"c_array":[103],"c_array_string":["ABC123!@#"],"c_string":"GBKַB","c_boolean":true,"c_tinyint":117,"c_smallint":15987,"c_int":56387395,"c_bigint":7084913402530365000,"c_float":1.23,"c_double":1.23,"c_decimal":2924137191386439303744.39292216,"c_null":null,"c_bytes":"5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj","c_date":"2023-04-22","c_timestamp":"2023-04-22T23:20:58"} 
+{"c_map":{"a":"f"},"c_array":[104],"c_array_string":["ABC123!@#"],"c_string":"ַ","c_boolean":true,"c_tinyint":117,"c_smallint":15987,"c_int":56387395,"c_bigint":7084913402530365000,"c_float":1.23,"c_double":1.23,"c_decimal":2924137191386439303744.39292216,"c_null":null,"c_bytes":"5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj","c_date":"2023-04-22","c_timestamp":"2023-04-22T23:20:58"} \ No newline at end of file diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/json/fake_to_local_file_json_with_encoding.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/json/fake_to_local_file_json_with_encoding.conf new file mode 100644 index 00000000000..1a4dcc81ba0 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/json/fake_to_local_file_json_with_encoding.conf @@ -0,0 +1,86 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + parallelism = 1 + job.mode = "BATCH" + + # You can set spark configuration here + spark.app.name = "SeaTunnel" + spark.executor.instances = 2 + spark.executor.cores = 1 + spark.executor.memory = "1g" + spark.driver.extraJavaOptions = "-Dfile.encoding=UTF-8" + spark.master = local +} + +source { + FakeSource { + schema = { + fields { + c_map = "map" + c_array = "array" + c_array_string = "array" + c_string = string + c_boolean = boolean + c_tinyint = tinyint + c_smallint = smallint + c_int = int + c_bigint = bigint + c_float = float + c_double = double + c_decimal = "decimal(30, 8)" + c_null = "null" + c_bytes = bytes + c_date = date + c_timestamp = timestamp + } + } + rows = [ + { + kind = INSERT + fields = [{"aA\"测试\"": "bB\"测试\""}, [101], ["测试ABC123!@#"], "\"你好,世界\"ABC123!@#", true, 117, 15987, 56387395, 7084913402530365000, 1.23, 1.23, "2924137191386439303744.39292216", null, "5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj", "2023-04-22", "2023-04-22T23:20:58"] + } + { + kind = UPDATE_BEFORE + fields = [{"aA\"测试\"": "c"}, [102], ["\"测试\"ABC123!@#"], "\"海底隧道\"", true, 117, 15987, 56387395, 7084913402530365000, 1.23, 1.23, "2924137191386439303744.39292216", null, "5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj", "2023-04-22", "2023-04-22T23:20:58"] + } + { + kind = UPDATE_AFTER + fields = [{"a": "eE\"测试\""}, [103], ["\"测试\"ABC123!@#"], "GBK\"字符﨎\"", true, 117, 15987, 56387395, 7084913402530365000, 1.23, 1.23, "2924137191386439303744.39292216", null, "5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj", "2023-04-22", "2023-04-22T23:20:58"] + } + { + kind = DELETE + fields = [{"a": "f"}, [104], ["\"测试\"ABC123!@#"], "\"测试字符\"", true, 117, 15987, 56387395, 7084913402530365000, 1.23, 1.23, "2924137191386439303744.39292216", null, "5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj", "2023-04-22", "2023-04-22T23:20:58"] + } + ] + } +} + +transform { +} + +sink { + LocalFile { + path = "/tmp/seatunnel/encoding/json" + 
file_format_type = "json" + encoding = "gbk" + } +} \ No newline at end of file diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/json/local_file_json_to_console_with_encoding.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/json/local_file_json_to_console_with_encoding.conf new file mode 100644 index 00000000000..8a2b221c9b5 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/json/local_file_json_to_console_with_encoding.conf @@ -0,0 +1,66 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + parallelism = 1 + job.mode = "BATCH" + + spark.app.name = "SeaTunnel" + spark.executor.instances = 2 + spark.executor.cores = 1 + spark.executor.memory = "1g" + spark.driver.extraJavaOptions = "-Dfile.encoding=UTF-8" + spark.master = local +} + +source { + LocalFile { + path = "/seatunnel/read/encoding/json" + file_format_type = "json" + encoding = "gbk" + schema = { + fields { + c_map = "map" + c_array = "array" + c_array_string = "array" + c_string = string + c_boolean = boolean + c_tinyint = tinyint + c_smallint = smallint + c_int = int + c_bigint = bigint + c_float = float + c_double = double + c_decimal = "decimal(30, 8)" + c_null = "null" + c_bytes = bytes + c_date = date + c_timestamp = timestamp + } + } + } +} + +transform { +} + +sink { + Console {} +} diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/text/e2e_gbk.txt b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/text/e2e_gbk.txt new file mode 100644 index 00000000000..fa2639325e2 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/text/e2e_gbk.txt @@ -0,0 +1,4 @@ +aAbB101ABC123!@#ãABC123!@#true117159875638739570849134025303650001.231.232924137191386439303744.39292216ABCabc123!@#2023-04-222023-04-22 23:20:58 +aAc102ABC123!@#true117159875638739570849134025303650001.231.232924137191386439303744.39292216ABCabc123!@#2023-04-222023-04-22 23:20:58 +aeE103ABC123!@#GBKַBtrue117159875638739570849134025303650001.231.232924137191386439303744.39292216ABCabc123!@#2023-04-222023-04-22 23:20:58 +af104ABC123!@#ַtrue117159875638739570849134025303650001.231.232924137191386439303744.39292216ABCabc123!@#2023-04-222023-04-22 23:20:58 \ No newline at end of file diff --git 
a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/text/fake_to_local_file_with_encoding.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/text/fake_to_local_file_with_encoding.conf new file mode 100644 index 00000000000..c53a75e6b1d --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/text/fake_to_local_file_with_encoding.conf @@ -0,0 +1,85 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + parallelism = 1 + job.mode = "BATCH" + + spark.app.name = "SeaTunnel" + spark.executor.instances = 2 + spark.executor.cores = 1 + spark.executor.memory = "1g" + spark.driver.extraJavaOptions = "-Dfile.encoding=UTF-8" + spark.master = local +} + +source { + FakeSource { + schema = { + fields { + c_map = "map" + c_array = "array" + c_array_string = "array" + c_string = string + c_boolean = boolean + c_tinyint = tinyint + c_smallint = smallint + c_int = int + c_bigint = bigint + c_float = float + c_double = double + c_decimal = "decimal(30, 8)" + c_null = "null" + c_bytes = bytes + c_date = date + c_timestamp = timestamp + } + } + rows = [ + { + kind = INSERT + fields = [{"aA\"测试\"": "bB\"测试\""}, [101], ["测试ABC123!@#"], "\"你好,世界\"ABC123!@#", true, 117, 15987, 56387395, 7084913402530365000, 1.23, 1.23, "2924137191386439303744.39292216", null, "5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj", "2023-04-22", "2023-04-22T23:20:58"] + } + { + kind = UPDATE_BEFORE + fields = [{"aA\"测试\"": "c"}, [102], ["\"测试\"ABC123!@#"], "\"海底隧道\"", true, 117, 15987, 56387395, 7084913402530365000, 1.23, 1.23, "2924137191386439303744.39292216", null, "5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj", "2023-04-22", "2023-04-22T23:20:58"] + } + { + kind = UPDATE_AFTER + fields = [{"a": "eE\"测试\""}, [103], ["\"测试\"ABC123!@#"], "GBK\"字符﨎\"", true, 117, 15987, 56387395, 7084913402530365000, 1.23, 1.23, "2924137191386439303744.39292216", null, "5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj", "2023-04-22", "2023-04-22T23:20:58"] + } + { + kind = DELETE + fields = [{"a": "f"}, [104], ["\"测试\"ABC123!@#"], "\"测试字符\"", true, 117, 15987, 56387395, 7084913402530365000, 1.23, 1.23, "2924137191386439303744.39292216", null, "5L2g5aW95LiW55WMQUJDYWJjMTIzIUAj", "2023-04-22", "2023-04-22T23:20:58"] + } + ] + } +} + +transform { +} + +sink { + LocalFile { + path ="/tmp/seatunnel/encoding/text" + file_format_type = "text" + encoding = 
"gbk" + } +} diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/text/local_file_text_to_console_with_encoding.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/text/local_file_text_to_console_with_encoding.conf new file mode 100644 index 00000000000..d402c07f197 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/text/local_file_text_to_console_with_encoding.conf @@ -0,0 +1,66 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + parallelism = 1 + job.mode = "BATCH" + + spark.app.name = "SeaTunnel" + spark.executor.instances = 2 + spark.executor.cores = 1 + spark.executor.memory = "1g" + spark.driver.extraJavaOptions = "-Dfile.encoding=UTF-8" + spark.master = local +} + +source { + LocalFile { + path = "/seatunnel/read/encoding/text" + file_format_type = "text" + encoding = "gbk" + schema = { + fields { + 0 = "map" + 1 = "array" + 2 = "array" + 3 = string + 4 = boolean + 5 = tinyint + 6 = smallint + 7 = int + 8 = bigint + 9 = float + 10 = double + 11 = "decimal(30, 8)" + 12 = "null" + 13 = bytes + 14 = date + 15 = timestamp + } + } + } +} + +transform { +} + +sink { + Console {} +} diff --git a/seatunnel-formats/seatunnel-format-json/src/main/java/org/apache/seatunnel/format/json/JsonSerializationSchema.java b/seatunnel-formats/seatunnel-format-json/src/main/java/org/apache/seatunnel/format/json/JsonSerializationSchema.java index f743dc3dbe8..4e2e98317b4 100644 --- a/seatunnel-formats/seatunnel-format-json/src/main/java/org/apache/seatunnel/format/json/JsonSerializationSchema.java +++ b/seatunnel-formats/seatunnel-format-json/src/main/java/org/apache/seatunnel/format/json/JsonSerializationSchema.java @@ -28,6 +28,9 @@ import lombok.Getter; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; + import static com.google.common.base.Preconditions.checkNotNull; public class JsonSerializationSchema implements SerializationSchema { @@ -42,11 +45,18 @@ public class JsonSerializationSchema implements SerializationSchema { /** Object mapper that is used to create output JSON objects. 
*/ @Getter private final ObjectMapper mapper = new ObjectMapper(); + private final Charset charset; + private final RowToJsonConverters.RowToJsonConverter runtimeConverter; public JsonSerializationSchema(SeaTunnelRowType rowType) { + this(rowType, StandardCharsets.UTF_8); + } + + public JsonSerializationSchema(SeaTunnelRowType rowType, Charset charset) { this.rowType = rowType; this.runtimeConverter = new RowToJsonConverters().createConverter(checkNotNull(rowType)); + this.charset = charset; } @Override @@ -57,7 +67,7 @@ public byte[] serialize(SeaTunnelRow row) { try { runtimeConverter.convert(mapper, node, row); - return mapper.writeValueAsBytes(node); + return mapper.writeValueAsString(node).getBytes(charset); } catch (Throwable t) { throw CommonError.jsonOperationError(FORMAT, row.toString(), t); } diff --git a/seatunnel-formats/seatunnel-format-text/src/main/java/org/apache/seatunnel/format/text/TextDeserializationSchema.java b/seatunnel-formats/seatunnel-format-text/src/main/java/org/apache/seatunnel/format/text/TextDeserializationSchema.java index b6429eb600e..d7515da84a1 100644 --- a/seatunnel-formats/seatunnel-format-text/src/main/java/org/apache/seatunnel/format/text/TextDeserializationSchema.java +++ b/seatunnel-formats/seatunnel-format-text/src/main/java/org/apache/seatunnel/format/text/TextDeserializationSchema.java @@ -27,6 +27,7 @@ import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; import org.apache.seatunnel.common.utils.DateTimeUtils; import org.apache.seatunnel.common.utils.DateUtils; +import org.apache.seatunnel.common.utils.EncodingUtils; import org.apache.seatunnel.common.utils.TimeUtils; import org.apache.seatunnel.format.text.constant.TextFormatConstant; import org.apache.seatunnel.format.text.exception.SeaTunnelTextFormatException; @@ -37,6 +38,7 @@ import java.io.IOException; import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.LinkedHashMap; import 
java.util.Map; @@ -47,18 +49,21 @@ public class TextDeserializationSchema implements DeserializationSchema splitsMap = splitLineBySeaTunnelRowType(content, seaTunnelRowType, 0); Object[] objects = new Object[seaTunnelRowType.getTotalFields()]; for (int i = 0; i < objects.length; i++) { @@ -217,7 +233,7 @@ private Object convert(String field, SeaTunnelDataType fieldType, int level) case NULL: return null; case BYTES: - return field.getBytes(); + return field.getBytes(StandardCharsets.UTF_8); case DATE: return DateUtils.parse(field, dateFormatter); case TIME: diff --git a/seatunnel-formats/seatunnel-format-text/src/main/java/org/apache/seatunnel/format/text/TextSerializationSchema.java b/seatunnel-formats/seatunnel-format-text/src/main/java/org/apache/seatunnel/format/text/TextSerializationSchema.java index e33095703bd..d71ccd0ccd5 100644 --- a/seatunnel-formats/seatunnel-format-text/src/main/java/org/apache/seatunnel/format/text/TextSerializationSchema.java +++ b/seatunnel-formats/seatunnel-format-text/src/main/java/org/apache/seatunnel/format/text/TextSerializationSchema.java @@ -33,6 +33,8 @@ import lombok.NonNull; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; @@ -46,18 +48,21 @@ public class TextSerializationSchema implements SerializationSchema { private final DateUtils.Formatter dateFormatter; private final DateTimeUtils.Formatter dateTimeFormatter; private final TimeUtils.Formatter timeFormatter; + private final Charset charset; private TextSerializationSchema( @NonNull SeaTunnelRowType seaTunnelRowType, String[] separators, DateUtils.Formatter dateFormatter, DateTimeUtils.Formatter dateTimeFormatter, - TimeUtils.Formatter timeFormatter) { + TimeUtils.Formatter timeFormatter, + Charset charset) { this.seaTunnelRowType = seaTunnelRowType; this.separators = separators; this.dateFormatter = dateFormatter; this.dateTimeFormatter = 
dateTimeFormatter; this.timeFormatter = timeFormatter; + this.charset = charset; } public static Builder builder() { @@ -71,6 +76,7 @@ public static class Builder { private DateTimeUtils.Formatter dateTimeFormatter = DateTimeUtils.Formatter.YYYY_MM_DD_HH_MM_SS; private TimeUtils.Formatter timeFormatter = TimeUtils.Formatter.HH_MM_SS; + private Charset charset = StandardCharsets.UTF_8; private Builder() {} @@ -104,9 +110,19 @@ public Builder timeFormatter(TimeUtils.Formatter timeFormatter) { return this; } + public Builder charset(Charset charset) { + this.charset = charset; + return this; + } + public TextSerializationSchema build() { return new TextSerializationSchema( - seaTunnelRowType, separators, dateFormatter, dateTimeFormatter, timeFormatter); + seaTunnelRowType, + separators, + dateFormatter, + dateTimeFormatter, + timeFormatter, + charset); } } @@ -121,7 +137,7 @@ public byte[] serialize(SeaTunnelRow element) { for (int i = 0; i < fields.length; i++) { strings[i] = convert(fields[i], seaTunnelRowType.getFieldType(i), 0); } - return String.join(separators[0], strings).getBytes(); + return String.join(separators[0], strings).getBytes(charset); } private String convert(Object field, SeaTunnelDataType fieldType, int level) { @@ -132,13 +148,15 @@ private String convert(Object field, SeaTunnelDataType fieldType, int level) case DOUBLE: case FLOAT: case INT: - case STRING: case BOOLEAN: case TINYINT: case SMALLINT: case BIGINT: case DECIMAL: return field.toString(); + case STRING: + byte[] bytes = field.toString().getBytes(StandardCharsets.UTF_8); + return new String(bytes, StandardCharsets.UTF_8); case DATE: return DateUtils.toString((LocalDate) field, dateFormatter); case TIME: @@ -148,7 +166,7 @@ private String convert(Object field, SeaTunnelDataType fieldType, int level) case NULL: return ""; case BYTES: - return new String((byte[]) field); + return new String((byte[]) field, StandardCharsets.UTF_8); case ARRAY: BasicType elementType = ((ArrayType) 
fieldType).getElementType(); return Arrays.stream((Object[]) field) From d58086025584f6c4232b9005dcb122185a435911 Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Fri, 15 Mar 2024 10:07:39 +0800 Subject: [PATCH 14/59] [Fix][Zeta] Fix thread classloader be set to null when use cache mode (#6509) --- .../engine/common/loader/ClassLoaderUtil.java | 35 ------------------- .../DefaultClassLoaderService.java | 13 ++++++- .../parse/MultipleTableJobConfigParser.java | 2 -- .../classloader/ClassLoaderServiceTest.java | 31 ++++++++++++++++ .../engine/server/TaskExecutionService.java | 3 -- .../engine/server/master/JobMaster.java | 3 -- 6 files changed, 43 insertions(+), 44 deletions(-) delete mode 100644 seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/loader/ClassLoaderUtil.java diff --git a/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/loader/ClassLoaderUtil.java b/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/loader/ClassLoaderUtil.java deleted file mode 100644 index cb6e0820f08..00000000000 --- a/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/loader/ClassLoaderUtil.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.seatunnel.engine.common.loader; - -import lombok.extern.slf4j.Slf4j; - -@Slf4j -public class ClassLoaderUtil { - - public static void recycleClassLoaderFromThread(ClassLoader classLoader) { - log.info("recycle classloader " + classLoader); - Thread.getAllStackTraces().keySet().stream() - .filter(thread -> thread.getContextClassLoader() == classLoader) - .forEach( - thread -> { - log.info("recycle classloader for thread " + thread.getName()); - thread.setContextClassLoader(null); - }); - } -} diff --git a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/classloader/DefaultClassLoaderService.java b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/classloader/DefaultClassLoaderService.java index 36c7ae2f029..c5628290069 100644 --- a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/classloader/DefaultClassLoaderService.java +++ b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/classloader/DefaultClassLoaderService.java @@ -88,9 +88,10 @@ public synchronized void releaseClassLoader(long jobId, Collection jars) { return; } if (referenceCount == 0) { - classLoaderMap.remove(key); + ClassLoader classLoader = classLoaderMap.remove(key); log.info("Release classloader for job {} with jars {}", jobId, jars); classLoaderReferenceCount.get(jobId).remove(key); + recycleClassLoaderFromThread(classLoader); } if (classLoaderMap.isEmpty()) { classLoaderCache.remove(jobId); @@ -98,6 +99,16 @@ public synchronized void releaseClassLoader(long jobId, Collection jars) { } } + private static void recycleClassLoaderFromThread(ClassLoader classLoader) { + Thread.getAllStackTraces().keySet().stream() + .filter(thread -> thread.getContextClassLoader() == classLoader) + .forEach( + thread -> { + log.info("recycle 
classloader for thread " + thread.getName()); + thread.setContextClassLoader(null); + }); + } + private String covertJarsToKey(Collection jars) { return jars.stream().map(URL::toString).sorted().reduce((a, b) -> a + b).orElse(""); } diff --git a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java index 395f8b4a1ac..50e35d9117e 100644 --- a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java +++ b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java @@ -46,7 +46,6 @@ import org.apache.seatunnel.core.starter.utils.ConfigBuilder; import org.apache.seatunnel.engine.common.config.JobConfig; import org.apache.seatunnel.engine.common.exception.JobDefineCheckException; -import org.apache.seatunnel.engine.common.loader.ClassLoaderUtil; import org.apache.seatunnel.engine.common.loader.SeaTunnelChildFirstClassLoader; import org.apache.seatunnel.engine.common.utils.IdGenerator; import org.apache.seatunnel.engine.core.classloader.ClassLoaderService; @@ -209,7 +208,6 @@ public ImmutablePair, Set> parse(ClassLoaderService classLoade classLoaderService.releaseClassLoader( Long.parseLong(jobConfig.getJobContext().getJobId()), connectorJars); } - ClassLoaderUtil.recycleClassLoaderFromThread(classLoader); } } diff --git a/seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderServiceTest.java b/seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderServiceTest.java index 0e2fe90af2b..00a2445e585 100644 --- a/seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderServiceTest.java +++ 
b/seatunnel-engine/seatunnel-engine-core/src/test/java/org/apache/seatunnel/engine/core/classloader/ClassLoaderServiceTest.java @@ -69,4 +69,35 @@ void testSameJarInDifferentJob() throws MalformedURLException { Lists.newArrayList(new URL("file:///console.jar"), new URL("file:///fake.jar"))); Assertions.assertEquals(0, classLoaderService.queryClassLoaderCount()); } + + @Test + void testRecycleClassLoaderFromThread() throws MalformedURLException, InterruptedException { + ClassLoader classLoader = + classLoaderService.getClassLoader( + 3L, + Lists.newArrayList( + new URL("file:///console.jar"), new URL("file:///fake.jar"))); + ClassLoader appClassLoader = Thread.currentThread().getContextClassLoader(); + Thread.currentThread().setContextClassLoader(classLoader); + Thread thread = + new Thread( + () -> { + while (Thread.currentThread().getContextClassLoader() != null) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + }); + thread.start(); + Thread.currentThread().setContextClassLoader(appClassLoader); + Assertions.assertEquals(classLoader, thread.getContextClassLoader()); + classLoaderService.releaseClassLoader( + 3L, + Lists.newArrayList(new URL("file:///console.jar"), new URL("file:///fake.jar"))); + Assertions.assertNull(thread.getContextClassLoader()); + Thread.sleep(2000); + Assertions.assertFalse(thread.isAlive()); + } } diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java index e4ff187bb22..197833903d5 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java @@ -25,7 +25,6 @@ import 
org.apache.seatunnel.engine.common.config.SeaTunnelConfig; import org.apache.seatunnel.engine.common.config.server.ThreadShareMode; import org.apache.seatunnel.engine.common.exception.JobNotFoundException; -import org.apache.seatunnel.engine.common.loader.ClassLoaderUtil; import org.apache.seatunnel.engine.common.utils.PassiveCompletableFuture; import org.apache.seatunnel.engine.core.classloader.ClassLoaderService; import org.apache.seatunnel.engine.core.job.ConnectorJarIdentifier; @@ -919,9 +918,7 @@ void taskDone(Task task) { private void recycleClassLoader(TaskGroupLocation taskGroupLocation) { TaskGroupContext context = executionContexts.get(taskGroupLocation); - ClassLoader classLoader = context.getClassLoader(); executionContexts.get(taskGroupLocation).setClassLoader(null); - ClassLoaderUtil.recycleClassLoaderFromThread(classLoader); classLoaderService.releaseClassLoader(taskGroupLocation.getJobId(), context.getJars()); } diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/master/JobMaster.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/master/JobMaster.java index fd3f9ad0bc1..6b90fce8e29 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/master/JobMaster.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/master/JobMaster.java @@ -30,7 +30,6 @@ import org.apache.seatunnel.engine.common.config.server.CheckpointConfig; import org.apache.seatunnel.engine.common.config.server.CheckpointStorageConfig; import org.apache.seatunnel.engine.common.exception.SeaTunnelEngineException; -import org.apache.seatunnel.engine.common.loader.ClassLoaderUtil; import org.apache.seatunnel.engine.common.utils.ExceptionUtil; import org.apache.seatunnel.engine.common.utils.PassiveCompletableFuture; import org.apache.seatunnel.engine.core.dag.logical.LogicalDag; @@ -217,8 +216,6 @@ 
public void init(long initializationTimestamp, boolean restart) throws Exception jobImmutableInformation.getJobId(), jobImmutableInformation.getPluginJarsUrls()); - ClassLoaderUtil.recycleClassLoaderFromThread(classLoader); - final Tuple2> planTuple = PlanUtils.fromLogicalDAG( logicalDag, From 77ffa56f6da0f834928a4c9921035ec324e1c3dc Mon Sep 17 00:00:00 2001 From: Jarvis Date: Fri, 15 Mar 2024 10:38:14 +0800 Subject: [PATCH 15/59] [Improve][Transform] Sql transform support inner strucy query (#6484) --- docs/en/transform-v2/sql.md | 60 +++++++++ docs/zh/transform-v2/sql.md | 58 +++++++++ .../api/table/type/SeaTunnelRowType.java | 10 +- .../seatunnel/e2e/transform/TestSQLIT.java | 13 ++ .../resources/sql_transform/inner_query.conf | 114 ++++++++++++++++++ .../transform/sql/zeta/ZetaSQLFunction.java | 32 ++++- .../transform/sql/zeta/ZetaSQLType.java | 35 +++++- 7 files changed, 317 insertions(+), 5 deletions(-) create mode 100644 seatunnel-e2e/seatunnel-transforms-v2-e2e/seatunnel-transforms-v2-e2e-part-2/src/test/resources/sql_transform/inner_query.conf diff --git a/docs/en/transform-v2/sql.md b/docs/en/transform-v2/sql.md index b6c2306c656..a3bdb9bbfc1 100644 --- a/docs/en/transform-v2/sql.md +++ b/docs/en/transform-v2/sql.md @@ -24,6 +24,9 @@ The source table name, the query SQL table name must match this field. The query SQL, it's a simple SQL supported base function and criteria filter operation. But the complex SQL unsupported yet, include: multi source table/rows JOIN and AGGREGATE operation and the like. +the query expression can be `select [table_name.]column_a` to query the column that named `column_a`. and the table name is optional. +or `select c_row.c_inner_row.column_b` to query the inline struct column that named `column_b` within `c_row` column and `c_inner_row` column. 
**In this query expression, can't have table name.** + ## Example The data read from source is a table like this: @@ -56,6 +59,61 @@ Then the data in result table `fake1` will update to | 3 | Kin Dom_ | 25 | | 4 | Joy Dom_ | 23 | +### Struct query + +if your upstream data schema is like this: + +```hacon +source { + FakeSource { + result_table_name = "fake" + row.num = 100 + string.template = ["innerQuery"] + schema = { + fields { + name = "string" + c_date = "date" + c_row = { + c_inner_row = { + c_inner_int = "int" + c_inner_string = "string" + c_inner_timestamp = "timestamp" + c_map_1 = "map" + c_map_2 = "map>" + } + c_string = "string" + } + } + } + } +} +``` + +Those query all are valid: + +```sql +select +name, +c_date, +c_row, +c_row.c_inner_row, +c_row.c_string, +c_row.c_inner_row.c_inner_int, +c_row.c_inner_row.c_inner_string, +c_row.c_inner_row.c_inner_timestamp, +c_row.c_inner_row.c_map_1, +c_row.c_inner_row.c_map_1.some_key +``` + +But this query are not valid: + +```sql +select +c_row.c_inner_row.c_map_2.some_key.inner_map_key +``` + +The map must be the latest struct, can't query the nesting map. 
+ ## Job Config Example ``` @@ -94,6 +152,8 @@ sink { ## Changelog +- Support struct query + ### new version - Add SQL Transform Connector diff --git a/docs/zh/transform-v2/sql.md b/docs/zh/transform-v2/sql.md index ccbbc7f14cb..1b56f1fef3f 100644 --- a/docs/zh/transform-v2/sql.md +++ b/docs/zh/transform-v2/sql.md @@ -24,6 +24,9 @@ SQL 转换使用内存中的 SQL 引擎,我们可以通过 SQL 函数和 SQL 查询 SQL,它是一个简单的 SQL,支持基本的函数和条件过滤操作。但是,复杂的 SQL 尚不支持,包括:多源表/行连接和聚合操作等。 +查询表达式可以是`select [table_name.]column_a`,这时会去查询列为`column_a`的列,`table_name`为可选项 +也可以是`select c_row.c_inner_row.column_b`,这时会去查询列`c_row`下的`c_inner_row`的`column_b`。**嵌套结构查询中,不能存在`table_name`** + ## 示例 源端数据读取的表格如下: @@ -56,6 +59,61 @@ transform { | 3 | Kin Dom_ | 25 | | 4 | Joy Dom_ | 23 | +### 嵌套结构查询 + +例如你的上游数据结构是这样: + +```hacon +source { + FakeSource { + result_table_name = "fake" + row.num = 100 + string.template = ["innerQuery"] + schema = { + fields { + name = "string" + c_date = "date" + c_row = { + c_inner_row = { + c_inner_int = "int" + c_inner_string = "string" + c_inner_timestamp = "timestamp" + c_map_1 = "map" + c_map_2 = "map>" + } + c_string = "string" + } + } + } + } +} +``` + +那么下列所有的查询表达式都是有效的 + +```sql +select +name, +c_date, +c_row, +c_row.c_inner_row, +c_row.c_string, +c_row.c_inner_row.c_inner_int, +c_row.c_inner_row.c_inner_string, +c_row.c_inner_row.c_inner_timestamp, +c_row.c_inner_row.c_map_1, +c_row.c_inner_row.c_map_1.some_key +``` + +但是这个查询语句是无效的 + +```sql +select +c_row.c_inner_row.c_map_2.some_key.inner_map_key +``` + +当查询map结构时,map结构应该为最后一个数据结构,不能查询嵌套map + ## 作业配置示例 ``` diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/SeaTunnelRowType.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/SeaTunnelRowType.java index 4eedb2255ad..575def632dd 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/SeaTunnelRowType.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/SeaTunnelRowType.java @@ -74,12 +74,20 @@ public 
SeaTunnelDataType getFieldType(int index) { } public int indexOf(String fieldName) { + return indexOf(fieldName, true); + } + + public int indexOf(String fieldName, boolean throwExceptionWhenNotFound) { for (int i = 0; i < fieldNames.length; i++) { if (fieldNames[i].equals(fieldName)) { return i; } } - throw new IllegalArgumentException(String.format("can't find field [%s]", fieldName)); + if (throwExceptionWhenNotFound) { + throw new IllegalArgumentException(String.format("can't find field [%s]", fieldName)); + } else { + return -1; + } } @Override diff --git a/seatunnel-e2e/seatunnel-transforms-v2-e2e/seatunnel-transforms-v2-e2e-part-2/src/test/java/org/apache/seatunnel/e2e/transform/TestSQLIT.java b/seatunnel-e2e/seatunnel-transforms-v2-e2e/seatunnel-transforms-v2-e2e-part-2/src/test/java/org/apache/seatunnel/e2e/transform/TestSQLIT.java index 54e2f0ae134..df404a28525 100644 --- a/seatunnel-e2e/seatunnel-transforms-v2-e2e/seatunnel-transforms-v2-e2e-part-2/src/test/java/org/apache/seatunnel/e2e/transform/TestSQLIT.java +++ b/seatunnel-e2e/seatunnel-transforms-v2-e2e/seatunnel-transforms-v2-e2e-part-2/src/test/java/org/apache/seatunnel/e2e/transform/TestSQLIT.java @@ -17,7 +17,9 @@ package org.apache.seatunnel.e2e.transform; +import org.apache.seatunnel.e2e.common.container.EngineType; import org.apache.seatunnel.e2e.common.container.TestContainer; +import org.apache.seatunnel.e2e.common.junit.DisabledOnContainer; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.TestTemplate; @@ -58,4 +60,15 @@ public void testSQLTransform(TestContainer container) throws IOException, Interr Container.ExecResult caseWhenSql = container.executeJob("/sql_transform/case_when.conf"); Assertions.assertEquals(0, caseWhenSql.getExitCode()); } + + @TestTemplate + @DisabledOnContainer( + value = {}, + type = {EngineType.SPARK}, + disabledReason = "Spark translation has some issue on map convert") + public void testInnerQuery(TestContainer container) throws IOException, 
InterruptedException { + Container.ExecResult innerQuerySql = + container.executeJob("/sql_transform/inner_query.conf"); + Assertions.assertEquals(0, innerQuerySql.getExitCode()); + } } diff --git a/seatunnel-e2e/seatunnel-transforms-v2-e2e/seatunnel-transforms-v2-e2e-part-2/src/test/resources/sql_transform/inner_query.conf b/seatunnel-e2e/seatunnel-transforms-v2-e2e/seatunnel-transforms-v2-e2e-part-2/src/test/resources/sql_transform/inner_query.conf new file mode 100644 index 00000000000..65a6324f94c --- /dev/null +++ b/seatunnel-e2e/seatunnel-transforms-v2-e2e/seatunnel-transforms-v2-e2e-part-2/src/test/resources/sql_transform/inner_query.conf @@ -0,0 +1,114 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + job.mode = "BATCH" +} + +source { + FakeSource { + result_table_name = "fake" + row.num = 100 + string.template = ["innerQuery"] + schema = { + fields { + name = "string" + c_date = "date" + c_row = { + c_inner_row = { + c_inner_int = "int" + c_inner_string = "string" + c_inner_timestamp = "timestamp" + c_map = "map" + } + c_string = "string" + } + } + } + } +} + +transform { + Sql { + source_table_name = "fake" + result_table_name = "tmp1" + query = """select c_date, + c_row.c_string c_string, + c_row.c_inner_row.c_inner_string c_inner_string, + c_row.c_inner_row.c_inner_timestamp c_inner_timestamp, + c_row.c_inner_row.c_map.innerQuery map_val, + c_row.c_inner_row.c_map.notExistKey map_not_exist_val + from fake""" + } +} + +sink { + Console { + source_table_name = "tmp1" + } + Assert { + source_table_name = "tmp1" + rules = { + field_rules = [{ + field_name = "c_date" + field_type = "date" + field_value = [ + {rule_type = NOT_NULL} + ] + }, + { + field_name = "c_string" + field_type = "string" + field_value = [ + {equals_to = "innerQuery"} + ] + }, + { + field_name = "c_inner_string" + field_type = "string" + field_value = [ + {equals_to = "innerQuery"} + ] + }, + { + field_name = "c_inner_timestamp" + field_type = "timestamp" + field_value = [ + {rule_type = NOT_NULL} + ] + }, + { + field_name = "map_val" + field_type = "string" + field_value = [ + {rule_type = NOT_NULL} + ] + }, + { + field_name = "map_not_exist_val" + field_type = "null" + field_value = [ + {rule_type = NULL} + ] + } + ] + } + } +} \ No newline at end of file diff --git a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLFunction.java b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLFunction.java index 23cf4844ed6..30794af42f4 100644 --- 
a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLFunction.java +++ b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLFunction.java @@ -18,7 +18,9 @@ package org.apache.seatunnel.transform.sql.zeta; import org.apache.seatunnel.api.table.type.DecimalType; +import org.apache.seatunnel.api.table.type.MapType; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.api.table.type.SqlType; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; @@ -56,6 +58,7 @@ import java.math.RoundingMode; import java.util.ArrayList; import java.util.List; +import java.util.Map; public class ZetaSQLFunction { // ============================internal functions===================== @@ -199,8 +202,33 @@ public Object computeForValue(Expression expression, Object[] inputFields) { return ((StringValue) expression).getValue(); } if (expression instanceof Column) { - int idx = inputRowType.indexOf(((Column) expression).getColumnName()); - return inputFields[idx]; + Column columnExp = (Column) expression; + String columnName = columnExp.getColumnName(); + int index = inputRowType.indexOf(columnName, false); + if (index != -1) { + return inputFields[index]; + } else { + String fullyQualifiedName = columnExp.getFullyQualifiedName(); + String[] columnNames = fullyQualifiedName.split("\\."); + int deep = columnNames.length; + SeaTunnelDataType parDataType = inputRowType; + SeaTunnelRow parRowValues = new SeaTunnelRow(inputFields); + Object res = parRowValues; + for (int i = 0; i < deep; i++) { + if (parDataType instanceof MapType) { + return ((Map) res).get(columnNames[i]); + } + parRowValues = (SeaTunnelRow) res; + int idx = ((SeaTunnelRowType) parDataType).indexOf(columnNames[i], false); + if (idx == -1) { + throw new IllegalArgumentException( + 
String.format("can't find field [%s]", fullyQualifiedName)); + } + parDataType = ((SeaTunnelRowType) parDataType).getFieldType(idx); + res = parRowValues.getFields()[idx]; + } + return res; + } } if (expression instanceof Function) { Function function = (Function) expression; diff --git a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLType.java b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLType.java index 968566e45a7..635ce3274f2 100644 --- a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLType.java +++ b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLType.java @@ -20,6 +20,7 @@ import org.apache.seatunnel.api.table.type.BasicType; import org.apache.seatunnel.api.table.type.DecimalType; import org.apache.seatunnel.api.table.type.LocalTimeType; +import org.apache.seatunnel.api.table.type.MapType; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.api.table.type.SqlType; @@ -101,8 +102,38 @@ public SeaTunnelDataType getExpressionType(Expression expression) { return BasicType.STRING_TYPE; } if (expression instanceof Column) { - String columnName = ((Column) expression).getColumnName(); - return inputRowType.getFieldType(inputRowType.indexOf(columnName)); + Column columnExp = (Column) expression; + String columnName = columnExp.getColumnName(); + int index = inputRowType.indexOf(columnName, false); + if (index != -1) { + return inputRowType.getFieldType(index); + } else { + // fullback logical to handel struct query. 
+ String fullyQualifiedName = columnExp.getFullyQualifiedName(); + String[] columnNames = fullyQualifiedName.split("\\."); + int deep = columnNames.length; + SeaTunnelRowType parRowType = inputRowType; + SeaTunnelDataType filedTypeRes = null; + for (int i = 0; i < deep; i++) { + int idx = parRowType.indexOf(columnNames[i], false); + if (idx == -1) { + throw new IllegalArgumentException( + String.format("can't find field [%s]", fullyQualifiedName)); + } + filedTypeRes = parRowType.getFieldType(idx); + if (filedTypeRes instanceof SeaTunnelRowType) { + parRowType = (SeaTunnelRowType) filedTypeRes; + } else if (filedTypeRes instanceof MapType) { + // for map type. only support it's the latest struct. + if (i != deep - 2) { + throw new IllegalArgumentException( + "For now, we only support map struct is the latest struct in inner query function! Please modify your query!"); + } + return ((MapType) filedTypeRes).getValueType(); + } + } + return filedTypeRes; + } } if (expression instanceof Function) { return getFunctionType((Function) expression); From d6dcb03bf3f1a75dfd69b8bb8c08e9a63a7c1882 Mon Sep 17 00:00:00 2001 From: hailin0 Date: Fri, 15 Mar 2024 10:41:59 +0800 Subject: [PATCH 16/59] [Improve][Jdbc] Support custom case-sensitive config for dameng (#6510) --- .../jdbc/internal/dialect/dm/DmdbDialect.java | 5 +- .../dialect/dm/DmdbDialectFactory.java | 8 +++- .../internal/dialect/dm/DmdbDialectTest.java | 47 +++++++++++++++++++ 3 files changed, 55 insertions(+), 5 deletions(-) create mode 100644 seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dm/DmdbDialectTest.java diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dm/DmdbDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dm/DmdbDialect.java index edd51060a7d..460de0272fb 100644 --- 
a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dm/DmdbDialect.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dm/DmdbDialect.java @@ -22,7 +22,6 @@ import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectTypeMapper; -import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum; import java.util.Arrays; import java.util.List; @@ -31,14 +30,12 @@ public class DmdbDialect implements JdbcDialect { - public String fieldIde = FieldIdeEnum.ORIGINAL.getValue(); + public String fieldIde; public DmdbDialect(String fieldIde) { this.fieldIde = fieldIde; } - public DmdbDialect() {} - @Override public String dialectName() { return DatabaseIdentifier.DAMENG; diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dm/DmdbDialectFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dm/DmdbDialectFactory.java index c31d0d2b19e..3d29f2f3e55 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dm/DmdbDialectFactory.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dm/DmdbDialectFactory.java @@ -19,6 +19,7 @@ import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectFactory; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum; import 
com.google.auto.service.AutoService; @@ -33,6 +34,11 @@ public boolean acceptsURL(String url) { @Override public JdbcDialect create() { - return new DmdbDialect(); + return create(null, FieldIdeEnum.ORIGINAL.getValue()); + } + + @Override + public JdbcDialect create(String compatibleMode, String fieldIde) { + return new DmdbDialect(fieldIde); } } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dm/DmdbDialectTest.java b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dm/DmdbDialectTest.java new file mode 100644 index 00000000000..bc7a81f14d4 --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dm/DmdbDialectTest.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dm; + +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +public class DmdbDialectTest { + @Test + public void testIdentifierCaseSensitive() { + DmdbDialectFactory factory = new DmdbDialectFactory(); + + JdbcDialect dialect = factory.create(); + Assertions.assertEquals("\"test\"", dialect.quoteIdentifier("test")); + Assertions.assertEquals("\"TEST\"", dialect.quoteIdentifier("TEST")); + + dialect = factory.create(null, FieldIdeEnum.ORIGINAL.getValue()); + Assertions.assertEquals("\"test\"", dialect.quoteIdentifier("test")); + Assertions.assertEquals("\"TEST\"", dialect.quoteIdentifier("TEST")); + + dialect = factory.create(null, FieldIdeEnum.LOWERCASE.getValue()); + Assertions.assertEquals("\"test\"", dialect.quoteIdentifier("test")); + Assertions.assertEquals("\"test\"", dialect.quoteIdentifier("TEST")); + + dialect = factory.create(null, FieldIdeEnum.UPPERCASE.getValue()); + Assertions.assertEquals("\"TEST\"", dialect.quoteIdentifier("test")); + Assertions.assertEquals("\"TEST\"", dialect.quoteIdentifier("TEST")); + } +} From c0487a9dd2a8e7b2628f58a333ec28cb72652dc6 Mon Sep 17 00:00:00 2001 From: Jarvis Date: Fri, 15 Mar 2024 19:14:19 +0800 Subject: [PATCH 17/59] [Docs] update sql udf document (#6518) --- docs/en/transform-v2/sql-udf.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/en/transform-v2/sql-udf.md b/docs/en/transform-v2/sql-udf.md index ede3ef9ab4a..78810c11b53 100644 --- a/docs/en/transform-v2/sql-udf.md +++ b/docs/en/transform-v2/sql-udf.md @@ -91,7 +91,8 @@ public class ExampleUDF implements ZetaUDF { } ``` -Package the UDF project and copy the jar to the path: ${SEATUNNEL_HOME}/lib +Package the UDF project and copy the jar to the 
path: ${SEATUNNEL_HOME}/lib. And if your UDF use third party library, you also need put it to ${SEATUNNEL_HOME}/lib. +If you use cluster mode, you need put the lib to all your node's ${SEATUNNEL_HOME}/lib folder and re-start the cluster. ## Example From cb3db759728eb34e85bdeb6765f821f5699391a5 Mon Sep 17 00:00:00 2001 From: Jarvis Date: Sun, 17 Mar 2024 23:06:42 +0800 Subject: [PATCH 18/59] [Fix][Zeta] improve the local mode hazelcast connection (#6521) --- .../starter/seatunnel/command/ClientExecuteCommand.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/seatunnel-core/seatunnel-starter/src/main/java/org/apache/seatunnel/core/starter/seatunnel/command/ClientExecuteCommand.java b/seatunnel-core/seatunnel-starter/src/main/java/org/apache/seatunnel/core/starter/seatunnel/command/ClientExecuteCommand.java index ad41ae983c9..980832f4dea 100644 --- a/seatunnel-core/seatunnel-starter/src/main/java/org/apache/seatunnel/core/starter/seatunnel/command/ClientExecuteCommand.java +++ b/seatunnel-core/seatunnel-starter/src/main/java/org/apache/seatunnel/core/starter/seatunnel/command/ClientExecuteCommand.java @@ -48,6 +48,7 @@ import java.nio.file.Path; import java.time.Duration; import java.time.LocalDateTime; +import java.util.Collections; import java.util.Random; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Executors; @@ -79,6 +80,7 @@ public void execute() throws CommandExecuteException { SeaTunnelConfig seaTunnelConfig = ConfigProvider.locateAndGetSeaTunnelConfig(); try { String clusterName = clientCommandArgs.getClusterName(); + ClientConfig clientConfig = ConfigProvider.locateAndGetClientConfig(); if (clientCommandArgs.getMasterType().equals(MasterType.LOCAL)) { clusterName = creatRandomClusterName( @@ -86,12 +88,13 @@ public void execute() throws CommandExecuteException { ? 
clusterName : Constant.DEFAULT_SEATUNNEL_CLUSTER_NAME); instance = createServerInLocal(clusterName, seaTunnelConfig); + int port = instance.getCluster().getLocalMember().getSocketAddress().getPort(); + clientConfig + .getNetworkConfig() + .setAddresses(Collections.singletonList("localhost:" + port)); } if (StringUtils.isNotEmpty(clusterName)) { seaTunnelConfig.getHazelcastConfig().setClusterName(clusterName); - } - ClientConfig clientConfig = ConfigProvider.locateAndGetClientConfig(); - if (StringUtils.isNotEmpty(clusterName)) { clientConfig.setClusterName(clusterName); } engineClient = new SeaTunnelClient(clientConfig); From b0abbd2d89d07409c6c90f1b278e8cca65d1b9ed Mon Sep 17 00:00:00 2001 From: dailai <837833280@qq.com> Date: Tue, 19 Mar 2024 10:48:32 +0800 Subject: [PATCH 19/59] [Feature][Connector-V2] Support multi-table sink feature for paimon #5652 (#6449) --- docs/en/connector-v2/sink/Paimon.md | 82 ++++-- docs/zh/connector-v2/sink/Paimon.md | 87 ++++++ .../connector-paimon/pom.xml | 5 + .../paimon/catalog/PaimonCatalog.java | 215 +++++++++++++++ .../paimon/catalog/PaimonCatalogFactory.java | 54 ++++ .../paimon/catalog/PaimonCatalogLoader.java | 61 ++++ .../seatunnel/paimon/catalog/PaimonTable.java | 28 ++ .../seatunnel/paimon/config/PaimonConfig.java | 9 +- .../paimon/config/PaimonSinkConfig.java | 66 +++++ .../paimon/data/PaimonTypeMapper.java | 50 ++++ .../paimon/handler/PaimonSaveModeHandler.java | 58 ++++ .../seatunnel/paimon/sink/PaimonSink.java | 146 +++++----- .../paimon/sink/PaimonSinkFactory.java | 68 ++++- .../paimon/sink/PaimonSinkWriter.java | 57 +++- .../paimon/sink/SupportLoadTable.java | 22 ++ .../commit/PaimonAggregatedCommitter.java | 34 ++- .../paimon/utils/JobContextUtil.java | 32 +++ .../seatunnel/paimon/utils/RowConverter.java | 12 +- .../paimon/utils/RowKindConverter.java | 51 ++++ .../paimon/utils/RowTypeConverter.java | 85 +++++- .../seatunnel/paimon/utils/SchemaUtil.java | 54 ++++ .../paimon/utils/RowTypeConverterTest.java | 2 +- 
.../connector-paimon-e2e/pom.xml | 9 + .../e2e/connector/paimon/PaimonSinkCDCIT.java | 260 ++++++++++++++++++ .../resources/fake_cdc_sink_paimon_case1.conf | 86 ++++++ .../resources/fake_cdc_sink_paimon_case2.conf | 142 ++++++++++ .../seatunnel-hadoop3-3.1.4-uber/pom.xml | 5 + 27 files changed, 1652 insertions(+), 128 deletions(-) create mode 100644 docs/zh/connector-v2/sink/Paimon.md create mode 100644 seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalog.java create mode 100644 seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalogFactory.java create mode 100644 seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalogLoader.java create mode 100644 seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonTable.java create mode 100644 seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonSinkConfig.java create mode 100644 seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/data/PaimonTypeMapper.java create mode 100644 seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/handler/PaimonSaveModeHandler.java create mode 100644 seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/SupportLoadTable.java create mode 100644 seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/JobContextUtil.java create mode 100644 seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowKindConverter.java create mode 100644 
seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/SchemaUtil.java create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/java/org/apache/seatunnel/e2e/connector/paimon/PaimonSinkCDCIT.java create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case1.conf create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case2.conf diff --git a/docs/en/connector-v2/sink/Paimon.md b/docs/en/connector-v2/sink/Paimon.md index 6fa721a1e63..5e9d3c431f7 100644 --- a/docs/en/connector-v2/sink/Paimon.md +++ b/docs/en/connector-v2/sink/Paimon.md @@ -4,7 +4,7 @@ ## Description -Write data to Apache Paimon. +Sink connector for Apache Paimon. It can support cdc mode 、auto create table. ## Key features @@ -12,40 +12,76 @@ Write data to Apache Paimon. ## Options -| name | type | required | default value | -|----------------|--------|----------|---------------| -| warehouse | String | Yes | - | -| database | String | Yes | - | -| table | String | Yes | - | -| hdfs_site_path | String | No | - | +| name | type | required | default value | Description | +|------------------|--------|----------|------------------------------|---------------------------------| +| warehouse | String | Yes | - | Paimon warehouse path | +| database | String | Yes | - | The database you want to access | +| table | String | Yes | - | The table you want to access | +| hdfs_site_path | String | No | - | | +| schema_save_mode | Enum | no | CREATE_SCHEMA_WHEN_NOT_EXIST | The schema save mode | +| data_save_mode | Enum | no | APPEND_DATA | The data save mode | -### warehouse [string] - -Paimon warehouse path - -### database [string] +## Examples -The database you want to access +### Single table -### table [String] +```hocon +env { + parallelism = 1 + job.mode = "STREAMING" + checkpoint.interval = 
5000 +} -The table you want to access +source { + Mysql-CDC { + base-url = "jdbc:mysql://127.0.0.1:3306/seatunnel" + username = "root" + password = "******" + table-names = ["seatunnel.role"] + } +} -## Examples +transform { +} -```hocon sink { Paimon { - warehouse = "/tmp/paimon" - database = "default" - table = "st_test" + catalog_name="seatunnel_test" + warehouse="file:///tmp/seatunnel/paimon/hadoop-sink/" + database="seatunnel" + table="role" } } ``` -## Changelog +### Multiple table + +```hocon +env { + parallelism = 1 + job.mode = "STREAMING" + checkpoint.interval = 5000 +} + +source { + Mysql-CDC { + base-url = "jdbc:mysql://127.0.0.1:3306/seatunnel" + username = "root" + password = "******" + table-names = ["seatunnel.role","seatunnel.user","galileo.Bucket"] + } +} -### next version +transform { +} -- Add Paimon Sink Connector +sink { + Paimon { + catalog_name="seatunnel_test" + warehouse="file:///tmp/seatunnel/paimon/hadoop-sink/" + database="${database_name}" + table="${table_name}" + } +} +``` diff --git a/docs/zh/connector-v2/sink/Paimon.md b/docs/zh/connector-v2/sink/Paimon.md new file mode 100644 index 00000000000..b1b4baef9b1 --- /dev/null +++ b/docs/zh/connector-v2/sink/Paimon.md @@ -0,0 +1,87 @@ +# Paimon + +> Paimon 数据连接器 + +## 描述 + +Apache Paimon数据连接器。支持cdc写以及自动建表。 + +## 主要特性 + +- [x] [exactly-once](../../concept/connector-v2-features.md) + +## 连接器选项 + +| 名称 | 类型 | 是否必须 | 默认值 | 描述 | +|------------------|--------|------|------------------------------|--------------------| +| warehouse | String | Yes | - | Paimon warehouse路径 | +| database | String | Yes | - | 数据库名称 | +| table | String | Yes | - | 表名 | +| hdfs_site_path | String | No | - | | +| schema_save_mode | Enum | no | CREATE_SCHEMA_WHEN_NOT_EXIST | schema保存模式 | +| data_save_mode | Enum | no | APPEND_DATA | 数据保存模式 | + +## 示例 + +### 单表 + +```hocon +env { + parallelism = 1 + job.mode = "STREAMING" + checkpoint.interval = 5000 +} + +source { + Mysql-CDC { + base-url = 
"jdbc:mysql://127.0.0.1:3306/seatunnel" + username = "root" + password = "******" + table-names = ["seatunnel.role"] + } +} + +transform { +} + +sink { + Paimon { + catalog_name="seatunnel_test" + warehouse="file:///tmp/seatunnel/paimon/hadoop-sink/" + database="seatunnel" + table="role" + } +} +``` + +### 多表 + +```hocon +env { + parallelism = 1 + job.mode = "STREAMING" + checkpoint.interval = 5000 +} + +source { + Mysql-CDC { + base-url = "jdbc:mysql://127.0.0.1:3306/seatunnel" + username = "root" + password = "******" + table-names = ["seatunnel.role","seatunnel.user","galileo.Bucket"] + } +} + +transform { +} + +sink { + Paimon { + catalog_name="seatunnel_test" + warehouse="file:///tmp/seatunnel/paimon/hadoop-sink/" + database="${database_name}" + table="${table_name}" + } +} +``` + diff --git a/seatunnel-connectors-v2/connector-paimon/pom.xml b/seatunnel-connectors-v2/connector-paimon/pom.xml index 8bcb1c35070..499165ea6fb 100644 --- a/seatunnel-connectors-v2/connector-paimon/pom.xml +++ b/seatunnel-connectors-v2/connector-paimon/pom.xml @@ -34,6 +34,11 @@ + + org.apache.seatunnel + connector-common + ${project.version} + org.apache.paimon diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalog.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalog.java new file mode 100644 index 00000000000..7312ed28b06 --- /dev/null +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalog.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.paimon.catalog; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.table.catalog.Catalog; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.catalog.TableSchema; +import org.apache.seatunnel.api.table.catalog.exception.CatalogException; +import org.apache.seatunnel.api.table.catalog.exception.DatabaseAlreadyExistException; +import org.apache.seatunnel.api.table.catalog.exception.DatabaseNotExistException; +import org.apache.seatunnel.api.table.catalog.exception.TableAlreadyExistException; +import org.apache.seatunnel.api.table.catalog.exception.TableNotExistException; +import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonSinkConfig; +import org.apache.seatunnel.connectors.seatunnel.paimon.utils.SchemaUtil; + +import org.apache.paimon.catalog.Identifier; +import org.apache.paimon.table.FileStoreTable; +import org.apache.paimon.table.Table; +import org.apache.paimon.types.DataField; + +import lombok.extern.slf4j.Slf4j; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; + +@Slf4j +public class PaimonCatalog 
implements Catalog, PaimonTable { + private static final String DEFAULT_DATABASE = "default"; + + private String catalogName; + private ReadonlyConfig readonlyConfig; + private PaimonCatalogLoader paimonCatalogLoader; + private org.apache.paimon.catalog.Catalog catalog; + + public PaimonCatalog(String catalogName, ReadonlyConfig readonlyConfig) { + this.readonlyConfig = readonlyConfig; + this.catalogName = catalogName; + this.paimonCatalogLoader = new PaimonCatalogLoader(new PaimonSinkConfig(readonlyConfig)); + } + + @Override + public void open() throws CatalogException { + this.catalog = paimonCatalogLoader.loadCatalog(); + } + + @Override + public void close() throws CatalogException { + if (catalog != null && catalog instanceof Closeable) { + try { + ((Closeable) catalog).close(); + } catch (IOException e) { + log.error("Error while closing IcebergCatalog.", e); + throw new CatalogException(e); + } + } + } + + @Override + public String name() { + return this.catalogName; + } + + @Override + public String getDefaultDatabase() throws CatalogException { + return DEFAULT_DATABASE; + } + + @Override + public boolean databaseExists(String databaseName) throws CatalogException { + return catalog.databaseExists(databaseName); + } + + @Override + public List listDatabases() throws CatalogException { + return catalog.listDatabases(); + } + + @Override + public List listTables(String databaseName) + throws CatalogException, DatabaseNotExistException { + try { + return catalog.listTables(databaseName); + } catch (org.apache.paimon.catalog.Catalog.DatabaseNotExistException e) { + throw new DatabaseNotExistException(this.catalogName, databaseName); + } + } + + @Override + public boolean tableExists(TablePath tablePath) throws CatalogException { + return catalog.tableExists(toIdentifier(tablePath)); + } + + @Override + public CatalogTable getTable(TablePath tablePath) + throws CatalogException, TableNotExistException { + try { + FileStoreTable paimonFileStoreTableTable = 
(FileStoreTable) getPaimonTable(tablePath); + return toCatalogTable(paimonFileStoreTableTable, tablePath); + } catch (Exception e) { + throw new TableNotExistException(this.catalogName, tablePath); + } + } + + @Override + public Table getPaimonTable(TablePath tablePath) + throws CatalogException, TableNotExistException { + try { + return catalog.getTable(toIdentifier(tablePath)); + } catch (org.apache.paimon.catalog.Catalog.TableNotExistException e) { + throw new TableNotExistException(this.catalogName, tablePath); + } + } + + @Override + public void createTable(TablePath tablePath, CatalogTable table, boolean ignoreIfExists) + throws TableAlreadyExistException, DatabaseNotExistException, CatalogException { + try { + catalog.createTable( + toIdentifier(tablePath), + SchemaUtil.toPaimonSchema(table.getTableSchema()), + ignoreIfExists); + } catch (org.apache.paimon.catalog.Catalog.TableAlreadyExistException e) { + throw new TableAlreadyExistException(this.catalogName, tablePath); + } catch (org.apache.paimon.catalog.Catalog.DatabaseNotExistException e) { + throw new DatabaseNotExistException(this.catalogName, tablePath.getDatabaseName()); + } + } + + @Override + public void dropTable(TablePath tablePath, boolean ignoreIfNotExists) + throws TableNotExistException, CatalogException { + try { + catalog.dropTable(toIdentifier(tablePath), ignoreIfNotExists); + } catch (org.apache.paimon.catalog.Catalog.TableNotExistException e) { + throw new TableNotExistException(this.catalogName, tablePath); + } + } + + @Override + public void createDatabase(TablePath tablePath, boolean ignoreIfExists) + throws DatabaseAlreadyExistException, CatalogException { + try { + catalog.createDatabase(tablePath.getDatabaseName(), ignoreIfExists); + } catch (org.apache.paimon.catalog.Catalog.DatabaseAlreadyExistException e) { + throw new DatabaseAlreadyExistException(this.catalogName, tablePath.getDatabaseName()); + } + } + + @Override + public void dropDatabase(TablePath tablePath, boolean 
ignoreIfNotExists) + throws DatabaseNotExistException, CatalogException { + try { + catalog.dropDatabase(tablePath.getDatabaseName(), ignoreIfNotExists, true); + } catch (Exception e) { + throw new DatabaseNotExistException(this.catalogName, tablePath.getDatabaseName()); + } + } + + private CatalogTable toCatalogTable( + FileStoreTable paimonFileStoreTableTable, TablePath tablePath) { + org.apache.paimon.schema.TableSchema schema = paimonFileStoreTableTable.schema(); + List dataFields = schema.fields(); + TableSchema.Builder builder = TableSchema.builder(); + dataFields.forEach( + dataField -> { + String name = dataField.name(); + SeaTunnelDataType seaTunnelType = + SchemaUtil.toSeaTunnelType(dataField.type()); + PhysicalColumn physicalColumn = + PhysicalColumn.of( + name, + seaTunnelType, + (Long) null, + true, + null, + dataField.description()); + builder.column(physicalColumn); + }); + + List partitionKeys = schema.partitionKeys(); + + return CatalogTable.of( + org.apache.seatunnel.api.table.catalog.TableIdentifier.of( + catalogName, tablePath.getDatabaseName(), tablePath.getTableName()), + builder.build(), + paimonFileStoreTableTable.options(), + partitionKeys, + null, + catalogName); + } + + private Identifier toIdentifier(TablePath tablePath) { + return Identifier.create(tablePath.getDatabaseName(), tablePath.getTableName()); + } +} diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalogFactory.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalogFactory.java new file mode 100644 index 00000000000..4d94f385d9f --- /dev/null +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalogFactory.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.paimon.catalog; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.configuration.util.OptionRule; +import org.apache.seatunnel.api.table.catalog.Catalog; +import org.apache.seatunnel.api.table.factory.CatalogFactory; +import org.apache.seatunnel.api.table.factory.Factory; +import org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonSinkConfig; + +import com.google.auto.service.AutoService; + +@AutoService(Factory.class) +public class PaimonCatalogFactory implements CatalogFactory { + @Override + public Catalog createCatalog(String catalogName, ReadonlyConfig readonlyConfig) { + return new PaimonCatalog(catalogName, readonlyConfig); + } + + @Override + public String factoryIdentifier() { + return "Paimon"; + } + + @Override + public OptionRule optionRule() { + return OptionRule.builder() + .required( + PaimonSinkConfig.WAREHOUSE, + PaimonSinkConfig.DATABASE, + PaimonSinkConfig.TABLE) + .optional( + PaimonSinkConfig.HDFS_SITE_PATH, + PaimonSinkConfig.SCHEMA_SAVE_MODE, + PaimonSinkConfig.DATA_SAVE_MODE) + .build(); + } +} diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalogLoader.java 
b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalogLoader.java new file mode 100644 index 00000000000..bec66dbe3f2 --- /dev/null +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalogLoader.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.paimon.catalog; + +import org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonSinkConfig; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.paimon.catalog.Catalog; +import org.apache.paimon.catalog.CatalogContext; +import org.apache.paimon.catalog.CatalogFactory; +import org.apache.paimon.options.Options; + +import lombok.extern.slf4j.Slf4j; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.Map; + +import static org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonConfig.WAREHOUSE; + +@Slf4j +public class PaimonCatalogLoader implements Serializable { + private PaimonSinkConfig config; + + public PaimonCatalogLoader(PaimonSinkConfig config) { + this.config = config; + } + + public Catalog loadCatalog() { + // When using the seatunel engine, set the current class loader to prevent loading failures + Thread.currentThread().setContextClassLoader(PaimonCatalogLoader.class.getClassLoader()); + final String warehouse = config.getWarehouse(); + final Map optionsMap = new HashMap<>(); + optionsMap.put(WAREHOUSE.key(), warehouse); + final Options options = Options.fromMap(optionsMap); + final Configuration hadoopConf = new Configuration(); + String hdfsSitePathOptional = config.getHdfsSitePath(); + if (StringUtils.isNotBlank(hdfsSitePathOptional)) { + hadoopConf.addResource(new Path(hdfsSitePathOptional)); + } + final CatalogContext catalogContext = CatalogContext.create(options, hadoopConf); + return CatalogFactory.createCatalog(catalogContext); + } +} diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonTable.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonTable.java new file mode 100644 index 00000000000..55b18f79ab3 --- 
/**
 * Exposes the underlying native Paimon {@link Table} for a given SeaTunnel table path.
 *
 * <p>Implemented by {@code PaimonCatalog}; used by the save-mode handler to fetch the physical
 * table that writers and committers operate on.
 */
public interface PaimonTable {

    /**
     * Looks up the native Paimon table identified by {@code tablePath}.
     *
     * @param tablePath database/table coordinates of the target table
     * @return the native Paimon {@link Table}
     * @throws CatalogException if the catalog cannot be queried
     * @throws TableNotExistException if no such table exists
     */
    Table getPaimonTable(TablePath tablePath) throws CatalogException, TableNotExistException;
}
b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonConfig.java
@@ -22,13 +22,14 @@
 import org.apache.seatunnel.api.sink.SeaTunnelSink;
 import org.apache.seatunnel.api.source.SeaTunnelSource;
 
+import java.io.Serializable;
 import java.util.List;
 
 /**
  * Utility class to store configuration options, used by {@link SeaTunnelSource} and {@link
  * SeaTunnelSink}.
  */
-public class PaimonConfig {
+public class PaimonConfig implements Serializable {
 
     public static final Option WAREHOUSE =
             Options.key("warehouse")
@@ -36,6 +37,12 @@ public class PaimonConfig {
                     .noDefaultValue()
                     .withDescription("The warehouse path of paimon");
 
+    public static final Option CATALOG_NAME =
+            Options.key("catalog_name")
+                    .stringType()
+                    .defaultValue("paimon")
+                    .withDescription("the paimon catalog name");
+
     public static final Option DATABASE =
             Options.key("database")
                     .stringType()
diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonSinkConfig.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonSinkConfig.java
new file mode 100644
index 00000000000..589fd948167
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonSinkConfig.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.paimon.config; + +import org.apache.seatunnel.api.configuration.Option; +import org.apache.seatunnel.api.configuration.Options; +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.sink.DataSaveMode; +import org.apache.seatunnel.api.sink.SchemaSaveMode; + +import lombok.Getter; + +import static org.apache.seatunnel.shade.com.google.common.base.Preconditions.checkNotNull; + +@Getter +public class PaimonSinkConfig extends PaimonConfig { + public static final Option SCHEMA_SAVE_MODE = + Options.key("schema_save_mode") + .enumType(SchemaSaveMode.class) + .defaultValue(SchemaSaveMode.CREATE_SCHEMA_WHEN_NOT_EXIST) + .withDescription("schema_save_mode"); + + public static final Option DATA_SAVE_MODE = + Options.key("data_save_mode") + .enumType(DataSaveMode.class) + .defaultValue(DataSaveMode.APPEND_DATA) + .withDescription("data_save_mode"); + + private String catalogName; + private String warehouse; + private String namespace; + private String table; + private String hdfsSitePath; + private SchemaSaveMode schemaSaveMode; + private DataSaveMode dataSaveMode; + + public PaimonSinkConfig(ReadonlyConfig readonlyConfig) { + this.catalogName = checkArgumentNotNull(readonlyConfig.get(CATALOG_NAME)); + this.warehouse = checkArgumentNotNull(readonlyConfig.get(WAREHOUSE)); + this.namespace = checkArgumentNotNull(readonlyConfig.get(DATABASE)); + this.table = checkArgumentNotNull(readonlyConfig.get(TABLE)); + this.hdfsSitePath = readonlyConfig.get(HDFS_SITE_PATH); + 
this.schemaSaveMode = readonlyConfig.get(SCHEMA_SAVE_MODE); + this.dataSaveMode = readonlyConfig.get(DATA_SAVE_MODE); + } + + protected T checkArgumentNotNull(T argument) { + checkNotNull(argument); + return argument; + } +} diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/data/PaimonTypeMapper.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/data/PaimonTypeMapper.java new file mode 100644 index 00000000000..1f8b1cff32f --- /dev/null +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/data/PaimonTypeMapper.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.paimon.data; + +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.converter.TypeConverter; +import org.apache.seatunnel.connectors.seatunnel.paimon.sink.PaimonSink; +import org.apache.seatunnel.connectors.seatunnel.paimon.utils.RowTypeConverter; + +import org.apache.paimon.types.DataType; + +import com.google.auto.service.AutoService; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@AutoService(TypeConverter.class) +public class PaimonTypeMapper implements TypeConverter { + public static final PaimonTypeMapper INSTANCE = new PaimonTypeMapper(); + + @Override + public String identifier() { + return PaimonSink.PLUGIN_NAME; + } + + @Override + public Column convert(DataType dataType) { + return PhysicalColumn.builder().dataType(RowTypeConverter.convert(dataType)).build(); + } + + @Override + public DataType reconvert(Column column) { + return RowTypeConverter.reconvert(column.getDataType()); + } +} diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/handler/PaimonSaveModeHandler.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/handler/PaimonSaveModeHandler.java new file mode 100644 index 00000000000..b479ebf14b0 --- /dev/null +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/handler/PaimonSaveModeHandler.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.paimon.handler; + +import org.apache.seatunnel.api.sink.DataSaveMode; +import org.apache.seatunnel.api.sink.DefaultSaveModeHandler; +import org.apache.seatunnel.api.sink.SchemaSaveMode; +import org.apache.seatunnel.api.table.catalog.Catalog; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.connectors.seatunnel.paimon.catalog.PaimonCatalog; +import org.apache.seatunnel.connectors.seatunnel.paimon.sink.SupportLoadTable; + +import org.apache.paimon.table.Table; + +public class PaimonSaveModeHandler extends DefaultSaveModeHandler { + + private SupportLoadTable supportLoadTable; + private Catalog catalog; + private CatalogTable catalogTable; + + public PaimonSaveModeHandler( + SupportLoadTable supportLoadTable, + SchemaSaveMode schemaSaveMode, + DataSaveMode dataSaveMode, + Catalog catalog, + CatalogTable catalogTable, + String customSql) { + super(schemaSaveMode, dataSaveMode, catalog, catalogTable, customSql); + this.supportLoadTable = supportLoadTable; + this.catalog = catalog; + this.catalogTable = catalogTable; + } + + @Override + public void handleSchemaSaveMode() { + super.handleSchemaSaveMode(); + TablePath tablePath = catalogTable.getTablePath(); + Table paimonTable = ((PaimonCatalog) catalog).getPaimonTable(tablePath); + // load paimon table and set it into paimon sink + this.supportLoadTable.setLoadTable(paimonTable); + } +} diff --git 
a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSink.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSink.java index ac1a0b97edd..cdec4b0c760 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSink.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSink.java @@ -17,54 +17,46 @@ package org.apache.seatunnel.connectors.seatunnel.paimon.sink; -import org.apache.seatunnel.shade.com.typesafe.config.Config; - -import org.apache.seatunnel.api.common.PrepareFailException; +import org.apache.seatunnel.api.common.JobContext; import org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode; +import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.serialization.DefaultSerializer; import org.apache.seatunnel.api.serialization.Serializer; +import org.apache.seatunnel.api.sink.SaveModeHandler; import org.apache.seatunnel.api.sink.SeaTunnelSink; import org.apache.seatunnel.api.sink.SinkAggregatedCommitter; import org.apache.seatunnel.api.sink.SinkWriter; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.sink.SupportMultiTableSink; +import org.apache.seatunnel.api.sink.SupportSaveMode; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; -import org.apache.seatunnel.common.config.CheckConfigUtil; -import org.apache.seatunnel.common.config.CheckResult; import org.apache.seatunnel.common.constants.PluginType; -import org.apache.seatunnel.connectors.seatunnel.paimon.exception.PaimonConnectorErrorCode; +import org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonSinkConfig; import 
org.apache.seatunnel.connectors.seatunnel.paimon.exception.PaimonConnectorException; +import org.apache.seatunnel.connectors.seatunnel.paimon.handler.PaimonSaveModeHandler; import org.apache.seatunnel.connectors.seatunnel.paimon.sink.commit.PaimonAggregatedCommitInfo; import org.apache.seatunnel.connectors.seatunnel.paimon.sink.commit.PaimonAggregatedCommitter; import org.apache.seatunnel.connectors.seatunnel.paimon.sink.commit.PaimonCommitInfo; import org.apache.seatunnel.connectors.seatunnel.paimon.sink.state.PaimonSinkState; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.paimon.catalog.Catalog; -import org.apache.paimon.catalog.CatalogContext; -import org.apache.paimon.catalog.CatalogFactory; -import org.apache.paimon.catalog.Identifier; -import org.apache.paimon.options.Options; import org.apache.paimon.table.Table; -import com.google.auto.service.AutoService; - import java.io.IOException; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.Optional; -import static org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonConfig.DATABASE; -import static org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonConfig.HDFS_SITE_PATH; -import static org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonConfig.TABLE; -import static org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonConfig.WAREHOUSE; +import static org.apache.seatunnel.api.table.factory.FactoryUtil.discoverFactory; -@AutoService(SeaTunnelSink.class) public class PaimonSink implements SeaTunnelSink< - SeaTunnelRow, PaimonSinkState, PaimonCommitInfo, PaimonAggregatedCommitInfo> { + SeaTunnelRow, + PaimonSinkState, + PaimonCommitInfo, + PaimonAggregatedCommitInfo>, + SupportSaveMode, + SupportLoadTable
, + SupportMultiTableSink { private static final long serialVersionUID = 1L; @@ -72,79 +64,44 @@ public class PaimonSink private SeaTunnelRowType seaTunnelRowType; - private Config pluginConfig; - private Table table; - @Override - public String getPluginName() { - return PLUGIN_NAME; - } + private JobContext jobContext; - @Override - public void prepare(Config pluginConfig) throws PrepareFailException { - this.pluginConfig = pluginConfig; - CheckResult result = - CheckConfigUtil.checkAllExists( - pluginConfig, WAREHOUSE.key(), DATABASE.key(), TABLE.key()); - if (!result.isSuccess()) { - throw new PaimonConnectorException( - SeaTunnelAPIErrorCode.CONFIG_VALIDATION_FAILED, - String.format( - "PluginName: %s, PluginType: %s, Message: %s", - getPluginName(), PluginType.SINK, result.getMsg())); - } - // initialize paimon table - final String warehouse = pluginConfig.getString(WAREHOUSE.key()); - final String database = pluginConfig.getString(DATABASE.key()); - final String table = pluginConfig.getString(TABLE.key()); - final Map optionsMap = new HashMap<>(); - optionsMap.put(WAREHOUSE.key(), warehouse); - final Options options = Options.fromMap(optionsMap); - final Configuration hadoopConf = new Configuration(); - if (pluginConfig.hasPath(HDFS_SITE_PATH.key())) { - hadoopConf.addResource(new Path(pluginConfig.getString(HDFS_SITE_PATH.key()))); - } - final CatalogContext catalogContext = CatalogContext.create(options, hadoopConf); - try (Catalog catalog = CatalogFactory.createCatalog(catalogContext)) { - Identifier identifier = Identifier.create(database, table); - this.table = catalog.getTable(identifier); - } catch (Exception e) { - String errorMsg = - String.format( - "Failed to get table [%s] from database [%s] on warehouse [%s]", - database, table, warehouse); - throw new PaimonConnectorException( - PaimonConnectorErrorCode.GET_TABLE_FAILED, errorMsg, e); - } - } + private ReadonlyConfig readonlyConfig; - @Override - public void setTypeInfo(SeaTunnelRowType 
seaTunnelRowType) { - this.seaTunnelRowType = seaTunnelRowType; + private PaimonSinkConfig paimonSinkConfig; + + private CatalogTable catalogTable; + + public PaimonSink(ReadonlyConfig readonlyConfig, CatalogTable catalogTable) { + this.readonlyConfig = readonlyConfig; + this.paimonSinkConfig = new PaimonSinkConfig(readonlyConfig); + this.catalogTable = catalogTable; + this.seaTunnelRowType = catalogTable.getSeaTunnelRowType(); } @Override - public SeaTunnelDataType getConsumedType() { - return this.seaTunnelRowType; + public String getPluginName() { + return PLUGIN_NAME; } @Override public SinkWriter createWriter( SinkWriter.Context context) throws IOException { - return new PaimonSinkWriter(context, table, seaTunnelRowType); + return new PaimonSinkWriter(context, table, seaTunnelRowType, jobContext); } @Override public Optional> createAggregatedCommitter() throws IOException { - return Optional.of(new PaimonAggregatedCommitter(table)); + return Optional.of(new PaimonAggregatedCommitter(table, jobContext)); } @Override public SinkWriter restoreWriter( SinkWriter.Context context, List states) throws IOException { - return new PaimonSinkWriter(context, table, seaTunnelRowType, states); + return new PaimonSinkWriter(context, table, seaTunnelRowType, states, jobContext); } @Override @@ -156,4 +113,43 @@ public Optional> getAggregatedCommitInfoS public Optional> getCommitInfoSerializer() { return Optional.of(new DefaultSerializer<>()); } + + @Override + public void setJobContext(JobContext jobContext) { + this.jobContext = jobContext; + } + + @Override + public Optional getSaveModeHandler() { + org.apache.seatunnel.api.table.factory.CatalogFactory catalogFactory = + discoverFactory( + Thread.currentThread().getContextClassLoader(), + org.apache.seatunnel.api.table.factory.CatalogFactory.class, + "Paimon"); + if (catalogFactory == null) { + throw new PaimonConnectorException( + SeaTunnelAPIErrorCode.CONFIG_VALIDATION_FAILED, + String.format( + "PluginName: %s, 
PluginType: %s, Message: %s", + getPluginName(), + PluginType.SINK, + "Cannot find paimon catalog factory")); + } + org.apache.seatunnel.api.table.catalog.Catalog catalog = + catalogFactory.createCatalog(catalogFactory.factoryIdentifier(), readonlyConfig); + catalog.open(); + return Optional.of( + new PaimonSaveModeHandler( + this, + paimonSinkConfig.getSchemaSaveMode(), + paimonSinkConfig.getDataSaveMode(), + catalog, + catalogTable, + null)); + } + + @Override + public void setLoadTable(Table table) { + this.table = table; + } } diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkFactory.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkFactory.java index dfae43c4820..c0b4d997ead 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkFactory.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkFactory.java @@ -17,16 +17,30 @@ package org.apache.seatunnel.connectors.seatunnel.paimon.sink; +import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.configuration.util.OptionRule; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; +import org.apache.seatunnel.api.table.connector.TableSink; import org.apache.seatunnel.api.table.factory.Factory; import org.apache.seatunnel.api.table.factory.TableSinkFactory; +import org.apache.seatunnel.api.table.factory.TableSinkFactoryContext; import org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonConfig; +import org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonSinkConfig; + +import org.apache.commons.lang3.StringUtils; import com.google.auto.service.AutoService; @AutoService(Factory.class) 
public class PaimonSinkFactory implements TableSinkFactory { + public static final String REPLACE_TABLE_NAME_KEY = "${table_name}"; + + public static final String REPLACE_SCHEMA_NAME_KEY = "${schema_name}"; + + public static final String REPLACE_DATABASE_NAME_KEY = "${database_name}"; + @Override public String factoryIdentifier() { return "Paimon"; @@ -35,10 +49,56 @@ public String factoryIdentifier() { @Override public OptionRule optionRule() { return OptionRule.builder() - .required(PaimonConfig.WAREHOUSE) - .required(PaimonConfig.DATABASE) - .required(PaimonConfig.TABLE) - .optional(PaimonConfig.HDFS_SITE_PATH) + .required(PaimonConfig.WAREHOUSE, PaimonConfig.DATABASE, PaimonConfig.TABLE) + .optional( + PaimonConfig.HDFS_SITE_PATH, + PaimonSinkConfig.SCHEMA_SAVE_MODE, + PaimonSinkConfig.DATA_SAVE_MODE) .build(); } + + @Override + public TableSink createSink(TableSinkFactoryContext context) { + ReadonlyConfig readonlyConfig = context.getOptions(); + CatalogTable catalogTable = + renameCatalogTable(new PaimonSinkConfig(readonlyConfig), context.getCatalogTable()); + return () -> new PaimonSink(context.getOptions(), catalogTable); + } + + private CatalogTable renameCatalogTable( + PaimonSinkConfig paimonSinkConfig, CatalogTable catalogTable) { + TableIdentifier tableId = catalogTable.getTableId(); + String tableName; + String namespace; + if (StringUtils.isNotEmpty(paimonSinkConfig.getTable())) { + tableName = replaceName(paimonSinkConfig.getTable(), tableId); + } else { + tableName = tableId.getTableName(); + } + + if (StringUtils.isNotEmpty(paimonSinkConfig.getNamespace())) { + namespace = replaceName(paimonSinkConfig.getNamespace(), tableId); + } else { + namespace = tableId.getSchemaName(); + } + + TableIdentifier newTableId = + TableIdentifier.of( + tableId.getCatalogName(), namespace, tableId.getSchemaName(), tableName); + + return CatalogTable.of(newTableId, catalogTable); + } + + private String replaceName(String original, TableIdentifier tableId) { + if 
(tableId.getTableName() != null) { + original = original.replace(REPLACE_TABLE_NAME_KEY, tableId.getTableName()); + } + if (tableId.getSchemaName() != null) { + original = original.replace(REPLACE_SCHEMA_NAME_KEY, tableId.getSchemaName()); + } + if (tableId.getDatabaseName() != null) { + original = original.replace(REPLACE_DATABASE_NAME_KEY, tableId.getDatabaseName()); + } + return original; + } } diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkWriter.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkWriter.java index 930f62045fd..7b2e8327a99 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkWriter.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkWriter.java @@ -17,21 +17,28 @@ package org.apache.seatunnel.connectors.seatunnel.paimon.sink; +import org.apache.seatunnel.api.common.JobContext; import org.apache.seatunnel.api.sink.SinkWriter; +import org.apache.seatunnel.api.sink.SupportMultiTableSinkWriter; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.connectors.seatunnel.paimon.exception.PaimonConnectorErrorCode; import org.apache.seatunnel.connectors.seatunnel.paimon.exception.PaimonConnectorException; import org.apache.seatunnel.connectors.seatunnel.paimon.sink.commit.PaimonCommitInfo; import org.apache.seatunnel.connectors.seatunnel.paimon.sink.state.PaimonSinkState; +import org.apache.seatunnel.connectors.seatunnel.paimon.utils.JobContextUtil; import org.apache.seatunnel.connectors.seatunnel.paimon.utils.RowConverter; import org.apache.paimon.data.InternalRow; import org.apache.paimon.table.Table; import 
org.apache.paimon.table.sink.BatchTableCommit; import org.apache.paimon.table.sink.BatchTableWrite; -import org.apache.paimon.table.sink.BatchWriteBuilder; import org.apache.paimon.table.sink.CommitMessage; +import org.apache.paimon.table.sink.StreamTableCommit; +import org.apache.paimon.table.sink.StreamTableWrite; +import org.apache.paimon.table.sink.TableCommit; +import org.apache.paimon.table.sink.TableWrite; +import org.apache.paimon.table.sink.WriteBuilder; import lombok.extern.slf4j.Slf4j; @@ -46,13 +53,14 @@ @Slf4j public class PaimonSinkWriter - implements SinkWriter { + implements SinkWriter, + SupportMultiTableSinkWriter { private String commitUser = UUID.randomUUID().toString(); - private final BatchWriteBuilder tableWriteBuilder; + private final WriteBuilder tableWriteBuilder; - private final BatchTableWrite tableWrite; + private final TableWrite tableWrite; private long checkpointId = 0; @@ -64,37 +72,58 @@ public class PaimonSinkWriter private final SinkWriter.Context context; - public PaimonSinkWriter(Context context, Table table, SeaTunnelRowType seaTunnelRowType) { + private final JobContext jobContext; + + public PaimonSinkWriter( + Context context, + Table table, + SeaTunnelRowType seaTunnelRowType, + JobContext jobContext) { this.table = table; - this.tableWriteBuilder = this.table.newBatchWriteBuilder().withOverwrite(); + this.tableWriteBuilder = + JobContextUtil.isBatchJob(jobContext) + ? this.table.newBatchWriteBuilder().withOverwrite() + : this.table.newStreamWriteBuilder(); this.tableWrite = tableWriteBuilder.newWrite(); this.seaTunnelRowType = seaTunnelRowType; this.context = context; + this.jobContext = jobContext; } public PaimonSinkWriter( Context context, Table table, SeaTunnelRowType seaTunnelRowType, - List states) { + List states, + JobContext jobContext) { this.table = table; - this.tableWriteBuilder = this.table.newBatchWriteBuilder().withOverwrite(); + this.tableWriteBuilder = + JobContextUtil.isBatchJob(jobContext) + ? 
this.table.newBatchWriteBuilder().withOverwrite() + : this.table.newStreamWriteBuilder(); this.tableWrite = tableWriteBuilder.newWrite(); this.seaTunnelRowType = seaTunnelRowType; this.context = context; + this.jobContext = jobContext; if (Objects.isNull(states) || states.isEmpty()) { return; } this.commitUser = states.get(0).getCommitUser(); this.checkpointId = states.get(0).getCheckpointId(); - try (BatchTableCommit tableCommit = tableWriteBuilder.newCommit()) { + try (TableCommit tableCommit = tableWriteBuilder.newCommit()) { List commitables = states.stream() .map(PaimonSinkState::getCommittables) .flatMap(List::stream) .collect(Collectors.toList()); log.info("Trying to recommit states {}", commitables); - tableCommit.commit(commitables); + if (JobContextUtil.isBatchJob(jobContext)) { + log.debug("Trying to recommit states batch mode"); + ((BatchTableCommit) tableCommit).commit(commitables); + } else { + log.debug("Trying to recommit states streaming mode"); + ((StreamTableCommit) tableCommit).commit(Objects.hash(commitables), commitables); + } } catch (Exception e) { throw new PaimonConnectorException( PaimonConnectorErrorCode.TABLE_WRITE_COMMIT_FAILED, e); @@ -117,7 +146,13 @@ public void write(SeaTunnelRow element) throws IOException { @Override public Optional prepareCommit() throws IOException { try { - List fileCommittables = tableWrite.prepareCommit(); + List fileCommittables; + if (JobContextUtil.isBatchJob(jobContext)) { + fileCommittables = ((BatchTableWrite) tableWrite).prepareCommit(); + } else { + fileCommittables = + ((StreamTableWrite) tableWrite).prepareCommit(false, committables.size()); + } committables.addAll(fileCommittables); return Optional.of(new PaimonCommitInfo(fileCommittables)); } catch (Exception e) { diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/SupportLoadTable.java 
b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/SupportLoadTable.java new file mode 100644 index 00000000000..734762e23ca --- /dev/null +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/SupportLoadTable.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.paimon.sink; + +public interface SupportLoadTable { + void setLoadTable(T table); +} diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonAggregatedCommitter.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonAggregatedCommitter.java index 987d8fbb807..2c0be5d4241 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonAggregatedCommitter.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/commit/PaimonAggregatedCommitter.java @@ -17,14 +17,20 @@ package org.apache.seatunnel.connectors.seatunnel.paimon.sink.commit; +import org.apache.seatunnel.api.common.JobContext; import org.apache.seatunnel.api.sink.SinkAggregatedCommitter; +import org.apache.seatunnel.api.sink.SupportMultiTableSinkAggregatedCommitter; import org.apache.seatunnel.connectors.seatunnel.paimon.exception.PaimonConnectorErrorCode; import org.apache.seatunnel.connectors.seatunnel.paimon.exception.PaimonConnectorException; +import org.apache.seatunnel.connectors.seatunnel.paimon.utils.JobContextUtil; import org.apache.paimon.operation.Lock; import org.apache.paimon.table.Table; import org.apache.paimon.table.sink.BatchTableCommit; import org.apache.paimon.table.sink.CommitMessage; +import org.apache.paimon.table.sink.StreamTableCommit; +import org.apache.paimon.table.sink.TableCommit; +import org.apache.paimon.table.sink.WriteBuilder; import lombok.extern.slf4j.Slf4j; @@ -32,35 +38,49 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Objects; import java.util.stream.Collectors; /** Paimon connector aggregated committer class */ @Slf4j public class PaimonAggregatedCommitter - implements 
SinkAggregatedCommitter { + implements SinkAggregatedCommitter, + SupportMultiTableSinkAggregatedCommitter { private static final long serialVersionUID = 1L; private final Lock.Factory localFactory = Lock.emptyFactory(); - private final Table table; + private final WriteBuilder tableWriteBuilder; - public PaimonAggregatedCommitter(Table table) { - this.table = table; + private final JobContext jobContext; + + public PaimonAggregatedCommitter(Table table, JobContext jobContext) { + this.jobContext = jobContext; + this.tableWriteBuilder = + JobContextUtil.isBatchJob(jobContext) + ? table.newBatchWriteBuilder() + : table.newStreamWriteBuilder(); } @Override public List commit( List aggregatedCommitInfo) throws IOException { - try (BatchTableCommit tableCommit = - table.newBatchWriteBuilder().withOverwrite().newCommit()) { + try (TableCommit tableCommit = tableWriteBuilder.newCommit()) { List fileCommittables = aggregatedCommitInfo.stream() .map(PaimonAggregatedCommitInfo::getCommittables) .flatMap(List::stream) .flatMap(List::stream) .collect(Collectors.toList()); - tableCommit.commit(fileCommittables); + if (JobContextUtil.isBatchJob(jobContext)) { + log.debug("Trying to commit states batch mode"); + ((BatchTableCommit) tableCommit).commit(fileCommittables); + } else { + log.debug("Trying to commit states streaming mode"); + ((StreamTableCommit) tableCommit) + .commit(Objects.hash(fileCommittables), fileCommittables); + } } catch (Exception e) { throw new PaimonConnectorException( PaimonConnectorErrorCode.TABLE_WRITE_COMMIT_FAILED, diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/JobContextUtil.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/JobContextUtil.java new file mode 100644 index 00000000000..3a4d9b72d40 --- /dev/null +++ 
b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/JobContextUtil.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.paimon.utils; + +import org.apache.seatunnel.api.common.JobContext; +import org.apache.seatunnel.common.constants.JobMode; + +import lombok.extern.slf4j.Slf4j; + +/** Job env util */ +@Slf4j +public class JobContextUtil { + + public static boolean isBatchJob(JobContext jobContext) { + return jobContext.getJobMode().equals(JobMode.BATCH); + } +} diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowConverter.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowConverter.java index 44f8fb2624b..6b9a6bf01c5 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowConverter.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowConverter.java @@ -333,6 +333,10 @@ public static InternalRow convert( 
SeaTunnelRow seaTunnelRow, SeaTunnelRowType seaTunnelRowType) { BinaryRow binaryRow = new BinaryRow(seaTunnelRowType.getTotalFields()); BinaryWriter binaryWriter = new BinaryRowWriter(binaryRow); + // Convert SeaTunnel RowKind to Paimon RowKind + org.apache.paimon.types.RowKind rowKind = + RowKindConverter.convertSeaTunnelRowKind2PaimonRowKind(seaTunnelRow.getRowKind()); + binaryRow.setRowKind(rowKind); SeaTunnelDataType[] fieldTypes = seaTunnelRowType.getFieldTypes(); for (int i = 0; i < fieldTypes.length; i++) { // judge the field is or not equals null @@ -393,8 +397,8 @@ public static InternalRow convert( MapType mapType = (MapType) seaTunnelRowType.getFieldType(i); SeaTunnelDataType keyType = mapType.getKeyType(); SeaTunnelDataType valueType = mapType.getValueType(); - DataType paimonKeyType = RowTypeConverter.convert(keyType); - DataType paimonValueType = RowTypeConverter.convert(valueType); + DataType paimonKeyType = RowTypeConverter.reconvert(keyType); + DataType paimonValueType = RowTypeConverter.reconvert(valueType); Map field = (Map) seaTunnelRow.getField(i); Object[] keys = field.keySet().toArray(new Object[0]); Object[] values = field.values().toArray(new Object[0]); @@ -411,13 +415,13 @@ public static InternalRow convert( i, paimonArray, new InternalArraySerializer( - RowTypeConverter.convert(arrayType.getElementType()))); + RowTypeConverter.reconvert(arrayType.getElementType()))); break; case ROW: SeaTunnelDataType rowType = seaTunnelRowType.getFieldType(i); Object row = seaTunnelRow.getField(i); InternalRow paimonRow = convert((SeaTunnelRow) row, (SeaTunnelRowType) rowType); - RowType paimonRowType = RowTypeConverter.convert((SeaTunnelRowType) rowType); + RowType paimonRowType = RowTypeConverter.reconvert((SeaTunnelRowType) rowType); binaryWriter.writeRow(i, paimonRow, new InternalRowSerializer(paimonRowType)); break; default: diff --git 
a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowKindConverter.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowKindConverter.java new file mode 100644 index 00000000000..ce6a172e431 --- /dev/null +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowKindConverter.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.paimon.utils; + +import org.apache.seatunnel.api.table.type.RowKind; +import org.apache.seatunnel.common.exception.CommonErrorCode; +import org.apache.seatunnel.connectors.seatunnel.paimon.exception.PaimonConnectorException; + +import org.apache.paimon.data.InternalRow; + +public class RowKindConverter { + + /** + * Convert SeaTunnel RowKind {@link RowKind} to Paimon RowKind {@link InternalRow} + * + * @param seaTunnelRowInd + * @return + */ + public static org.apache.paimon.types.RowKind convertSeaTunnelRowKind2PaimonRowKind( + RowKind seaTunnelRowInd) { + switch (seaTunnelRowInd) { + case DELETE: + return org.apache.paimon.types.RowKind.DELETE; + case UPDATE_AFTER: + return org.apache.paimon.types.RowKind.UPDATE_AFTER; + case UPDATE_BEFORE: + return org.apache.paimon.types.RowKind.UPDATE_BEFORE; + case INSERT: + return org.apache.paimon.types.RowKind.INSERT; + default: + throw new PaimonConnectorException( + CommonErrorCode.UNSUPPORTED_DATA_TYPE, + "Unsupported rowKind type " + seaTunnelRowInd.shortString()); + } + } +} diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverter.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverter.java index 4dfd6b69fa2..16863ebff5f 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverter.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverter.java @@ -43,6 +43,7 @@ import org.apache.paimon.types.MapType; import org.apache.paimon.types.RowType; import org.apache.paimon.types.SmallIntType; +import org.apache.paimon.types.TimeType; import org.apache.paimon.types.TimestampType; import org.apache.paimon.types.TinyIntType; import 
org.apache.paimon.types.VarBinaryType; @@ -70,13 +71,93 @@ public static SeaTunnelRowType convert(RowType rowType) { return new SeaTunnelRowType(fieldNames, dataTypes); } + /** + * Convert Paimon row type {@link DataType} to SeaTunnel row type {@link SeaTunnelDataType} + * + * @param dataType Paimon data type + * @return SeaTunnel data type {@link SeaTunnelDataType} + */ + public static SeaTunnelDataType convert(DataType dataType) { + SeaTunnelDataType seaTunnelDataType; + PaimonToSeaTunnelTypeVisitor paimonToSeaTunnelTypeVisitor = + PaimonToSeaTunnelTypeVisitor.INSTANCE; + switch (dataType.getTypeRoot()) { + case CHAR: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((CharType) dataType); + break; + case VARCHAR: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((VarCharType) dataType); + break; + case BOOLEAN: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((BooleanType) dataType); + break; + case BINARY: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((BinaryType) dataType); + break; + case VARBINARY: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((VarBinaryType) dataType); + break; + case DECIMAL: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((DecimalType) dataType); + break; + case TINYINT: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((TinyIntType) dataType); + break; + case SMALLINT: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((SmallIntType) dataType); + break; + case INTEGER: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((IntType) dataType); + break; + case BIGINT: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((BigIntType) dataType); + break; + case FLOAT: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((FloatType) dataType); + break; + case DOUBLE: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((DoubleType) dataType); + break; + case DATE: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((DateType) dataType); + break; + case 
TIME_WITHOUT_TIME_ZONE: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((TimeType) dataType); + break; + case TIMESTAMP_WITHOUT_TIME_ZONE: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((TimestampType) dataType); + break; + case TIMESTAMP_WITH_LOCAL_TIME_ZONE: + seaTunnelDataType = + paimonToSeaTunnelTypeVisitor.visit((LocalZonedTimestampType) dataType); + break; + case ARRAY: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((ArrayType) dataType); + break; + case MAP: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((MapType) dataType); + break; + case ROW: + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((RowType) dataType); + break; + default: + String errorMsg = + String.format( + "Paimon dataType not support this genericType [%s]", + dataType.asSQLString()); + throw new PaimonConnectorException( + CommonErrorCodeDeprecated.UNSUPPORTED_DATA_TYPE, errorMsg); + } + return seaTunnelDataType; + } + /** * Convert SeaTunnel row type {@link SeaTunnelRowType} to Paimon row type {@link RowType} * * @param seaTunnelRowType SeaTunnel row type {@link SeaTunnelRowType} * @return Paimon row type {@link RowType} */ - public static RowType convert(SeaTunnelRowType seaTunnelRowType) { + public static RowType reconvert(SeaTunnelRowType seaTunnelRowType) { SeaTunnelDataType[] fieldTypes = seaTunnelRowType.getFieldTypes(); DataType[] dataTypes = Arrays.stream(fieldTypes) @@ -96,7 +177,7 @@ public static RowType convert(SeaTunnelRowType seaTunnelRowType) { * @param dataType SeaTunnel data type {@link SeaTunnelDataType} * @return Paimon data type {@link DataType} */ - public static DataType convert(SeaTunnelDataType dataType) { + public static DataType reconvert(SeaTunnelDataType dataType) { return SeaTunnelTypeToPaimonVisitor.INSTANCE.visit(dataType); } diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/SchemaUtil.java 
b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/SchemaUtil.java new file mode 100644 index 00000000000..c03a77149c9 --- /dev/null +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/SchemaUtil.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.paimon.utils; + +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.PrimaryKey; +import org.apache.seatunnel.api.table.catalog.TableSchema; +import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.connectors.seatunnel.paimon.data.PaimonTypeMapper; + +import org.apache.paimon.schema.Schema; +import org.apache.paimon.types.DataType; + +import java.util.Objects; + +/** The util seatunnel schema to paimon schema */ +public class SchemaUtil { + + public static DataType toPaimonType(Column column) { + return PaimonTypeMapper.INSTANCE.reconvert(column); + } + + public static Schema toPaimonSchema(TableSchema tableSchema) { + Schema.Builder paiSchemaBuilder = Schema.newBuilder(); + for (int i = 0; i < tableSchema.getColumns().size(); i++) { + Column column = tableSchema.getColumns().get(i); + paiSchemaBuilder.column(column.getName(), toPaimonType(column)); + } + PrimaryKey primaryKey = tableSchema.getPrimaryKey(); + if (Objects.nonNull(primaryKey) && primaryKey.getColumnNames().size() > 0) { + paiSchemaBuilder.primaryKey(primaryKey.getColumnNames()); + } + return paiSchemaBuilder.build(); + } + + public static SeaTunnelDataType toSeaTunnelType(DataType dataType) { + return PaimonTypeMapper.INSTANCE.convert(dataType).getDataType(); + } +} diff --git a/seatunnel-connectors-v2/connector-paimon/src/test/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverterTest.java b/seatunnel-connectors-v2/connector-paimon/src/test/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverterTest.java index f32b87f0070..f828be06505 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/test/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverterTest.java +++ 
b/seatunnel-connectors-v2/connector-paimon/src/test/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverterTest.java @@ -103,7 +103,7 @@ public void paimonToSeaTunnel() { @Test public void seaTunnelToPaimon() { - RowType convert = RowTypeConverter.convert(seaTunnelRowType); + RowType convert = RowTypeConverter.reconvert(seaTunnelRowType); Assertions.assertEquals(convert, rowType); } } diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/pom.xml b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/pom.xml index 4af6e8436e8..69ea9a9f74f 100644 --- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/pom.xml +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/pom.xml @@ -30,16 +30,25 @@ org.apache.seatunnel connector-fake ${project.version} + test org.apache.seatunnel connector-paimon ${project.version} + test + + + org.apache.seatunnel + seatunnel-hadoop3-3.1.4-uber + optional + test org.apache.seatunnel connector-assert ${project.version} + test diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/java/org/apache/seatunnel/e2e/connector/paimon/PaimonSinkCDCIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/java/org/apache/seatunnel/e2e/connector/paimon/PaimonSinkCDCIT.java new file mode 100644 index 00000000000..a960f7d4d37 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/java/org/apache/seatunnel/e2e/connector/paimon/PaimonSinkCDCIT.java @@ -0,0 +1,260 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.e2e.connector.paimon; + +import org.apache.seatunnel.common.utils.FileUtils; +import org.apache.seatunnel.e2e.common.TestResource; +import org.apache.seatunnel.e2e.common.TestSuiteBase; +import org.apache.seatunnel.e2e.common.container.ContainerExtendedFactory; +import org.apache.seatunnel.e2e.common.container.EngineType; +import org.apache.seatunnel.e2e.common.container.TestContainer; +import org.apache.seatunnel.e2e.common.junit.DisabledOnContainer; + +import org.apache.paimon.catalog.Catalog; +import org.apache.paimon.catalog.CatalogContext; +import org.apache.paimon.catalog.CatalogFactory; +import org.apache.paimon.catalog.Identifier; +import org.apache.paimon.data.InternalRow; +import org.apache.paimon.options.Options; +import org.apache.paimon.reader.RecordReader; +import org.apache.paimon.table.Table; +import org.apache.paimon.table.source.ReadBuilder; +import org.apache.paimon.table.source.TableRead; +import org.apache.paimon.table.source.TableScan; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.TestTemplate; +import org.testcontainers.containers.Container; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; +import lombok.extern.slf4j.Slf4j; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.awaitility.Awaitility.given; + +@DisabledOnContainer( + value = {}, + type = 
{EngineType.SPARK, EngineType.FLINK}, + disabledReason = + "Spark and Flink engine can not auto create paimon table on worker node in local file(e.g flink tm) by savemode feature which can lead error") +@Slf4j +public class PaimonSinkCDCIT extends TestSuiteBase implements TestResource { + private static final String CATALOG_ROOT_DIR = "/tmp/"; + private static final String NAMESPACE = "paimon"; + private static final String NAMESPACE_TAR = "paimon.tar.gz"; + private static final String CATALOG_DIR = CATALOG_ROOT_DIR + NAMESPACE + "/"; + private static final String TARGET_TABLE = "st_test"; + private static final String TARGET_DATABASE = "seatunnel_namespace"; + private static final String FAKE_TABLE1 = "FakeTable1"; + private static final String FAKE_DATABASE1 = "FakeDatabase1"; + private static final String FAKE_TABLE2 = "FakeTable1"; + private static final String FAKE_DATABASE2 = "FakeDatabase2"; + + @BeforeAll + @Override + public void startUp() throws Exception {} + + @AfterAll + @Override + public void tearDown() throws Exception {} + + @TestTemplate + public void testFakeCDCSinkPaimon(TestContainer container) throws Exception { + Container.ExecResult execResult = container.executeJob("/fake_cdc_sink_paimon_case1.conf"); + Assertions.assertEquals(0, execResult.getExitCode()); + + given().ignoreExceptions() + .await() + .atLeast(100L, TimeUnit.MILLISECONDS) + .atMost(30L, TimeUnit.SECONDS) + .untilAsserted( + () -> { + // copy paimon to local + container.executeExtraCommands(containerExtendedFactory); + List paimonRecords = + loadPaimonData(TARGET_DATABASE, TARGET_TABLE); + Assertions.assertEquals(2, paimonRecords.size()); + paimonRecords.forEach( + paimonRecord -> { + if (paimonRecord.getPkId() == 1) { + Assertions.assertEquals("A_1", paimonRecord.getName()); + } + if (paimonRecord.getPkId() == 3) { + Assertions.assertEquals("C", paimonRecord.getName()); + } + }); + }); + + cleanPaimonTable(container); + } + + @TestTemplate + public void 
testFakeMultipleTableSinkPaimon(TestContainer container) throws Exception { + Container.ExecResult execResult = container.executeJob("/fake_cdc_sink_paimon_case2.conf"); + Assertions.assertEquals(0, execResult.getExitCode()); + + given().ignoreExceptions() + .await() + .atLeast(100L, TimeUnit.MILLISECONDS) + .atMost(30L, TimeUnit.SECONDS) + .untilAsserted( + () -> { + // copy paimon to local + container.executeExtraCommands(containerExtendedFactory); + // Check FakeDatabase1.FakeTable1 + List fake1PaimonRecords = + loadPaimonData(FAKE_DATABASE1, FAKE_TABLE1); + Assertions.assertEquals(2, fake1PaimonRecords.size()); + fake1PaimonRecords.forEach( + paimonRecord -> { + if (paimonRecord.getPkId() == 1) { + Assertions.assertEquals("A_1", paimonRecord.getName()); + } + if (paimonRecord.getPkId() == 3) { + Assertions.assertEquals("C", paimonRecord.getName()); + } + }); + // Check FakeDatabase2.FakeTable1 + List fake2PaimonRecords = + loadPaimonData(FAKE_DATABASE2, FAKE_TABLE2); + Assertions.assertEquals(2, fake2PaimonRecords.size()); + fake2PaimonRecords.forEach( + paimonRecord -> { + if (paimonRecord.getPkId() == 100) { + Assertions.assertEquals( + "A_100", paimonRecord.getName()); + } + if (paimonRecord.getPkId() == 200) { + Assertions.assertEquals("C", paimonRecord.getName()); + } + }); + }); + + cleanPaimonTable(container); + } + + protected final ContainerExtendedFactory cleanContainerExtendedFactory = + genericContainer -> + genericContainer.execInContainer("sh", "-c", "rm -rf " + CATALOG_DIR + "**"); + + private void cleanPaimonTable(TestContainer container) + throws IOException, InterruptedException { + // clean table + container.executeExtraCommands(cleanContainerExtendedFactory); + } + + protected final ContainerExtendedFactory containerExtendedFactory = + container -> { + FileUtils.deleteFile(CATALOG_ROOT_DIR + NAMESPACE_TAR); + FileUtils.createNewDir(CATALOG_DIR); + container.execInContainer( + "sh", + "-c", + "cd " + + CATALOG_ROOT_DIR + + " && tar -czvf " + 
+ NAMESPACE_TAR + + " " + + NAMESPACE); + container.copyFileFromContainer( + CATALOG_ROOT_DIR + NAMESPACE_TAR, CATALOG_ROOT_DIR + NAMESPACE_TAR); + extractFiles(); + }; + + private void extractFiles() { + ProcessBuilder processBuilder = new ProcessBuilder(); + processBuilder.command( + "sh", "-c", "cd " + CATALOG_ROOT_DIR + " && tar -zxvf " + NAMESPACE_TAR); + try { + Process process = processBuilder.start(); + // wait command completed + int exitCode = process.waitFor(); + if (exitCode == 0) { + log.info("Extract files successful."); + } else { + log.error("Extract files failed with exit code " + exitCode); + } + } catch (IOException | InterruptedException e) { + e.printStackTrace(); + } + } + + private List loadPaimonData(String dbName, String tbName) throws Exception { + Table table = getTable(dbName, tbName); + ReadBuilder readBuilder = table.newReadBuilder(); + TableScan.Plan plan = readBuilder.newScan().plan(); + TableRead tableRead = readBuilder.newRead(); + List result = new ArrayList<>(); + log.info( + "====================================Paimon data==========================================="); + log.info( + "=========================================================================================="); + log.info( + "=========================================================================================="); + try (RecordReader reader = tableRead.createReader(plan)) { + reader.forEachRemaining( + row -> { + result.add(new PaimonRecord(row.getLong(0), row.getString(1).toString())); + log.info("key_id:" + row.getLong(0) + ", name:" + row.getString(1)); + }); + } + log.info( + "=========================================================================================="); + log.info( + "=========================================================================================="); + log.info( + "=========================================================================================="); + return result; + } + + private Table getTable(String dbName, String 
tbName) { + try { + return getCatalog().getTable(getIdentifier(dbName, tbName)); + } catch (Catalog.TableNotExistException e) { + // do something + throw new RuntimeException("table not exist"); + } + } + + private Identifier getIdentifier(String dbName, String tbName) { + return Identifier.create(dbName, tbName); + } + + private Catalog getCatalog() { + Options options = new Options(); + options.set("warehouse", "file://" + CATALOG_DIR); + Catalog catalog = CatalogFactory.createCatalog(CatalogContext.create(options)); + return catalog; + } + + @Data + @NoArgsConstructor + @AllArgsConstructor + public class PaimonRecord { + private Long pkId; + private String name; + } +} diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case1.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case1.conf new file mode 100644 index 00000000000..59e3a0cf727 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case1.conf @@ -0,0 +1,86 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FakeSource { + schema = { + fields { + pk_id = bigint + name = string + score = int + } + primaryKey { + name = "pk_id" + columnNames = [pk_id] + } + } + rows = [ + { + kind = INSERT + fields = [1, "A", 100] + }, + { + kind = INSERT + fields = [2, "B", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + } + { + kind = UPDATE_BEFORE + fields = [1, "A", 100] + }, + { + kind = UPDATE_AFTER + fields = [1, "A_1", 100] + }, + { + kind = DELETE + fields = [2, "B", 100] + } + ] + } +} + +sink { + Paimon { + warehouse = "file:///tmp/paimon" + database = "seatunnel_namespace" + table = "st_test" + } +} diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case2.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case2.conf new file mode 100644 index 00000000000..ddc92268710 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case2.conf @@ -0,0 +1,142 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FakeSource { + tables_configs = [ + { + schema = { + table = "FakeDatabase1.FakeTable1" + fields { + pk_id = bigint + name = string + score = int + } + primaryKey { + name = "pk_id" + columnNames = [pk_id] + } + } + rows = [ + { + kind = INSERT + fields = [1, "A", 100] + }, + { + kind = INSERT + fields = [2, "B", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + } + { + kind = UPDATE_BEFORE + fields = [1, "A", 100] + }, + { + kind = UPDATE_AFTER + fields = [1, "A_1", 100] + }, + { + kind = DELETE + fields = [2, "B", 100] + } + ] + }, + { + schema = { + table = "FakeDatabase2.FakeTable1" + fields { + pk_id = bigint + name = string + } + primaryKey { + name = "pk_id" + columnNames = [pk_id] + } + } + rows = [ + { + kind = INSERT + fields = [100, "A"] + }, + { + kind = INSERT + fields = [200, "B"] + }, + { + kind = INSERT + fields = [300, "C"] + }, + { + kind = INSERT + fields = [300, "C"] + }, + { + kind = INSERT + fields = [300, "C"] + }, + { + kind = INSERT + fields = [300, "C"] + } + { + kind = UPDATE_BEFORE + fields = [100, "A"] + }, + { + kind = UPDATE_AFTER + fields = [100, "A_100"] + }, + { + kind = DELETE + fields = [200, "B"] + } + ] + } + ] + } +} + +sink { + Paimon { + warehouse = "file:///tmp/paimon" + database = 
"${database_name}" + table = "${table_name}" + } +} diff --git a/seatunnel-shade/seatunnel-hadoop3-3.1.4-uber/pom.xml b/seatunnel-shade/seatunnel-hadoop3-3.1.4-uber/pom.xml index 00b55265c49..be5ced9214a 100644 --- a/seatunnel-shade/seatunnel-hadoop3-3.1.4-uber/pom.xml +++ b/seatunnel-shade/seatunnel-hadoop3-3.1.4-uber/pom.xml @@ -47,6 +47,11 @@ hadoop-client ${hadoop3.version} + + org.xerial.snappy + snappy-java + 1.1.10.4 + From eb46c489d965ef95f4c77917a11cf59a720a998e Mon Sep 17 00:00:00 2001 From: Marvin <29311598@qq.com> Date: Tue, 19 Mar 2024 17:42:31 +0800 Subject: [PATCH 20/59] [Bug] Fix minus constant error in SQLTransform (#6533) --- .../resources/sql_transform/func_numeric.conf | 9 +++++++- .../transform/sql/zeta/ZetaSQLFunction.java | 21 +++++++++++++++++++ .../transform/sql/zeta/ZetaSQLType.java | 4 ++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/seatunnel-e2e/seatunnel-transforms-v2-e2e/seatunnel-transforms-v2-e2e-part-2/src/test/resources/sql_transform/func_numeric.conf b/seatunnel-e2e/seatunnel-transforms-v2-e2e/seatunnel-transforms-v2-e2e-part-2/src/test/resources/sql_transform/func_numeric.conf index e61faa803f3..87c25f6a479 100644 --- a/seatunnel-e2e/seatunnel-transforms-v2-e2e/seatunnel-transforms-v2-e2e-part-2/src/test/resources/sql_transform/func_numeric.conf +++ b/seatunnel-e2e/seatunnel-transforms-v2-e2e/seatunnel-transforms-v2-e2e-part-2/src/test/resources/sql_transform/func_numeric.conf @@ -53,7 +53,7 @@ transform { Sql { source_table_name = "fake" result_table_name = "fake1" - query = "select abs(c1) as c1_1, acos(id) as id1, asin(c2) as c2_1, atan(c2) as c2_2, cos(c2) as c2_3, cosh(c2) as c2_4, sin(c2) as c2_5, sinh(c2) as c2_6, tan(c3/4) as c3_1, tanh(c2) as c2_7, mod(c4, 5) as c4_1, mod(c4, 5.4) as c4_2, ceil(c5) as c5_1, exp(c10) as c10_1, floor(c5) as c5_2, ln(c5) as c5_3, log(10,c5) as c5_4, log10(c6) as c6_1, radians(c7) as c7_1, sqrt(c8) as c8_1, pi() as pi, power(c5,2) as c5_5, rand() as rand, round(c9,2) as 
c9_1, sign(c1) as c1_2, trunc(c9,2) as c9_2 from fake" + query = "select abs(-10.3) as c0_1, abs(c1) as c1_1, acos(id) as id1, asin(c2) as c2_1, atan(c2) as c2_2, cos(c2) as c2_3, cosh(c2) as c2_4, sin(c2) as c2_5, sinh(c2) as c2_6, tan(c3/4) as c3_1, tanh(c2) as c2_7, mod(c4, 5) as c4_1, mod(c4, 5.4) as c4_2, ceil(c5) as c5_1, exp(c10) as c10_1, floor(c5) as c5_2, ln(c5) as c5_3, log(10,c5) as c5_4, log10(c6) as c6_1, radians(c7) as c7_1, sqrt(c8) as c8_1, pi() as pi, power(c5,2) as c5_5, rand() as rand, round(c9,2) as c9_1, sign(c1) as c1_2, trunc(c9,2) as c9_2 from fake" } } @@ -62,6 +62,13 @@ sink { source_table_name = "fake1" rules = { field_rules = [ + { + field_name = "c0_1" + field_type = "double" + field_value = [ + {equals_to = 10.3} + ] + }, { field_name = "c1_1" field_type = "double" diff --git a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLFunction.java b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLFunction.java index 30794af42f4..7a8b83d4dbd 100644 --- a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLFunction.java +++ b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLFunction.java @@ -42,6 +42,7 @@ import net.sf.jsqlparser.expression.LongValue; import net.sf.jsqlparser.expression.NullValue; import net.sf.jsqlparser.expression.Parenthesis; +import net.sf.jsqlparser.expression.SignedExpression; import net.sf.jsqlparser.expression.StringValue; import net.sf.jsqlparser.expression.TimeKeyExpression; import net.sf.jsqlparser.expression.WhenClause; @@ -187,6 +188,26 @@ public Object computeForValue(Expression expression, Object[] inputFields) { if (expression instanceof NullValue) { return null; } + if (expression instanceof SignedExpression) { + SignedExpression signedExpression = (SignedExpression) expression; + if (signedExpression.getSign() == '-') { + Object value = 
computeForValue(signedExpression.getExpression(), inputFields); + if (value instanceof Integer) { + return -((Integer) value); + } + if (value instanceof Long) { + return -((Long) value); + } + if (value instanceof Double) { + return -((Double) value); + } + if (value instanceof Number) { + return -((Number) value).doubleValue(); + } + } else { + return computeForValue(signedExpression, inputFields); + } + } if (expression instanceof DoubleValue) { return ((DoubleValue) expression).getValue(); } diff --git a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLType.java b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLType.java index 635ce3274f2..4529dfffcc7 100644 --- a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLType.java +++ b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/zeta/ZetaSQLType.java @@ -39,6 +39,7 @@ import net.sf.jsqlparser.expression.LongValue; import net.sf.jsqlparser.expression.NullValue; import net.sf.jsqlparser.expression.Parenthesis; +import net.sf.jsqlparser.expression.SignedExpression; import net.sf.jsqlparser.expression.StringValue; import net.sf.jsqlparser.expression.TimeKeyExpression; import net.sf.jsqlparser.expression.WhenClause; @@ -88,6 +89,9 @@ public SeaTunnelDataType getExpressionType(Expression expression) { if (expression instanceof NullValue) { return BasicType.VOID_TYPE; } + if (expression instanceof SignedExpression) { + return getExpressionType(((SignedExpression) expression).getExpression()); + } if (expression instanceof DoubleValue) { return BasicType.DOUBLE_TYPE; } From b0d70ce2240ffca4b8b8a3ee3cdc9cdb4b2ef23a Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Wed, 20 Mar 2024 12:08:28 +0800 Subject: [PATCH 21/59] [Improve] Add SaveMode log of process detail (#6375) --- .../api/sink/DefaultSaveModeHandler.java | 83 +++- .../api/sink/SaveModeExecuteWrapper.java | 40 ++ 
.../seatunnel/api/sink/SaveModeHandler.java | 11 + .../seatunnel/api/table/catalog/Catalog.java | 13 + .../api/table/catalog/InfoPreviewResult.java | 36 ++ .../api/table/catalog/PreviewResult.java | 38 ++ .../api/table/catalog/SQLPreviewResult.java | 37 ++ .../doris/catalog/DorisCatalog.java | 31 +- .../doris/util/DorisCatalogUtil.java | 4 + .../doris/catalog/PreviewActionTest.java | 119 +++++ .../catalog/ElasticSearchCatalog.java | 21 + .../catalog/PreviewActionTest.java | 96 ++++ .../iceberg/catalog/IcebergCatalog.java | 28 +- .../iceberg/catalog/PreviewActionTest.java | 111 +++++ .../jdbc/catalog/AbstractJdbcCatalog.java | 21 + .../jdbc/catalog/psql/PostgresCatalog.java | 7 + .../jdbc/catalog/PreviewActionTest.java | 441 ++++++++++++++++++ .../starrocks/catalog/StarRocksCatalog.java | 66 ++- .../starrocks/sink/StarRocksSaveModeUtil.java | 31 +- .../starrocks/catalog/PreviewActionTest.java | 126 +++++ .../catalog/StarRocksCreateTableTest.java | 6 +- .../flink/execution/SinkExecuteProcessor.java | 3 +- .../flink/execution/SinkExecuteProcessor.java | 3 +- .../spark/execution/SinkExecuteProcessor.java | 3 +- .../spark/execution/SinkExecuteProcessor.java | 3 +- .../parse/MultipleTableJobConfigParser.java | 3 +- 26 files changed, 1331 insertions(+), 50 deletions(-) create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SaveModeExecuteWrapper.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/InfoPreviewResult.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/PreviewResult.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/SQLPreviewResult.java create mode 100644 seatunnel-connectors-v2/connector-doris/src/test/java/org/apache/seatunnel/connectors/doris/catalog/PreviewActionTest.java create mode 100644 
seatunnel-connectors-v2/connector-elasticsearch/src/test/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/PreviewActionTest.java create mode 100644 seatunnel-connectors-v2/connector-iceberg/src/test/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/PreviewActionTest.java create mode 100644 seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/PreviewActionTest.java create mode 100644 seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/PreviewActionTest.java diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/DefaultSaveModeHandler.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/DefaultSaveModeHandler.java index 95666589796..bbbe99281b2 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/DefaultSaveModeHandler.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/DefaultSaveModeHandler.java @@ -23,19 +23,26 @@ import org.apache.seatunnel.common.exception.SeaTunnelRuntimeException; import lombok.AllArgsConstructor; +import lombok.extern.slf4j.Slf4j; + +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +import java.util.Optional; import static org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode.SINK_TABLE_NOT_EXIST; import static org.apache.seatunnel.api.common.SeaTunnelAPIErrorCode.SOURCE_ALREADY_HAS_DATA; @AllArgsConstructor +@Slf4j public class DefaultSaveModeHandler implements SaveModeHandler { - public SchemaSaveMode schemaSaveMode; - public DataSaveMode dataSaveMode; - public Catalog catalog; - public TablePath tablePath; - public CatalogTable catalogTable; - public String customSql; + @Nonnull public SchemaSaveMode schemaSaveMode; + @Nonnull public DataSaveMode dataSaveMode; + @Nonnull public Catalog catalog; + @Nonnull public TablePath tablePath; + @Nullable public CatalogTable catalogTable; + @Nullable public String 
customSql; public DefaultSaveModeHandler( SchemaSaveMode schemaSaveMode, @@ -132,17 +139,58 @@ protected boolean tableExists() { } protected void dropTable() { + try { + log.info( + "Dropping table {} with action {}", + tablePath, + catalog.previewAction( + Catalog.ActionType.DROP_TABLE, tablePath, Optional.empty())); + } catch (UnsupportedOperationException ignore) { + log.info("Dropping table {}", tablePath); + } catalog.dropTable(tablePath, true); } protected void createTable() { if (!catalog.databaseExists(tablePath.getDatabaseName())) { - catalog.createDatabase(TablePath.of(tablePath.getDatabaseName(), ""), true); + TablePath databasePath = TablePath.of(tablePath.getDatabaseName(), ""); + try { + log.info( + "Creating database {} with action {}", + tablePath.getDatabaseName(), + catalog.previewAction( + Catalog.ActionType.CREATE_DATABASE, + databasePath, + Optional.empty())); + } catch (UnsupportedOperationException ignore) { + log.info("Creating database {}", tablePath.getDatabaseName()); + } + catalog.createDatabase(databasePath, true); + } + try { + log.info( + "Creating table {} with action {}", + tablePath, + catalog.previewAction( + Catalog.ActionType.CREATE_TABLE, + tablePath, + Optional.ofNullable(catalogTable))); + } catch (UnsupportedOperationException ignore) { + log.info("Creating table {}", tablePath); } catalog.createTable(tablePath, catalogTable, true); } protected void truncateTable() { + try { + log.info( + "Truncating table {} with action {}", + tablePath, + catalog.previewAction( + Catalog.ActionType.TRUNCATE_TABLE, tablePath, Optional.empty())); + } catch (UnsupportedOperationException ignore) { + log.info("Truncating table {}", tablePath); + } catalog.truncateTable(tablePath, true); } @@ -151,9 +199,30 @@ protected boolean dataExists() { } protected void executeCustomSql() { + log.info("Executing custom SQL for table {} with SQL: {}", tablePath, customSql); catalog.executeSql(tablePath, customSql); } + @Override + public TablePath 
getHandleTablePath() { + return tablePath; + } + + @Override + public Catalog getHandleCatalog() { + return catalog; + } + + @Override + public SchemaSaveMode getSchemaSaveMode() { + return schemaSaveMode; + } + + @Override + public DataSaveMode getDataSaveMode() { + return dataSaveMode; + } + @Override public void close() throws Exception { catalog.close(); diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SaveModeExecuteWrapper.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SaveModeExecuteWrapper.java new file mode 100644 index 00000000000..5da173a9a9a --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SaveModeExecuteWrapper.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.api.sink; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class SaveModeExecuteWrapper { + + public SaveModeExecuteWrapper(SaveModeHandler handler) { + this.handler = handler; + } + + public void execute() { + log.info( + "Executing save mode for table: {}, with SchemaSaveMode: {}, DataSaveMode: {} using Catalog: {}", + handler.getHandleTablePath(), + handler.getSchemaSaveMode(), + handler.getDataSaveMode(), + handler.getHandleCatalog().name()); + handler.handleSaveMode(); + } + + private final SaveModeHandler handler; +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SaveModeHandler.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SaveModeHandler.java index 6fe3d8b3c26..e75c2215dda 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SaveModeHandler.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SaveModeHandler.java @@ -17,12 +17,23 @@ package org.apache.seatunnel.api.sink; +import org.apache.seatunnel.api.table.catalog.Catalog; +import org.apache.seatunnel.api.table.catalog.TablePath; + public interface SaveModeHandler extends AutoCloseable { void handleSchemaSaveMode(); void handleDataSaveMode(); + SchemaSaveMode getSchemaSaveMode(); + + DataSaveMode getDataSaveMode(); + + TablePath getHandleTablePath(); + + Catalog getHandleCatalog(); + default void handleSaveMode() { handleSchemaSaveMode(); handleDataSaveMode(); diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/Catalog.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/Catalog.java index 560fa98d3bf..842ec98782d 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/Catalog.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/Catalog.java @@ -262,6 +262,19 @@ default boolean isExistsData(TablePath tablePath) { default void executeSql(TablePath tablePath, String sql) {} + default 
PreviewResult previewAction( + ActionType actionType, TablePath tablePath, Optional catalogTable) { + throw new UnsupportedOperationException("Preview action is not supported"); + } + + enum ActionType { + CREATE_TABLE, + CREATE_DATABASE, + DROP_TABLE, + DROP_DATABASE, + TRUNCATE_TABLE + } + // todo: Support for update table metadata } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/InfoPreviewResult.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/InfoPreviewResult.java new file mode 100644 index 00000000000..9c6e06fce9c --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/InfoPreviewResult.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.api.table.catalog; + +public class InfoPreviewResult extends PreviewResult { + private final String info; + + public String getInfo() { + return info; + } + + public InfoPreviewResult(String info) { + super(Type.INFO); + this.info = info; + } + + @Override + public String toString() { + return info; + } +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/PreviewResult.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/PreviewResult.java new file mode 100644 index 00000000000..ad4b3cc1021 --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/PreviewResult.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.api.table.catalog; + +/** The result of a SQL preview action in {@link Catalog#previewAction}. 
*/ +public abstract class PreviewResult { + + private final Type type; + + public PreviewResult(Type type) { + this.type = type; + } + + public Type getType() { + return type; + } + + public enum Type { + SQL, + INFO, + OTHER + } +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/SQLPreviewResult.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/SQLPreviewResult.java new file mode 100644 index 00000000000..6cdcc33fc25 --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/SQLPreviewResult.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.api.table.catalog; + +public class SQLPreviewResult extends PreviewResult { + + private final String sql; + + public String getSql() { + return sql; + } + + public SQLPreviewResult(String sql) { + super(Type.SQL); + this.sql = sql; + } + + @Override + public String toString() { + return sql; + } +} diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/catalog/DorisCatalog.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/catalog/DorisCatalog.java index 0e5faef550e..1816585731f 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/catalog/DorisCatalog.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/catalog/DorisCatalog.java @@ -20,7 +20,9 @@ import org.apache.seatunnel.api.table.catalog.Catalog; import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.PreviewResult; import org.apache.seatunnel.api.table.catalog.PrimaryKey; +import org.apache.seatunnel.api.table.catalog.SQLPreviewResult; import org.apache.seatunnel.api.table.catalog.TableIdentifier; import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.catalog.TableSchema; @@ -49,6 +51,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; + +import static com.google.common.base.Preconditions.checkArgument; public class DorisCatalog implements Catalog { @@ -339,8 +344,7 @@ public void truncateTable(TablePath tablePath, boolean ignoreIfNotExists) throws TableNotExistException, CatalogException { try { if (ignoreIfNotExists) { - conn.createStatement() - .execute(String.format("TRUNCATE TABLE %s", tablePath.getFullName())); + 
conn.createStatement().execute(DorisCatalogUtil.getTruncateTableQuery(tablePath)); } } catch (Exception e) { throw new CatalogException( @@ -359,4 +363,27 @@ public boolean isExistsData(TablePath tablePath) { throw new CatalogException(String.format("Failed executeSql error %s", sql), e); } } + + @Override + public PreviewResult previewAction( + ActionType actionType, TablePath tablePath, Optional catalogTable) { + if (actionType == ActionType.CREATE_TABLE) { + checkArgument(catalogTable.isPresent(), "CatalogTable cannot be null"); + return new SQLPreviewResult( + DorisCatalogUtil.getCreateTableStatement( + dorisConfig.getCreateTableTemplate(), tablePath, catalogTable.get())); + } else if (actionType == ActionType.DROP_TABLE) { + return new SQLPreviewResult(DorisCatalogUtil.getDropTableQuery(tablePath, true)); + } else if (actionType == ActionType.TRUNCATE_TABLE) { + return new SQLPreviewResult(DorisCatalogUtil.getTruncateTableQuery(tablePath)); + } else if (actionType == ActionType.CREATE_DATABASE) { + return new SQLPreviewResult( + DorisCatalogUtil.getCreateDatabaseQuery(tablePath.getDatabaseName(), true)); + } else if (actionType == ActionType.DROP_DATABASE) { + return new SQLPreviewResult( + DorisCatalogUtil.getDropDatabaseQuery(tablePath.getDatabaseName(), true)); + } else { + throw new UnsupportedOperationException("Unsupported action type: " + actionType); + } + } } diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/util/DorisCatalogUtil.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/util/DorisCatalogUtil.java index 133bcffd33c..f03d488de9e 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/util/DorisCatalogUtil.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/util/DorisCatalogUtil.java @@ -95,6 +95,10 @@ public static String 
getDropTableQuery(TablePath tablePath, boolean ignoreIfNotE return "DROP TABLE " + (ignoreIfNotExists ? "IF EXISTS " : "") + tablePath.getFullName(); } + public static String getTruncateTableQuery(TablePath tablePath) { + return "TRUNCATE TABLE " + tablePath.getFullName(); + } + /** * @param createTableTemplate create table template * @param catalogTable catalog table diff --git a/seatunnel-connectors-v2/connector-doris/src/test/java/org/apache/seatunnel/connectors/doris/catalog/PreviewActionTest.java b/seatunnel-connectors-v2/connector-doris/src/test/java/org/apache/seatunnel/connectors/doris/catalog/PreviewActionTest.java new file mode 100644 index 00000000000..ffca59c5e18 --- /dev/null +++ b/seatunnel-connectors-v2/connector-doris/src/test/java/org/apache/seatunnel/connectors/doris/catalog/PreviewActionTest.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.doris.catalog; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.table.catalog.Catalog; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.PreviewResult; +import org.apache.seatunnel.api.table.catalog.SQLPreviewResult; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.catalog.TableSchema; +import org.apache.seatunnel.api.table.type.BasicType; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Optional; + +public class PreviewActionTest { + + private static final CatalogTable CATALOG_TABLE = + CatalogTable.of( + TableIdentifier.of("catalog", "database", "table"), + TableSchema.builder() + .column( + PhysicalColumn.of( + "test", + BasicType.STRING_TYPE, + (Long) null, + true, + null, + "")) + .build(), + Collections.emptyMap(), + Collections.emptyList(), + "comment"); + + @Test + public void testDorisPreviewAction() { + DorisCatalogFactory factory = new DorisCatalogFactory(); + Catalog catalog = + factory.createCatalog( + "test", + ReadonlyConfig.fromMap( + new HashMap() { + { + put("fenodes", "localhost:9300"); + put("username", "root"); + put("password", "root"); + } + })); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_DATABASE, + "CREATE DATABASE IF NOT EXISTS testddatabase", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_DATABASE, + "DROP DATABASE IF EXISTS testddatabase", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.TRUNCATE_TABLE, + "TRUNCATE TABLE testddatabase.testtable", + Optional.empty()); + assertPreviewResult( + catalog, + 
Catalog.ActionType.DROP_TABLE, + "DROP TABLE IF EXISTS testddatabase.testtable", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_TABLE, + "CREATE TABLE IF NOT EXISTS `testddatabase`.`testtable` (\n" + + "`test` STRING NULL \n" + + ") ENGINE=OLAP\n" + + " UNIQUE KEY ()\n" + + "DISTRIBUTED BY HASH ()\n" + + " PROPERTIES (\n" + + "\"replication_allocation\" = \"tag.location.default: 1\",\n" + + "\"in_memory\" = \"false\",\n" + + "\"storage_format\" = \"V2\",\n" + + "\"disable_auto_compaction\" = \"false\"\n" + + ")", + Optional.of(CATALOG_TABLE)); + } + + private void assertPreviewResult( + Catalog catalog, + Catalog.ActionType actionType, + String expectedSql, + Optional catalogTable) { + PreviewResult previewResult = + catalog.previewAction( + actionType, TablePath.of("testddatabase.testtable"), catalogTable); + Assertions.assertInstanceOf(SQLPreviewResult.class, previewResult); + Assertions.assertEquals(expectedSql, ((SQLPreviewResult) previewResult).getSql()); + } +} diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchCatalog.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchCatalog.java index 23265fa182f..066a69c2dc3 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchCatalog.java +++ b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchCatalog.java @@ -21,7 +21,9 @@ import org.apache.seatunnel.api.configuration.util.ConfigUtil; import org.apache.seatunnel.api.table.catalog.Catalog; import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.InfoPreviewResult; import 
org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.PreviewResult; import org.apache.seatunnel.api.table.catalog.TableIdentifier; import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.catalog.TableSchema; @@ -44,6 +46,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import static com.google.common.base.Preconditions.checkNotNull; @@ -232,4 +235,22 @@ private Map buildTableOptions(TablePath tablePath) { options.put("config", ConfigUtil.convertToJsonString(tablePath)); return options; } + + @Override + public PreviewResult previewAction( + ActionType actionType, TablePath tablePath, Optional catalogTable) { + if (actionType == ActionType.CREATE_TABLE) { + return new InfoPreviewResult("create index " + tablePath.getTableName()); + } else if (actionType == ActionType.DROP_TABLE) { + return new InfoPreviewResult("delete index " + tablePath.getTableName()); + } else if (actionType == ActionType.TRUNCATE_TABLE) { + return new InfoPreviewResult("delete and create index " + tablePath.getTableName()); + } else if (actionType == ActionType.CREATE_DATABASE) { + return new InfoPreviewResult("create index " + tablePath.getTableName()); + } else if (actionType == ActionType.DROP_DATABASE) { + return new InfoPreviewResult("delete index " + tablePath.getTableName()); + } else { + throw new UnsupportedOperationException("Unsupported action type: " + actionType); + } + } } diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/test/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/PreviewActionTest.java b/seatunnel-connectors-v2/connector-elasticsearch/src/test/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/PreviewActionTest.java new file mode 100644 index 00000000000..a81ce8f19aa --- /dev/null +++ 
b/seatunnel-connectors-v2/connector-elasticsearch/src/test/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/PreviewActionTest.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.elasticsearch.catalog; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.table.catalog.Catalog; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.InfoPreviewResult; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.PreviewResult; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.catalog.TableSchema; +import org.apache.seatunnel.api.table.type.BasicType; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Optional; + +public class PreviewActionTest { + + private static final CatalogTable CATALOG_TABLE = + CatalogTable.of( + TableIdentifier.of("catalog", "database", "table"), + 
TableSchema.builder() + .column( + PhysicalColumn.of( + "test", + BasicType.STRING_TYPE, + (Long) null, + true, + null, + "")) + .build(), + Collections.emptyMap(), + Collections.emptyList(), + "comment"); + + @Test + public void testElasticSearchPreviewAction() { + ElasticSearchCatalogFactory factory = new ElasticSearchCatalogFactory(); + Catalog catalog = factory.createCatalog("test", ReadonlyConfig.fromMap(new HashMap<>())); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_DATABASE, + "create index testtable", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_DATABASE, + "delete index testtable", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.TRUNCATE_TABLE, + "delete and create index testtable", + Optional.empty()); + assertPreviewResult( + catalog, Catalog.ActionType.DROP_TABLE, "delete index testtable", Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_TABLE, + "create index testtable", + Optional.of(CATALOG_TABLE)); + } + + private void assertPreviewResult( + Catalog catalog, + Catalog.ActionType actionType, + String expectedSql, + Optional catalogTable) { + PreviewResult previewResult = + catalog.previewAction( + actionType, TablePath.of("testddatabase.testtable"), catalogTable); + Assertions.assertInstanceOf(InfoPreviewResult.class, previewResult); + Assertions.assertEquals(expectedSql, ((InfoPreviewResult) previewResult).getInfo()); + } +} diff --git a/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/IcebergCatalog.java b/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/IcebergCatalog.java index 32beff121ba..520f9bdbac9 100644 --- a/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/IcebergCatalog.java +++ 
b/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/IcebergCatalog.java @@ -20,7 +20,9 @@ import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.table.catalog.Catalog; import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.InfoPreviewResult; import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.PreviewResult; import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.catalog.TableSchema; import org.apache.seatunnel.api.table.catalog.exception.CatalogException; @@ -29,13 +31,11 @@ import org.apache.seatunnel.api.table.catalog.exception.TableAlreadyExistException; import org.apache.seatunnel.api.table.catalog.exception.TableNotExistException; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.connectors.seatunnel.iceberg.IcebergCatalogLoader; import org.apache.seatunnel.connectors.seatunnel.iceberg.config.CommonConfig; import org.apache.seatunnel.connectors.seatunnel.iceberg.utils.SchemaUtils; import org.apache.iceberg.PartitionField; -import org.apache.iceberg.Schema; import org.apache.iceberg.Snapshot; import org.apache.iceberg.Table; import org.apache.iceberg.catalog.Namespace; @@ -49,8 +49,10 @@ import java.io.Closeable; import java.io.IOException; import java.util.List; +import java.util.Optional; import java.util.stream.Collectors; +import static com.google.common.base.Preconditions.checkArgument; import static org.apache.seatunnel.connectors.seatunnel.iceberg.utils.SchemaUtils.toIcebergTableIdentifier; import static org.apache.seatunnel.connectors.seatunnel.iceberg.utils.SchemaUtils.toTablePath; @@ -254,10 +256,22 @@ public CatalogTable toCatalogTable(Table icebergTable, TablePath tablePath) { catalogName); } - public 
Schema toIcebergSchema(TableSchema tableSchema) { - // Generate struct type - SeaTunnelRowType rowType = tableSchema.toPhysicalRowDataType(); - Types.StructType structType = SchemaUtils.toIcebergType(rowType).asStructType(); - return new Schema(structType.fields()); + @Override + public PreviewResult previewAction( + ActionType actionType, TablePath tablePath, Optional catalogTable) { + if (actionType == ActionType.CREATE_TABLE) { + checkArgument(catalogTable.isPresent(), "CatalogTable cannot be null"); + return new InfoPreviewResult("create table " + toIcebergTableIdentifier(tablePath)); + } else if (actionType == ActionType.DROP_TABLE) { + return new InfoPreviewResult("drop table " + toIcebergTableIdentifier(tablePath)); + } else if (actionType == ActionType.TRUNCATE_TABLE) { + return new InfoPreviewResult("truncate table " + toIcebergTableIdentifier(tablePath)); + } else if (actionType == ActionType.CREATE_DATABASE) { + return new InfoPreviewResult("do nothing"); + } else if (actionType == ActionType.DROP_DATABASE) { + return new InfoPreviewResult("do nothing"); + } else { + throw new UnsupportedOperationException("Unsupported action type: " + actionType); + } } } diff --git a/seatunnel-connectors-v2/connector-iceberg/src/test/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/PreviewActionTest.java b/seatunnel-connectors-v2/connector-iceberg/src/test/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/PreviewActionTest.java new file mode 100644 index 00000000000..ca88c39a0c2 --- /dev/null +++ b/seatunnel-connectors-v2/connector-iceberg/src/test/java/org/apache/seatunnel/connectors/seatunnel/iceberg/catalog/PreviewActionTest.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.iceberg.catalog; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.table.catalog.Catalog; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.InfoPreviewResult; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.PreviewResult; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.catalog.TableSchema; +import org.apache.seatunnel.api.table.type.BasicType; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Optional; + +public class PreviewActionTest { + + private static final CatalogTable CATALOG_TABLE = + CatalogTable.of( + TableIdentifier.of("catalog", "database", "table"), + TableSchema.builder() + .column( + PhysicalColumn.of( + "test", + BasicType.STRING_TYPE, + (Long) null, + true, + null, + "")) + .build(), + Collections.emptyMap(), + Collections.emptyList(), + "comment"); + + @Test + public void testElasticSearchPreviewAction() { + IcebergCatalogFactory factory = new IcebergCatalogFactory(); + Catalog catalog = + factory.createCatalog( + "test", + 
ReadonlyConfig.fromMap( + new HashMap() { + { + put("catalog_name", "seatunnel_test"); + put( + "iceberg.catalog.config", + new HashMap() { + { + put("type", "hadoop"); + put( + "warehouse", + "file:///tmp/seatunnel/iceberg/hadoop-sink/"); + } + }); + } + })); + assertPreviewResult( + catalog, Catalog.ActionType.CREATE_DATABASE, "do nothing", Optional.empty()); + assertPreviewResult( + catalog, Catalog.ActionType.DROP_DATABASE, "do nothing", Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.TRUNCATE_TABLE, + "truncate table testddatabase.testtable", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_TABLE, + "drop table testddatabase.testtable", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_TABLE, + "create table testddatabase.testtable", + Optional.of(CATALOG_TABLE)); + } + + private void assertPreviewResult( + Catalog catalog, + Catalog.ActionType actionType, + String expectedSql, + Optional catalogTable) { + PreviewResult previewResult = + catalog.previewAction( + actionType, TablePath.of("testddatabase.testtable"), catalogTable); + Assertions.assertInstanceOf(InfoPreviewResult.class, previewResult); + Assertions.assertEquals(expectedSql, ((InfoPreviewResult) previewResult).getInfo()); + } +} diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/AbstractJdbcCatalog.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/AbstractJdbcCatalog.java index f695cc30cb1..3775f9f47d4 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/AbstractJdbcCatalog.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/AbstractJdbcCatalog.java @@ -22,7 +22,9 @@ import org.apache.seatunnel.api.table.catalog.CatalogTable; import 
org.apache.seatunnel.api.table.catalog.Column; import org.apache.seatunnel.api.table.catalog.ConstraintKey; +import org.apache.seatunnel.api.table.catalog.PreviewResult; import org.apache.seatunnel.api.table.catalog.PrimaryKey; +import org.apache.seatunnel.api.table.catalog.SQLPreviewResult; import org.apache.seatunnel.api.table.catalog.TableIdentifier; import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.catalog.TableSchema; @@ -577,4 +579,23 @@ public boolean isExistsData(TablePath tablePath) { throw new CatalogException(String.format("Failed executeSql error %s", sql), e); } } + + @Override + public PreviewResult previewAction( + ActionType actionType, TablePath tablePath, Optional catalogTable) { + if (actionType == ActionType.CREATE_TABLE) { + checkArgument(catalogTable.isPresent(), "CatalogTable cannot be null"); + return new SQLPreviewResult(getCreateTableSql(tablePath, catalogTable.get())); + } else if (actionType == ActionType.DROP_TABLE) { + return new SQLPreviewResult(getDropTableSql(tablePath)); + } else if (actionType == ActionType.TRUNCATE_TABLE) { + return new SQLPreviewResult(getTruncateTableSql(tablePath)); + } else if (actionType == ActionType.CREATE_DATABASE) { + return new SQLPreviewResult(getCreateDatabaseSql(tablePath.getDatabaseName())); + } else if (actionType == ActionType.DROP_DATABASE) { + return new SQLPreviewResult(getDropDatabaseSql(tablePath.getDatabaseName())); + } else { + throw new UnsupportedOperationException("Unsupported action type: " + actionType); + } + } } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCatalog.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCatalog.java index 30e140c68a2..4697d1999ef 100644 --- 
a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCatalog.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCatalog.java @@ -186,6 +186,13 @@ protected void createTableInternal(TablePath tablePath, CatalogTable table) } } + @Override + protected String getCreateTableSql(TablePath tablePath, CatalogTable table) { + PostgresCreateTableSqlBuilder postgresCreateTableSqlBuilder = + new PostgresCreateTableSqlBuilder(table); + return postgresCreateTableSqlBuilder.build(tablePath); + } + @Override protected String getDropTableSql(TablePath tablePath) { return "DROP TABLE \"" diff --git a/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/PreviewActionTest.java b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/PreviewActionTest.java new file mode 100644 index 00000000000..a0cdf7d8a83 --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/PreviewActionTest.java @@ -0,0 +1,441 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc.catalog; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.table.catalog.Catalog; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.PreviewResult; +import org.apache.seatunnel.api.table.catalog.SQLPreviewResult; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.catalog.TableSchema; +import org.apache.seatunnel.api.table.type.BasicType; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.dm.DamengCatalogFactory; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.mysql.MySqlCatalogFactory; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.oceanbase.OceanBaseCatalogFactory; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.oracle.OracleCatalogFactory; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.psql.PostgresCatalogFactory; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.sqlserver.SqlServerCatalogFactory; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.tidb.TiDBCatalogFactory; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Optional; + +public class PreviewActionTest { + + private static final CatalogTable CATALOG_TABLE = + CatalogTable.of( + TableIdentifier.of("catalog", "database", "table"), + TableSchema.builder() + .column( + PhysicalColumn.of( + "test", + BasicType.STRING_TYPE, + (Long) null, + true, + null, + "")) + .build(), + Collections.emptyMap(), + Collections.emptyList(), + "comment"); + + @Test + public void 
testMySQLPreviewAction() { + MySqlCatalogFactory factory = new MySqlCatalogFactory(); + Catalog catalog = + factory.createCatalog( + "test", + ReadonlyConfig.fromMap( + new HashMap() { + { + put("base-url", "jdbc:mysql://localhost:3306/test"); + put("username", "root"); + put("password", "root"); + } + })); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_DATABASE, + "CREATE DATABASE `testddatabase`;", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_DATABASE, + "DROP DATABASE `testddatabase`;", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.TRUNCATE_TABLE, + "TRUNCATE TABLE `testddatabase`.`testtable`;", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_TABLE, + "DROP TABLE `testddatabase`.`testtable`;", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_TABLE, + "CREATE TABLE `testtable` (\n" + + "\t`test` LONGTEXT NULL COMMENT ''\n" + + ") COMMENT = 'comment';", + Optional.of(CATALOG_TABLE)); + } + + @Test + public void testDMPreviewAction() { + DamengCatalogFactory factory = new DamengCatalogFactory(); + Catalog catalog = + factory.createCatalog( + "test", + ReadonlyConfig.fromMap( + new HashMap() { + { + put("base-url", "jdbc:mysql://localhost:3306/test"); + put("username", "root"); + put("password", "root"); + } + })); + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_DATABASE, + "CREATE DATABASE `testddatabase`;", + Optional.empty())); + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_DATABASE, + "DROP DATABASE `testddatabase`;", + Optional.empty())); + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> + assertPreviewResult( + catalog, + Catalog.ActionType.TRUNCATE_TABLE, + "TRUNCATE TABLE `testddatabase`.`testtable`;", + 
Optional.empty())); + assertPreviewResult( + catalog, Catalog.ActionType.DROP_TABLE, "DROP TABLE TESTTABLE", Optional.empty()); + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_TABLE, + "CREATE TABLE `testtable` (\n" + + "\t`test` LONGTEXT NULL COMMENT ''\n" + + ") COMMENT = 'comment';", + Optional.of(CATALOG_TABLE))); + } + + @Test + public void testOceanBasePreviewAction() { + OceanBaseCatalogFactory factory = new OceanBaseCatalogFactory(); + Catalog catalog = + factory.createCatalog( + "test", + ReadonlyConfig.fromMap( + new HashMap() { + { + put("base-url", "jdbc:mysql://localhost:3306/test"); + put("compatibleMode", "oracle"); + put("username", "root"); + put("password", "root"); + } + })); + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_DATABASE, + "CREATE DATABASE `testddatabase`;", + Optional.empty())); + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_DATABASE, + "DROP DATABASE `testddatabase`;", + Optional.empty())); + assertPreviewResult( + catalog, + Catalog.ActionType.TRUNCATE_TABLE, + "TRUNCATE TABLE \"null\".\"testtable\"", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_TABLE, + "DROP TABLE \"testtable\"", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_TABLE, + "CREATE TABLE \"testtable\" (\n" + "\"test\" VARCHAR2(4000)\n" + ")", + Optional.of(CATALOG_TABLE)); + + Catalog catalog2 = + factory.createCatalog( + "test", + ReadonlyConfig.fromMap( + new HashMap() { + { + put("base-url", "jdbc:mysql://localhost:3306/test"); + put("compatibleMode", "mysql"); + put("username", "root"); + put("password", "root"); + } + })); + assertPreviewResult( + catalog2, + Catalog.ActionType.CREATE_DATABASE, + "CREATE DATABASE `testddatabase`;", + 
Optional.empty()); + assertPreviewResult( + catalog2, + Catalog.ActionType.DROP_DATABASE, + "DROP DATABASE `testddatabase`;", + Optional.empty()); + assertPreviewResult( + catalog2, + Catalog.ActionType.TRUNCATE_TABLE, + "TRUNCATE TABLE `testddatabase`.`testtable`;", + Optional.empty()); + assertPreviewResult( + catalog2, + Catalog.ActionType.DROP_TABLE, + "DROP TABLE `testddatabase`.`testtable`;", + Optional.empty()); + assertPreviewResult( + catalog2, + Catalog.ActionType.CREATE_TABLE, + "CREATE TABLE `testtable` (\n" + + "\t`test` LONGTEXT NULL COMMENT ''\n" + + ") COMMENT = 'comment';", + Optional.of(CATALOG_TABLE)); + } + + @Test + public void testOraclePreviewAction() { + OracleCatalogFactory factory = new OracleCatalogFactory(); + Catalog catalog = + factory.createCatalog( + "test", + ReadonlyConfig.fromMap( + new HashMap() { + { + put("base-url", "jdbc:mysql://localhost:3306/test"); + put("username", "root"); + put("password", "root"); + } + })); + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_DATABASE, + "CREATE DATABASE `testddatabase`;", + Optional.empty())); + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_DATABASE, + "DROP DATABASE `testddatabase`;", + Optional.empty())); + assertPreviewResult( + catalog, + Catalog.ActionType.TRUNCATE_TABLE, + "TRUNCATE TABLE \"null\".\"testtable\"", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_TABLE, + "DROP TABLE \"testtable\"", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_TABLE, + "CREATE TABLE \"testtable\" (\n" + "\"test\" VARCHAR2(4000)\n" + ")", + Optional.of(CATALOG_TABLE)); + } + + @Test + public void testPostgresPreviewAction() { + PostgresCatalogFactory factory = new PostgresCatalogFactory(); + Catalog catalog = + factory.createCatalog( + "test", + 
ReadonlyConfig.fromMap( + new HashMap() { + { + put("base-url", "jdbc:mysql://localhost:3306/test"); + put("username", "root"); + put("password", "root"); + } + })); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_DATABASE, + "CREATE DATABASE \"testddatabase\"", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_DATABASE, + "DROP DATABASE \"testddatabase\"", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.TRUNCATE_TABLE, + "TRUNCATE TABLE \"null\".\"testtable\"", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_TABLE, + "DROP TABLE \"null\".\"testtable\"", + Optional.empty()); + + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_TABLE, + "CREATE TABLE \"testtable\" (\n" + "\"test\" text\n" + ");", + Optional.of(CATALOG_TABLE)); + } + + @Test + public void testSqlServerPreviewAction() { + SqlServerCatalogFactory factory = new SqlServerCatalogFactory(); + Catalog catalog = + factory.createCatalog( + "test", + ReadonlyConfig.fromMap( + new HashMap() { + { + put( + "base-url", + "jdbc:sqlserver://localhost:1433;databaseName=column_type_test"); + put("username", "root"); + put("password", "root"); + } + })); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_DATABASE, + "CREATE DATABASE testddatabase", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_DATABASE, + "DROP DATABASE testddatabase;", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.TRUNCATE_TABLE, + "TRUNCATE TABLE [testddatabase].[testtable]", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_TABLE, + "DROP TABLE testddatabase.testtable", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_TABLE, + "IF OBJECT_ID('[testddatabase].[testtable]', 'U') IS NULL \n" + + "BEGIN \n" + + "CREATE TABLE [testddatabase].[testtable] ( \n" + + "\t[test] TEXT NULL\n" + + ");\n" + + 
"EXEC testddatabase.sys.sp_addextendedproperty 'MS_Description', N'comment', 'schema', N'null', 'table', N'testtable';\n" + + "EXEC testddatabase.sys.sp_addextendedproperty 'MS_Description', N'', 'schema', N'null', 'table', N'testtable', 'column', N'test';\n" + + "\n" + + "END", + Optional.of(CATALOG_TABLE)); + } + + @Test + public void testTiDBPreviewAction() { + TiDBCatalogFactory factory = new TiDBCatalogFactory(); + Catalog catalog = + factory.createCatalog( + "test", + ReadonlyConfig.fromMap( + new HashMap() { + { + put("base-url", "jdbc:mysql://localhost:3306/test"); + put("username", "root"); + put("password", "root"); + } + })); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_DATABASE, + "CREATE DATABASE `testddatabase`;", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_DATABASE, + "DROP DATABASE `testddatabase`;", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.TRUNCATE_TABLE, + "TRUNCATE TABLE `testddatabase`.`testtable`;", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_TABLE, + "DROP TABLE `testddatabase`.`testtable`;", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_TABLE, + "CREATE TABLE `testtable` (\n" + + "\t`test` LONGTEXT NULL COMMENT ''\n" + + ") COMMENT = 'comment';", + Optional.of(CATALOG_TABLE)); + } + + private void assertPreviewResult( + Catalog catalog, + Catalog.ActionType actionType, + String expectedSql, + Optional catalogTable) { + PreviewResult previewResult = + catalog.previewAction( + actionType, TablePath.of("testddatabase.testtable"), catalogTable); + Assertions.assertInstanceOf(SQLPreviewResult.class, previewResult); + Assertions.assertEquals(expectedSql, ((SQLPreviewResult) previewResult).getSql()); + } +} diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCatalog.java 
b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCatalog.java index 3dc7eebfa67..8a14b08efe4 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCatalog.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCatalog.java @@ -20,7 +20,9 @@ import org.apache.seatunnel.api.table.catalog.Catalog; import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.PreviewResult; import org.apache.seatunnel.api.table.catalog.PrimaryKey; +import org.apache.seatunnel.api.table.catalog.SQLPreviewResult; import org.apache.seatunnel.api.table.catalog.TableIdentifier; import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.catalog.TableSchema; @@ -44,6 +46,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.base.Preconditions; import com.mysql.cj.MysqlType; import lombok.extern.slf4j.Slf4j; @@ -214,7 +217,7 @@ public CatalogTable getTable(TablePath tablePath) public void createTable(TablePath tablePath, CatalogTable table, boolean ignoreIfExists) throws TableAlreadyExistException, DatabaseNotExistException, CatalogException { this.createTable( - StarRocksSaveModeUtil.fillingCreateSql( + StarRocksSaveModeUtil.getCreateTableSql( template, tablePath.getDatabaseName(), tablePath.getTableName(), @@ -225,12 +228,8 @@ public void createTable(TablePath tablePath, CatalogTable table, boolean ignoreI public void dropTable(TablePath tablePath, boolean ignoreIfNotExists) throws TableNotExistException, CatalogException { try (Connection conn = DriverManager.getConnection(defaultUrl, username, pwd)) { - if (ignoreIfNotExists) { - conn.createStatement().execute("DROP TABLE 
IF EXISTS " + tablePath.getFullName()); - } else { - conn.createStatement() - .execute(String.format("DROP TABLE %s", tablePath.getFullName())); - } + conn.createStatement() + .execute(StarRocksSaveModeUtil.getDropTableSql(tablePath, ignoreIfNotExists)); } catch (Exception e) { throw new CatalogException( String.format("Failed listing database in catalog %s", catalogName), e); @@ -242,7 +241,7 @@ public void truncateTable(TablePath tablePath, boolean ignoreIfNotExists) try (Connection conn = DriverManager.getConnection(defaultUrl, username, pwd)) { if (ignoreIfNotExists) { conn.createStatement() - .execute(String.format("TRUNCATE TABLE %s", tablePath.getFullName())); + .execute(StarRocksSaveModeUtil.getTruncateTableSql(tablePath)); } } catch (Exception e) { throw new CatalogException( @@ -277,16 +276,10 @@ public boolean isExistsData(TablePath tablePath) { public void createDatabase(TablePath tablePath, boolean ignoreIfExists) throws DatabaseAlreadyExistException, CatalogException { try (Connection conn = DriverManager.getConnection(defaultUrl, username, pwd)) { - if (ignoreIfExists) { - conn.createStatement() - .execute( - "CREATE DATABASE IF NOT EXISTS `" - + tablePath.getDatabaseName() - + "`"); - } else { - conn.createStatement() - .execute("CREATE DATABASE `" + tablePath.getDatabaseName() + "`"); - } + conn.createStatement() + .execute( + StarRocksSaveModeUtil.getCreateDatabaseSql( + tablePath.getDatabaseName(), ignoreIfExists)); } catch (Exception e) { throw new CatalogException( String.format("Failed listing database in catalog %s", catalogName), e); @@ -297,13 +290,10 @@ public void createDatabase(TablePath tablePath, boolean ignoreIfExists) public void dropDatabase(TablePath tablePath, boolean ignoreIfNotExists) throws DatabaseNotExistException, CatalogException { try (Connection conn = DriverManager.getConnection(defaultUrl, username, pwd)) { - if (ignoreIfNotExists) { - conn.createStatement() - .execute("DROP DATABASE IF EXISTS `" + 
tablePath.getDatabaseName() + "`"); - } else { - conn.createStatement() - .execute(String.format("DROP DATABASE `%s`", tablePath.getDatabaseName())); - } + conn.createStatement() + .execute( + StarRocksSaveModeUtil.getDropDatabaseSql( + tablePath.getDatabaseName(), ignoreIfNotExists)); } catch (Exception e) { throw new CatalogException( String.format("Failed listing database in catalog %s", catalogName), e); @@ -501,4 +491,30 @@ public boolean tableExists(TablePath tablePath) throws CatalogException { return false; } } + + @Override + public PreviewResult previewAction( + ActionType actionType, TablePath tablePath, Optional catalogTable) { + if (actionType == ActionType.CREATE_TABLE) { + Preconditions.checkArgument(catalogTable.isPresent(), "CatalogTable cannot be null"); + return new SQLPreviewResult( + StarRocksSaveModeUtil.getCreateTableSql( + template, + tablePath.getDatabaseName(), + tablePath.getTableName(), + catalogTable.get().getTableSchema())); + } else if (actionType == ActionType.DROP_TABLE) { + return new SQLPreviewResult(StarRocksSaveModeUtil.getDropTableSql(tablePath, true)); + } else if (actionType == ActionType.TRUNCATE_TABLE) { + return new SQLPreviewResult(StarRocksSaveModeUtil.getTruncateTableSql(tablePath)); + } else if (actionType == ActionType.CREATE_DATABASE) { + return new SQLPreviewResult( + StarRocksSaveModeUtil.getCreateDatabaseSql(tablePath.getDatabaseName(), true)); + } else if (actionType == ActionType.DROP_DATABASE) { + return new SQLPreviewResult( + "DROP DATABASE IF EXISTS `" + tablePath.getDatabaseName() + "`"); + } else { + throw new UnsupportedOperationException("Unsupported action type: " + actionType); + } + } } diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSaveModeUtil.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSaveModeUtil.java index 
0ca7774a7c3..3b65d65a5b3 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSaveModeUtil.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSaveModeUtil.java @@ -19,6 +19,7 @@ import org.apache.seatunnel.api.sink.SaveModeConstants; import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.catalog.TableSchema; import org.apache.seatunnel.api.table.type.ArrayType; import org.apache.seatunnel.api.table.type.DecimalType; @@ -37,7 +38,7 @@ public class StarRocksSaveModeUtil { - public static String fillingCreateSql( + public static String getCreateTableSql( String template, String database, String table, TableSchema tableSchema) { String primaryKey = ""; if (tableSchema.getPrimaryKey() != null) { @@ -178,4 +179,32 @@ private static String dataTypeToStarrocksType(SeaTunnelDataType dataType, lon } throw new IllegalArgumentException("Unsupported SeaTunnel's data type: " + dataType); } + + public static String getCreateDatabaseSql(String database, boolean ignoreIfExists) { + if (ignoreIfExists) { + return "CREATE DATABASE IF NOT EXISTS `" + database + "`"; + } else { + return "CREATE DATABASE `" + database + "`"; + } + } + + public static String getDropDatabaseSql(String database, boolean ignoreIfNotExists) { + if (ignoreIfNotExists) { + return "DROP DATABASE IF EXISTS `" + database + "`"; + } else { + return "DROP DATABASE `" + database + "`"; + } + } + + public static String getDropTableSql(TablePath tablePath, boolean ignoreIfNotExists) { + if (ignoreIfNotExists) { + return "DROP TABLE IF EXISTS " + tablePath.getFullName(); + } else { + return "DROP TABLE " + tablePath.getFullName(); + } + } + + public static String getTruncateTableSql(TablePath tablePath) { + return "TRUNCATE TABLE " + tablePath.getFullName(); + 
} } diff --git a/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/PreviewActionTest.java b/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/PreviewActionTest.java new file mode 100644 index 00000000000..37c06345fd8 --- /dev/null +++ b/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/PreviewActionTest.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.starrocks.catalog; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.table.catalog.Catalog; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.PreviewResult; +import org.apache.seatunnel.api.table.catalog.PrimaryKey; +import org.apache.seatunnel.api.table.catalog.SQLPreviewResult; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.catalog.TableSchema; +import org.apache.seatunnel.api.table.type.BasicType; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Optional; + +public class PreviewActionTest { + + private static final CatalogTable CATALOG_TABLE = + CatalogTable.of( + TableIdentifier.of("catalog", "database", "table"), + TableSchema.builder() + .column( + PhysicalColumn.of( + "test", + BasicType.STRING_TYPE, + (Long) null, + true, + null, + "")) + .column( + PhysicalColumn.of( + "test2", + BasicType.STRING_TYPE, + (Long) null, + true, + null, + "")) + .primaryKey(PrimaryKey.of("test", Collections.singletonList("test"))) + .build(), + Collections.emptyMap(), + Collections.emptyList(), + "comment"); + + @Test + public void testDorisPreviewAction() { + StarRocksCatalogFactory factory = new StarRocksCatalogFactory(); + Catalog catalog = + factory.createCatalog( + "test", + ReadonlyConfig.fromMap( + new HashMap() { + { + put("base-url", "jdbc:mysql://localhost:9030"); + put("username", "root"); + put("password", "root"); + } + })); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_DATABASE, + "CREATE DATABASE IF NOT EXISTS `testddatabase`", + Optional.empty()); + assertPreviewResult( + catalog, + 
Catalog.ActionType.DROP_DATABASE, + "DROP DATABASE IF EXISTS `testddatabase`", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.TRUNCATE_TABLE, + "TRUNCATE TABLE testddatabase.testtable", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.DROP_TABLE, + "DROP TABLE IF EXISTS testddatabase.testtable", + Optional.empty()); + assertPreviewResult( + catalog, + Catalog.ActionType.CREATE_TABLE, + "CREATE TABLE IF NOT EXISTS `testddatabase`.`testtable` (\n" + + "`test` STRING NULL ,\n" + + "`test2` STRING NULL \n" + + ") ENGINE=OLAP\n" + + " PRIMARY KEY (`test`)\n" + + "DISTRIBUTED BY HASH (`test`)PROPERTIES (\n" + + " \"replication_num\" = \"1\" \n" + + ")", + Optional.of(CATALOG_TABLE)); + } + + private void assertPreviewResult( + Catalog catalog, + Catalog.ActionType actionType, + String expectedSql, + Optional catalogTable) { + PreviewResult previewResult = + catalog.previewAction( + actionType, TablePath.of("testddatabase.testtable"), catalogTable); + Assertions.assertInstanceOf(SQLPreviewResult.class, previewResult); + Assertions.assertEquals(expectedSql, ((SQLPreviewResult) previewResult).getSql()); + } +} diff --git a/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCreateTableTest.java b/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCreateTableTest.java index e3471b6bf23..0a3f36196aa 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCreateTableTest.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCreateTableTest.java @@ -54,7 +54,7 @@ public void test() { PhysicalColumn.of("create_time", BasicType.LONG_TYPE, (Long) null, true, null, "")); String result = - 
StarRocksSaveModeUtil.fillingCreateSql( + StarRocksSaveModeUtil.getCreateTableSql( "CREATE TABLE IF NOT EXISTS `${database}`.`${table_name}` ( \n" + "${rowtype_primary_key} , \n" + "${rowtype_unique_key} , \n" @@ -187,7 +187,7 @@ public void testInSeq() { "L_COMMENT", BasicType.STRING_TYPE, (Long) null, false, null, "")); String result = - StarRocksSaveModeUtil.fillingCreateSql( + StarRocksSaveModeUtil.getCreateTableSql( "CREATE TABLE IF NOT EXISTS `${database}`.`${table_name}` (\n" + "`L_COMMITDATE`,\n" + "${rowtype_primary_key},\n" @@ -244,7 +244,7 @@ public void testWithVarchar() { columns.add(PhysicalColumn.of("description", BasicType.STRING_TYPE, 70000, true, null, "")); String result = - StarRocksSaveModeUtil.fillingCreateSql( + StarRocksSaveModeUtil.getCreateTableSql( "CREATE TABLE IF NOT EXISTS `${database}`.`${table_name}` ( \n" + "${rowtype_primary_key} , \n" + "`create_time` DATETIME NOT NULL , \n" diff --git a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-13-starter/src/main/java/org/apache/seatunnel/core/starter/flink/execution/SinkExecuteProcessor.java b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-13-starter/src/main/java/org/apache/seatunnel/core/starter/flink/execution/SinkExecuteProcessor.java index 05fde53336f..f775bfb46f3 100644 --- a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-13-starter/src/main/java/org/apache/seatunnel/core/starter/flink/execution/SinkExecuteProcessor.java +++ b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-13-starter/src/main/java/org/apache/seatunnel/core/starter/flink/execution/SinkExecuteProcessor.java @@ -23,6 +23,7 @@ import org.apache.seatunnel.api.common.JobContext; import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.configuration.util.ConfigValidator; +import org.apache.seatunnel.api.sink.SaveModeExecuteWrapper; import org.apache.seatunnel.api.sink.SaveModeHandler; import org.apache.seatunnel.api.sink.SeaTunnelSink; import 
org.apache.seatunnel.api.sink.SupportSaveMode; @@ -123,7 +124,7 @@ public List execute(List upstreamDataS Optional saveModeHandler = saveModeSink.getSaveModeHandler(); if (saveModeHandler.isPresent()) { try (SaveModeHandler handler = saveModeHandler.get()) { - handler.handleSaveMode(); + new SaveModeExecuteWrapper(handler).execute(); } catch (Exception e) { throw new SeaTunnelRuntimeException(HANDLE_SAVE_MODE_FAILED, e); } diff --git a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/SinkExecuteProcessor.java b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/SinkExecuteProcessor.java index f4af78f81c7..6257a94dde7 100644 --- a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/SinkExecuteProcessor.java +++ b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/SinkExecuteProcessor.java @@ -23,6 +23,7 @@ import org.apache.seatunnel.api.common.JobContext; import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.configuration.util.ConfigValidator; +import org.apache.seatunnel.api.sink.SaveModeExecuteWrapper; import org.apache.seatunnel.api.sink.SaveModeHandler; import org.apache.seatunnel.api.sink.SeaTunnelSink; import org.apache.seatunnel.api.sink.SupportSaveMode; @@ -124,7 +125,7 @@ public List execute(List upstreamDataS Optional saveModeHandler = saveModeSink.getSaveModeHandler(); if (saveModeHandler.isPresent()) { try (SaveModeHandler handler = saveModeHandler.get()) { - handler.handleSaveMode(); + new SaveModeExecuteWrapper(handler).execute(); } catch (Exception e) { throw new SeaTunnelRuntimeException(HANDLE_SAVE_MODE_FAILED, e); } diff --git 
a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java index 48baa7f746c..886f6d6a158 100644 --- a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java +++ b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java @@ -23,6 +23,7 @@ import org.apache.seatunnel.api.common.JobContext; import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.configuration.util.ConfigValidator; +import org.apache.seatunnel.api.sink.SaveModeExecuteWrapper; import org.apache.seatunnel.api.sink.SaveModeHandler; import org.apache.seatunnel.api.sink.SeaTunnelSink; import org.apache.seatunnel.api.sink.SupportSaveMode; @@ -142,7 +143,7 @@ public List execute(List upstreamDataStreams Optional saveModeHandler = saveModeSink.getSaveModeHandler(); if (saveModeHandler.isPresent()) { try (SaveModeHandler handler = saveModeHandler.get()) { - handler.handleSaveMode(); + new SaveModeExecuteWrapper(handler).execute(); } catch (Exception e) { throw new SeaTunnelRuntimeException(HANDLE_SAVE_MODE_FAILED, e); } diff --git a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-starter-common/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-starter-common/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java index fc5eade3f4d..654cfaa1815 100644 --- 
a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-starter-common/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java +++ b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-starter-common/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java @@ -23,6 +23,7 @@ import org.apache.seatunnel.api.common.JobContext; import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.configuration.util.ConfigValidator; +import org.apache.seatunnel.api.sink.SaveModeExecuteWrapper; import org.apache.seatunnel.api.sink.SaveModeHandler; import org.apache.seatunnel.api.sink.SeaTunnelSink; import org.apache.seatunnel.api.sink.SupportSaveMode; @@ -143,7 +144,7 @@ public List execute(List upstreamDataStreams Optional saveModeHandler = saveModeSink.getSaveModeHandler(); if (saveModeHandler.isPresent()) { try (SaveModeHandler handler = saveModeHandler.get()) { - handler.handleSaveMode(); + new SaveModeExecuteWrapper(handler).execute(); } catch (Exception e) { throw new SeaTunnelRuntimeException(HANDLE_SAVE_MODE_FAILED, e); } diff --git a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java index 50e35d9117e..f988f293a5f 100644 --- a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java +++ b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java @@ -22,6 +22,7 @@ import org.apache.seatunnel.api.common.CommonOptions; import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.env.EnvCommonOptions; +import org.apache.seatunnel.api.sink.SaveModeExecuteWrapper; import 
org.apache.seatunnel.api.sink.SaveModeHandler; import org.apache.seatunnel.api.sink.SeaTunnelSink; import org.apache.seatunnel.api.sink.SupportMultiTableSink; @@ -677,7 +678,7 @@ public static void handleSaveMode(SeaTunnelSink sink) { Optional saveModeHandler = saveModeSink.getSaveModeHandler(); if (saveModeHandler.isPresent()) { try (SaveModeHandler handler = saveModeHandler.get()) { - handler.handleSaveMode(); + new SaveModeExecuteWrapper(handler).execute(); } catch (Exception e) { throw new SeaTunnelRuntimeException(HANDLE_SAVE_MODE_FAILED, e); } From 216efb764b31e5d3078036219e73c30fad16aea5 Mon Sep 17 00:00:00 2001 From: Jarvis Date: Wed, 20 Mar 2024 14:53:29 +0800 Subject: [PATCH 22/59] [Improve] add test case (#6536) --- .../seatunnel/engine/e2e/LocalModeIT.java | 106 ++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/LocalModeIT.java diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/LocalModeIT.java b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/LocalModeIT.java new file mode 100644 index 00000000000..b47a5b812ba --- /dev/null +++ b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/LocalModeIT.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.engine.e2e; + +import org.apache.seatunnel.engine.client.SeaTunnelClient; +import org.apache.seatunnel.engine.common.config.ConfigProvider; +import org.apache.seatunnel.engine.common.config.SeaTunnelConfig; +import org.apache.seatunnel.engine.server.SeaTunnelServerStarter; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import com.hazelcast.client.config.ClientConfig; +import com.hazelcast.config.Config; +import com.hazelcast.instance.impl.HazelcastInstanceImpl; + +import java.util.Collections; +import java.util.Map; + +public class LocalModeIT { + + SeaTunnelConfig seaTunnelConfig = ConfigProvider.locateAndGetSeaTunnelConfig(); + + @Test + public void localModeWithPortNotInDefaultRange() { + + HazelcastInstanceImpl node1 = null; + SeaTunnelClient engineClient = null; + try { + Config hazelcastConfig = seaTunnelConfig.getHazelcastConfig(); + hazelcastConfig.getNetworkConfig().setPort(9999); + SeaTunnelConfig updatedConfig = new SeaTunnelConfig(); + updatedConfig.setHazelcastConfig(hazelcastConfig); + node1 = SeaTunnelServerStarter.createHazelcastInstance(updatedConfig); + ClientConfig clientConfig = ConfigProvider.locateAndGetClientConfig(); + clientConfig + .getConnectionStrategyConfig() + .getConnectionRetryConfig() + .setClusterConnectTimeoutMillis(3000); + Assertions.assertThrows( + IllegalStateException.class, + () -> new SeaTunnelClient(clientConfig), + "Unable to connect to any cluster."); + } finally { + if (engineClient != null) { + engineClient.close(); + } + if (node1 != 
null) { + node1.shutdown(); + } + } + } + + @Test + public void localMode() { + HazelcastInstanceImpl node1 = null; + HazelcastInstanceImpl node2 = null; + SeaTunnelClient engineClient = null; + String cluster_name = "new_cluster_name"; + try { + node1 = SeaTunnelServerStarter.createHazelcastInstance(seaTunnelConfig); + + Config hazelcastConfig = seaTunnelConfig.getHazelcastConfig(); + hazelcastConfig.setClusterName(cluster_name).getNetworkConfig().setPort(9999); + SeaTunnelConfig updatedConfig = new SeaTunnelConfig(); + updatedConfig.setHazelcastConfig(hazelcastConfig); + node2 = SeaTunnelServerStarter.createHazelcastInstance(updatedConfig); + + ClientConfig clientConfig = ConfigProvider.locateAndGetClientConfig(); + clientConfig.setClusterName(cluster_name); + clientConfig + .getNetworkConfig() + .setAddresses(Collections.singletonList("localhost:9999")); + engineClient = new SeaTunnelClient(clientConfig); + + Map clusterHealthMetrics = engineClient.getClusterHealthMetrics(); + Assertions.assertEquals(1, clusterHealthMetrics.size()); + Assertions.assertTrue(clusterHealthMetrics.containsKey("[localhost]:9999")); + } finally { + if (engineClient != null) { + engineClient.close(); + } + if (node1 != null) { + node1.shutdown(); + } + if (node2 != null) { + node2.shutdown(); + } + } + } +} From b1dcd4a2bc9292f1c46571d342195e53078f7942 Mon Sep 17 00:00:00 2001 From: Carl-Zhou-CN <1058249259@qq.com> Date: Thu, 21 Mar 2024 13:27:33 +0800 Subject: [PATCH 23/59] [BugFix][Spark-translation] map type cast error (#6552) --- .../src/test/resources/fake_to_assert.conf | 9 +++++++++ .../serialization/SeaTunnelRowConverter.java | 20 +++++++++---------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-fake-e2e/src/test/resources/fake_to_assert.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-fake-e2e/src/test/resources/fake_to_assert.conf index 084ddbb533c..cab504d3b8f 100644 --- 
a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-fake-e2e/src/test/resources/fake_to_assert.conf +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-fake-e2e/src/test/resources/fake_to_assert.conf @@ -67,8 +67,17 @@ source { } } +transform { + Sql { + source_table_name = "fake" + result_table_name = "tmp1" + query = """select * from fake""" + } +} + sink { Assert { + source_table_name = "tmp1" rules { row_rules = [ { diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-common/src/main/java/org/apache/seatunnel/translation/spark/serialization/SeaTunnelRowConverter.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-common/src/main/java/org/apache/seatunnel/translation/spark/serialization/SeaTunnelRowConverter.java index 000e0baa06f..db23d4912a9 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-common/src/main/java/org/apache/seatunnel/translation/spark/serialization/SeaTunnelRowConverter.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-common/src/main/java/org/apache/seatunnel/translation/spark/serialization/SeaTunnelRowConverter.java @@ -31,7 +31,7 @@ import org.apache.spark.unsafe.types.UTF8String; import scala.Tuple2; -import scala.collection.immutable.HashMap.HashTrieMap; +import scala.collection.immutable.AbstractMap; import scala.collection.mutable.WrappedArray; import java.io.IOException; @@ -179,7 +179,7 @@ private Object reconvert(Object field, SeaTunnelDataType dataType) { case STRING: return field.toString(); case MAP: - return reconvertMap((HashTrieMap) field, (MapType) dataType); + return reconvertMap((AbstractMap) field, (MapType) dataType); case ARRAY: return reconvertArray((WrappedArray.ofRef) field, (ArrayType) dataType); default: @@ -206,23 +206,23 @@ private SeaTunnelRow reconvert(SeaTunnelRow engineRow, SeaTunnelRowType rowType) } /** - * Convert HashTrieMap to LinkedHashMap + * Convert 
AbstractMap to LinkedHashMap * - * @param hashTrieMap HashTrieMap data + * @param abstractMap AbstractMap data * @param mapType fields type map * @return java.util.LinkedHashMap - * @see HashTrieMap + * @see AbstractMap */ - private Map reconvertMap(HashTrieMap hashTrieMap, MapType mapType) { - if (hashTrieMap == null || hashTrieMap.size() == 0) { + private Map reconvertMap(AbstractMap abstractMap, MapType mapType) { + if (abstractMap == null || abstractMap.size() == 0) { return Collections.emptyMap(); } - int num = hashTrieMap.size(); + int num = abstractMap.size(); Map newMap = new LinkedHashMap<>(num); SeaTunnelDataType keyType = mapType.getKeyType(); SeaTunnelDataType valueType = mapType.getValueType(); - scala.collection.immutable.List keyList = hashTrieMap.keySet().toList(); - scala.collection.immutable.List valueList = hashTrieMap.values().toList(); + scala.collection.immutable.List keyList = abstractMap.keySet().toList(); + scala.collection.immutable.List valueList = abstractMap.values().toList(); for (int i = 0; i < num; i++) { Object key = keyList.apply(i); Object value = valueList.apply(i); From 5f3c9c36a51d8db7aee45583bc186ba549576e36 Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Thu, 21 Mar 2024 13:58:10 +0800 Subject: [PATCH 24/59] [Fix][Connector-V2] Fix connector support SPI but without no args constructor (#6551) --- .../cdc/mongodb/MongodbIncrementalSource.java | 6 ---- .../mysql/source/MySqlIncrementalSource.java | 6 ---- .../source/PostgresIncrementalSource.java | 5 --- .../source/SqlServerIncrementalSource.java | 6 ---- .../connectors/doris/source/DorisSource.java | 2 -- .../elasticsearch/sink/ElasticsearchSink.java | 3 -- .../file/oss/{ => jindo}/config/OssConf.java | 2 +- .../{ => jindo}/config/OssConfigOptions.java | 2 +- .../exception/OssJindoConnectorException.java | 2 +- .../oss/{ => jindo}/sink/OssFileSink.java | 8 ++--- .../{ => jindo}/sink/OssFileSinkFactory.java | 4 +-- .../oss/{ => jindo}/source/OssFileSource.java | 8 ++--- 
.../source/OssFileSourceFactory.java | 4 +-- .../connectors/test/OssJindoFactoryTest.java | 4 +-- .../seatunnel/file/oss/sink/OssFileSink.java | 4 --- .../seatunnel/file/s3/sink/S3FileSink.java | 4 --- .../hudi/source/HudiSourceFactory.java | 3 +- .../seatunnel/iceberg/sink/IcebergSink.java | 3 -- .../iceberg/source/IcebergSource.java | 2 -- .../starrocks/source/StarRocksSource.java | 3 -- .../ConnectorSpecificationCheckTest.java | 32 +++++++++++++++++++ 21 files changed, 51 insertions(+), 62 deletions(-) rename seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/{ => jindo}/config/OssConf.java (96%) rename seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/{ => jindo}/config/OssConfigOptions.java (96%) rename seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/{ => jindo}/exception/OssJindoConnectorException.java (95%) rename seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/{ => jindo}/sink/OssFileSink.java (88%) rename seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/{ => jindo}/sink/OssFileSinkFactory.java (96%) rename seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/{ => jindo}/source/OssFileSource.java (94%) rename seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/{ => jindo}/source/OssFileSourceFactory.java (95%) diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mongodb/MongodbIncrementalSource.java 
b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mongodb/MongodbIncrementalSource.java index 0045b03c481..e85b4b57a7c 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mongodb/MongodbIncrementalSource.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mongodb/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mongodb/MongodbIncrementalSource.java @@ -19,7 +19,6 @@ import org.apache.seatunnel.api.configuration.Option; import org.apache.seatunnel.api.configuration.ReadonlyConfig; -import org.apache.seatunnel.api.source.SeaTunnelSource; import org.apache.seatunnel.api.source.SupportParallelism; import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; @@ -41,16 +40,11 @@ import org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.source.dialect.MongodbDialect; import org.apache.seatunnel.connectors.seatunnel.cdc.mongodb.source.offset.ChangeStreamOffsetFactory; -import com.google.auto.service.AutoService; -import lombok.NoArgsConstructor; - import javax.annotation.Nonnull; import java.util.List; import java.util.Optional; -@NoArgsConstructor -@AutoService(SeaTunnelSource.class) public class MongodbIncrementalSource extends IncrementalSource implements SupportParallelism { diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/MySqlIncrementalSource.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/MySqlIncrementalSource.java index 67ff9ff6079..da11ede2464 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/MySqlIncrementalSource.java +++ 
b/seatunnel-connectors-v2/connector-cdc/connector-cdc-mysql/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/source/MySqlIncrementalSource.java @@ -19,7 +19,6 @@ import org.apache.seatunnel.api.configuration.Option; import org.apache.seatunnel.api.configuration.ReadonlyConfig; -import org.apache.seatunnel.api.source.SeaTunnelSource; import org.apache.seatunnel.api.source.SupportParallelism; import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; @@ -42,14 +41,9 @@ import org.apache.seatunnel.connectors.seatunnel.cdc.mysql.source.offset.BinlogOffsetFactory; import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.JdbcCatalogOptions; -import com.google.auto.service.AutoService; -import lombok.NoArgsConstructor; - import java.time.ZoneId; import java.util.List; -@NoArgsConstructor -@AutoService(SeaTunnelSource.class) public class MySqlIncrementalSource extends IncrementalSource implements SupportParallelism { static final String IDENTIFIER = "MySQL-CDC"; diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-postgres/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/postgres/source/PostgresIncrementalSource.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-postgres/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/postgres/source/PostgresIncrementalSource.java index 0ed65a873c9..053e38440c0 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-postgres/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/postgres/source/PostgresIncrementalSource.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-postgres/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/postgres/source/PostgresIncrementalSource.java @@ -19,7 +19,6 @@ import org.apache.seatunnel.api.configuration.Option; import org.apache.seatunnel.api.configuration.ReadonlyConfig; -import org.apache.seatunnel.api.source.SeaTunnelSource; import 
org.apache.seatunnel.api.source.SupportParallelism; import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; @@ -42,12 +41,10 @@ import org.apache.kafka.connect.data.Struct; -import com.google.auto.service.AutoService; import io.debezium.jdbc.JdbcConnection; import io.debezium.relational.TableId; import io.debezium.relational.history.ConnectTableChangeSerializer; import io.debezium.relational.history.TableChanges; -import lombok.NoArgsConstructor; import java.time.ZoneId; import java.util.List; @@ -55,8 +52,6 @@ import java.util.function.Function; import java.util.stream.Collectors; -@NoArgsConstructor -@AutoService(SeaTunnelSource.class) public class PostgresIncrementalSource extends IncrementalSource implements SupportParallelism { diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-sqlserver/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/sqlserver/source/source/SqlServerIncrementalSource.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-sqlserver/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/sqlserver/source/source/SqlServerIncrementalSource.java index 4ab64ff692f..ce9df11514f 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-sqlserver/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/sqlserver/source/source/SqlServerIncrementalSource.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-sqlserver/src/main/java/org/apache/seatunnel/connectors/seatunnel/cdc/sqlserver/source/source/SqlServerIncrementalSource.java @@ -19,7 +19,6 @@ import org.apache.seatunnel.api.configuration.Option; import org.apache.seatunnel.api.configuration.ReadonlyConfig; -import org.apache.seatunnel.api.source.SeaTunnelSource; import org.apache.seatunnel.api.source.SupportParallelism; import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; @@ -42,14 +41,9 @@ import 
org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.JdbcCatalogOptions; import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.sqlserver.SqlServerURLParser; -import com.google.auto.service.AutoService; -import lombok.NoArgsConstructor; - import java.time.ZoneId; import java.util.List; -@NoArgsConstructor -@AutoService(SeaTunnelSource.class) public class SqlServerIncrementalSource extends IncrementalSource implements SupportParallelism { diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSource.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSource.java index c18a9444f27..c04f074021a 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSource.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/source/DorisSource.java @@ -29,14 +29,12 @@ import org.apache.seatunnel.connectors.doris.source.split.DorisSourceSplit; import org.apache.seatunnel.connectors.doris.source.split.DorisSourceSplitEnumerator; -import com.google.auto.service.AutoService; import lombok.extern.slf4j.Slf4j; import java.util.Collections; import java.util.List; @Slf4j -@AutoService(SeaTunnelSource.class) public class DorisSource implements SeaTunnelSource { diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSink.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSink.java index 79862879f91..3d160adc070 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSink.java +++ 
b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSink.java @@ -35,15 +35,12 @@ import org.apache.seatunnel.connectors.seatunnel.elasticsearch.state.ElasticsearchCommitInfo; import org.apache.seatunnel.connectors.seatunnel.elasticsearch.state.ElasticsearchSinkState; -import com.google.auto.service.AutoService; - import java.util.Optional; import static org.apache.seatunnel.api.table.factory.FactoryUtil.discoverFactory; import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.config.SinkConfig.MAX_BATCH_SIZE; import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.config.SinkConfig.MAX_RETRY_COUNT; -@AutoService(SeaTunnelSink.class) public class ElasticsearchSink implements SeaTunnelSink< SeaTunnelRow, diff --git a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/config/OssConf.java b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/config/OssConf.java similarity index 96% rename from seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/config/OssConf.java rename to seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/config/OssConf.java index 79f74e90104..718be0bc8aa 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/config/OssConf.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/config/OssConf.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.seatunnel.connectors.seatunnel.file.oss.config; +package org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.config; import org.apache.seatunnel.shade.com.typesafe.config.Config; diff --git a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/config/OssConfigOptions.java b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/config/OssConfigOptions.java similarity index 96% rename from seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/config/OssConfigOptions.java rename to seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/config/OssConfigOptions.java index 2a2e272534e..8876cbc564b 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/config/OssConfigOptions.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/config/OssConfigOptions.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.seatunnel.connectors.seatunnel.file.oss.config; +package org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.config; import org.apache.seatunnel.api.configuration.Option; import org.apache.seatunnel.api.configuration.Options; diff --git a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/exception/OssJindoConnectorException.java b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/exception/OssJindoConnectorException.java similarity index 95% rename from seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/exception/OssJindoConnectorException.java rename to seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/exception/OssJindoConnectorException.java index 9e27e1dc928..93327dc233e 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/exception/OssJindoConnectorException.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/exception/OssJindoConnectorException.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.seatunnel.connectors.seatunnel.file.oss.exception; +package org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.exception; import org.apache.seatunnel.common.exception.SeaTunnelErrorCode; import org.apache.seatunnel.common.exception.SeaTunnelRuntimeException; diff --git a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSink.java b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/sink/OssFileSink.java similarity index 88% rename from seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSink.java rename to seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/sink/OssFileSink.java index f5be2c78628..ac6ee94992f 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSink.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/sink/OssFileSink.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.seatunnel.connectors.seatunnel.file.oss.sink; +package org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.sink; import org.apache.seatunnel.shade.com.typesafe.config.Config; @@ -26,9 +26,9 @@ import org.apache.seatunnel.common.config.CheckResult; import org.apache.seatunnel.common.constants.PluginType; import org.apache.seatunnel.connectors.seatunnel.file.config.FileSystemType; -import org.apache.seatunnel.connectors.seatunnel.file.oss.config.OssConf; -import org.apache.seatunnel.connectors.seatunnel.file.oss.config.OssConfigOptions; -import org.apache.seatunnel.connectors.seatunnel.file.oss.exception.OssJindoConnectorException; +import org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.config.OssConf; +import org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.config.OssConfigOptions; +import org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.exception.OssJindoConnectorException; import org.apache.seatunnel.connectors.seatunnel.file.sink.BaseFileSink; import com.google.auto.service.AutoService; diff --git a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSinkFactory.java b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/sink/OssFileSinkFactory.java similarity index 96% rename from seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSinkFactory.java rename to seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/sink/OssFileSinkFactory.java index 60a426ccb90..b1bfb439e0b 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSinkFactory.java +++ 
b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/sink/OssFileSinkFactory.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.seatunnel.connectors.seatunnel.file.oss.sink; +package org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.sink; import org.apache.seatunnel.api.configuration.util.OptionRule; import org.apache.seatunnel.api.table.factory.Factory; @@ -23,7 +23,7 @@ import org.apache.seatunnel.connectors.seatunnel.file.config.BaseSinkConfig; import org.apache.seatunnel.connectors.seatunnel.file.config.FileFormat; import org.apache.seatunnel.connectors.seatunnel.file.config.FileSystemType; -import org.apache.seatunnel.connectors.seatunnel.file.oss.config.OssConfigOptions; +import org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.config.OssConfigOptions; import com.google.auto.service.AutoService; diff --git a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/source/OssFileSource.java b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/source/OssFileSource.java similarity index 94% rename from seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/source/OssFileSource.java rename to seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/source/OssFileSource.java index 7d73f16e7b6..6eea28eda34 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/source/OssFileSource.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/source/OssFileSource.java @@ 
-15,7 +15,7 @@ * limitations under the License. */ -package org.apache.seatunnel.connectors.seatunnel.file.oss.source; +package org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.source; import org.apache.seatunnel.shade.com.typesafe.config.Config; @@ -33,9 +33,9 @@ import org.apache.seatunnel.connectors.seatunnel.file.config.FileSystemType; import org.apache.seatunnel.connectors.seatunnel.file.exception.FileConnectorErrorCode; import org.apache.seatunnel.connectors.seatunnel.file.exception.FileConnectorException; -import org.apache.seatunnel.connectors.seatunnel.file.oss.config.OssConf; -import org.apache.seatunnel.connectors.seatunnel.file.oss.config.OssConfigOptions; -import org.apache.seatunnel.connectors.seatunnel.file.oss.exception.OssJindoConnectorException; +import org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.config.OssConf; +import org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.config.OssConfigOptions; +import org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.exception.OssJindoConnectorException; import org.apache.seatunnel.connectors.seatunnel.file.source.BaseFileSource; import org.apache.seatunnel.connectors.seatunnel.file.source.reader.ReadStrategyFactory; diff --git a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/source/OssFileSourceFactory.java b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/source/OssFileSourceFactory.java similarity index 95% rename from seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/source/OssFileSourceFactory.java rename to seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/source/OssFileSourceFactory.java index 5a31832b33f..a6c9276c76e 100644 --- 
a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/source/OssFileSourceFactory.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/jindo/source/OssFileSourceFactory.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.seatunnel.connectors.seatunnel.file.oss.source; +package org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.source; import org.apache.seatunnel.api.configuration.util.OptionRule; import org.apache.seatunnel.api.source.SeaTunnelSource; @@ -25,7 +25,7 @@ import org.apache.seatunnel.connectors.seatunnel.file.config.BaseSourceConfigOptions; import org.apache.seatunnel.connectors.seatunnel.file.config.FileFormat; import org.apache.seatunnel.connectors.seatunnel.file.config.FileSystemType; -import org.apache.seatunnel.connectors.seatunnel.file.oss.config.OssConfigOptions; +import org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.config.OssConfigOptions; import com.google.auto.service.AutoService; diff --git a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/test/java/org/apache/seatunnel/connectors/test/OssJindoFactoryTest.java b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/test/java/org/apache/seatunnel/connectors/test/OssJindoFactoryTest.java index 85955f086f9..c014f1d4923 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/test/java/org/apache/seatunnel/connectors/test/OssJindoFactoryTest.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-jindo-oss/src/test/java/org/apache/seatunnel/connectors/test/OssJindoFactoryTest.java @@ -17,8 +17,8 @@ package org.apache.seatunnel.connectors.test; -import org.apache.seatunnel.connectors.seatunnel.file.oss.sink.OssFileSinkFactory; -import org.apache.seatunnel.connectors.seatunnel.file.oss.source.OssFileSourceFactory; +import 
org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.sink.OssFileSinkFactory; +import org.apache.seatunnel.connectors.seatunnel.file.oss.jindo.source.OssFileSourceFactory; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; diff --git a/seatunnel-connectors-v2/connector-file/connector-file-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSink.java b/seatunnel-connectors-v2/connector-file/connector-file-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSink.java index 5cab55f8edd..de4726fd5ce 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSink.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSink.java @@ -18,15 +18,11 @@ package org.apache.seatunnel.connectors.seatunnel.file.oss.sink; import org.apache.seatunnel.api.configuration.ReadonlyConfig; -import org.apache.seatunnel.api.sink.SeaTunnelSink; import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.connectors.seatunnel.file.config.FileSystemType; import org.apache.seatunnel.connectors.seatunnel.file.oss.config.OssHadoopConf; import org.apache.seatunnel.connectors.seatunnel.file.sink.BaseMultipleTableFileSink; -import com.google.auto.service.AutoService; - -@AutoService(SeaTunnelSink.class) public class OssFileSink extends BaseMultipleTableFileSink { public OssFileSink(ReadonlyConfig readonlyConfig, CatalogTable catalogTable) { super(OssHadoopConf.buildWithConfig(readonlyConfig), readonlyConfig, catalogTable); diff --git a/seatunnel-connectors-v2/connector-file/connector-file-s3/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/s3/sink/S3FileSink.java 
b/seatunnel-connectors-v2/connector-file/connector-file-s3/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/s3/sink/S3FileSink.java index 137d1991931..b7583569523 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-s3/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/s3/sink/S3FileSink.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-s3/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/s3/sink/S3FileSink.java @@ -25,7 +25,6 @@ import org.apache.seatunnel.api.sink.DefaultSaveModeHandler; import org.apache.seatunnel.api.sink.SaveModeHandler; import org.apache.seatunnel.api.sink.SchemaSaveMode; -import org.apache.seatunnel.api.sink.SeaTunnelSink; import org.apache.seatunnel.api.sink.SupportSaveMode; import org.apache.seatunnel.api.table.catalog.Catalog; import org.apache.seatunnel.api.table.catalog.CatalogTable; @@ -39,13 +38,10 @@ import org.apache.seatunnel.connectors.seatunnel.file.s3.config.S3ConfigOptions; import org.apache.seatunnel.connectors.seatunnel.file.sink.BaseMultipleTableFileSink; -import com.google.auto.service.AutoService; - import java.util.Optional; import static org.apache.seatunnel.api.table.factory.FactoryUtil.discoverFactory; -@AutoService(SeaTunnelSink.class) public class S3FileSink extends BaseMultipleTableFileSink implements SupportSaveMode { private CatalogTable catalogTable; diff --git a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/source/HudiSourceFactory.java b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/source/HudiSourceFactory.java index 796645297be..778efc62a37 100644 --- a/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/source/HudiSourceFactory.java +++ b/seatunnel-connectors-v2/connector-hudi/src/main/java/org/apache/seatunnel/connectors/seatunnel/hudi/source/HudiSourceFactory.java @@ -19,12 +19,13 @@ import 
org.apache.seatunnel.api.configuration.util.OptionRule; import org.apache.seatunnel.api.source.SeaTunnelSource; +import org.apache.seatunnel.api.table.factory.Factory; import org.apache.seatunnel.api.table.factory.TableSourceFactory; import org.apache.seatunnel.connectors.seatunnel.hudi.config.HudiSourceConfig; import com.google.auto.service.AutoService; -@AutoService(SeaTunnelSource.class) +@AutoService(Factory.class) public class HudiSourceFactory implements TableSourceFactory { @Override diff --git a/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/sink/IcebergSink.java b/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/sink/IcebergSink.java index 06131bc9f75..ad92aa1d75f 100644 --- a/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/sink/IcebergSink.java +++ b/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/sink/IcebergSink.java @@ -44,8 +44,6 @@ import org.apache.seatunnel.connectors.seatunnel.iceberg.sink.commit.IcebergCommitInfo; import org.apache.seatunnel.connectors.seatunnel.iceberg.sink.state.IcebergSinkState; -import com.google.auto.service.AutoService; - import java.io.IOException; import java.util.List; import java.util.Objects; @@ -54,7 +52,6 @@ import static org.apache.seatunnel.api.table.factory.FactoryUtil.discoverFactory; -@AutoService(SeaTunnelSink.class) public class IcebergSink implements SeaTunnelSink< SeaTunnelRow, diff --git a/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/source/IcebergSource.java b/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/source/IcebergSource.java index 3d1cde13a99..7a2fdf9d4ff 100644 --- 
a/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/source/IcebergSource.java +++ b/seatunnel-connectors-v2/connector-iceberg/src/main/java/org/apache/seatunnel/connectors/seatunnel/iceberg/source/IcebergSource.java @@ -49,7 +49,6 @@ import org.apache.iceberg.Schema; import org.apache.iceberg.types.Types; -import com.google.auto.service.AutoService; import lombok.SneakyThrows; import java.util.ArrayList; @@ -58,7 +57,6 @@ import static org.apache.seatunnel.shade.com.google.common.base.Preconditions.checkArgument; -@AutoService(SeaTunnelSource.class) public class IcebergSource implements SeaTunnelSource< SeaTunnelRow, IcebergFileScanTaskSplit, IcebergSplitEnumeratorState>, diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSource.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSource.java index 9bde1b22a38..211a8b96fca 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSource.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/source/StarRocksSource.java @@ -26,12 +26,9 @@ import org.apache.seatunnel.connectors.seatunnel.starrocks.config.CommonConfig; import org.apache.seatunnel.connectors.seatunnel.starrocks.config.SourceConfig; -import com.google.auto.service.AutoService; - import java.util.Collections; import java.util.List; -@AutoService(SeaTunnelSource.class) public class StarRocksSource implements SeaTunnelSource { diff --git a/seatunnel-dist/src/test/java/org/apache/seatunnel/api/connector/ConnectorSpecificationCheckTest.java b/seatunnel-dist/src/test/java/org/apache/seatunnel/api/connector/ConnectorSpecificationCheckTest.java index 243d2fc5a86..5b46d812012 100644 --- 
a/seatunnel-dist/src/test/java/org/apache/seatunnel/api/connector/ConnectorSpecificationCheckTest.java +++ b/seatunnel-dist/src/test/java/org/apache/seatunnel/api/connector/ConnectorSpecificationCheckTest.java @@ -17,6 +17,7 @@ package org.apache.seatunnel.api.connector; +import org.apache.seatunnel.api.sink.SeaTunnelSink; import org.apache.seatunnel.api.source.SeaTunnelSource; import org.apache.seatunnel.api.table.factory.FactoryUtil; import org.apache.seatunnel.api.table.factory.TableSinkFactory; @@ -33,14 +34,28 @@ import java.lang.reflect.Method; import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Optional; +import java.util.ServiceLoader; @Slf4j public class ConnectorSpecificationCheckTest { @Test public void testAllConnectorImplementFactoryWithUpToDateMethod() throws ClassNotFoundException { + + ServiceLoader sources = + ServiceLoader.load( + SeaTunnelSource.class, Thread.currentThread().getContextClassLoader()); + Map sourceWithSPI = new HashMap<>(); + Iterator sourceIterator = sources.iterator(); + while (sourceIterator.hasNext()) { + SeaTunnelSource source = sourceIterator.next(); + sourceWithSPI.put(source.getPluginName(), source.getClass().getName()); + } List sourceFactories = FactoryUtil.discoverFactories( Thread.currentThread().getContextClassLoader(), TableSourceFactory.class); @@ -57,6 +72,10 @@ public void testAllConnectorImplementFactoryWithUpToDateMethod() throws ClassNot TableSourceFactoryContext.class) .isPresent() && !blockList.contains(factory.getClass().getSimpleName())) { + Assertions.assertFalse( + sourceWithSPI.containsKey(factory.factoryIdentifier()), + "Please remove `@AutoService(SeaTunnelSource.class)` annotation in " + + sourceWithSPI.get(factory.factoryIdentifier())); Class sourceClass = factory.getSourceClass(); Optional prepare = ReflectionUtils.getDeclaredMethod(sourceClass, "prepare"); @@ -84,12 +103,25 @@ public void 
testAllConnectorImplementFactoryWithUpToDateMethod() throws ClassNot List sinkFactories = FactoryUtil.discoverFactories( Thread.currentThread().getContextClassLoader(), TableSinkFactory.class); + ServiceLoader sinks = + ServiceLoader.load( + SeaTunnelSink.class, Thread.currentThread().getContextClassLoader()); + Map sinkWithSPI = new HashMap<>(); + Iterator sinkIterator = sinks.iterator(); + while (sinkIterator.hasNext()) { + SeaTunnelSink sink = sinkIterator.next(); + sinkWithSPI.put(sink.getPluginName(), sink.getClass().getName()); + } for (TableSinkFactory factory : sinkFactories) { String factoryName = factory.getClass().getSimpleName(); if (ReflectionUtils.getDeclaredMethod( factory.getClass(), "createSink", TableSinkFactoryContext.class) .isPresent() && !blockList.contains(factoryName)) { + Assertions.assertFalse( + sinkWithSPI.containsKey(factory.factoryIdentifier()), + "Please remove `@AutoService(SeaTunnelSink.class)` annotation in " + + sinkWithSPI.get(factory.factoryIdentifier())); Class sinkClass = (Class) Class.forName( From c8a682c9c995c8bd40f1c0f38f46e7f05f5fada1 Mon Sep 17 00:00:00 2001 From: dailai Date: Thu, 21 Mar 2024 15:12:44 +0800 Subject: [PATCH 25/59] [Fix][SQLTransform] fix the scale loss for the sql transform (#6553) --- .../seatunnel/transform/sql/SQLTransform.java | 1 + .../transform/sql/SQLTransformTest.java | 109 ++++++++++++++++++ 2 files changed, 110 insertions(+) create mode 100644 seatunnel-transforms-v2/src/test/java/org/apache/seatunnel/transform/sql/SQLTransformTest.java diff --git a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/SQLTransform.java b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/SQLTransform.java index bddb1c64f21..ac573f29ce4 100644 --- a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/SQLTransform.java +++ b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/sql/SQLTransform.java @@ -167,6 +167,7 @@ protected 
TableSchema transformTableSchema() { fieldNames[i], fieldTypes[i], simpleColumn.getColumnLength(), + simpleColumn.getScale(), simpleColumn.isNullable(), simpleColumn.getDefaultValue(), simpleColumn.getComment()); diff --git a/seatunnel-transforms-v2/src/test/java/org/apache/seatunnel/transform/sql/SQLTransformTest.java b/seatunnel-transforms-v2/src/test/java/org/apache/seatunnel/transform/sql/SQLTransformTest.java new file mode 100644 index 00000000000..afafa57514b --- /dev/null +++ b/seatunnel-transforms-v2/src/test/java/org/apache/seatunnel/transform/sql/SQLTransformTest.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.transform.sql; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; +import org.apache.seatunnel.api.table.catalog.TableSchema; +import org.apache.seatunnel.api.table.type.BasicType; +import org.apache.seatunnel.api.table.type.LocalTimeType; +import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.table.type.SeaTunnelRowType; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Objects; + +public class SQLTransformTest { + + private static final String TEST_NAME = "test"; + private static final String TIMESTAMP_FILEDNAME = "create_time"; + private static final String[] FILED_NAMES = + new String[] {"id", "name", "age", TIMESTAMP_FILEDNAME}; + private static final String GENERATE_PARTITION_KEY = "dt"; + private static final ReadonlyConfig READONLY_CONFIG = + ReadonlyConfig.fromMap( + new HashMap() { + { + put( + "query", + "select *,FORMATDATETIME(create_time,'yyyy-MM-dd HH:mm') as dt from test"); + } + }); + + @Test + public void testScaleSupport() { + SQLTransform sqlTransform = new SQLTransform(READONLY_CONFIG, getCatalogTable()); + TableSchema tableSchema = sqlTransform.transformTableSchema(); + tableSchema + .getColumns() + .forEach( + column -> { + if (column.getName().equals(TIMESTAMP_FILEDNAME)) { + Assertions.assertEquals(9, column.getScale()); + } else if (column.getName().equals(GENERATE_PARTITION_KEY)) { + Assertions.assertTrue(Objects.isNull(column.getScale())); + } else { + Assertions.assertEquals(3, column.getColumnLength()); + } + }); + } + + private CatalogTable getCatalogTable() { + SeaTunnelRowType rowType = + new SeaTunnelRowType( + FILED_NAMES, + new SeaTunnelDataType[] { + 
BasicType.INT_TYPE, + BasicType.STRING_TYPE, + BasicType.INT_TYPE, + LocalTimeType.LOCAL_DATE_TIME_TYPE + }); + TableSchema.Builder schemaBuilder = TableSchema.builder(); + for (int i = 0; i < rowType.getTotalFields(); i++) { + Integer scale = null; + Long columnLength = null; + if (rowType.getFieldName(i).equals(TIMESTAMP_FILEDNAME)) { + scale = 9; + } else { + columnLength = 3L; + } + PhysicalColumn column = + PhysicalColumn.of( + rowType.getFieldName(i), + rowType.getFieldType(i), + columnLength, + scale, + true, + null, + null); + schemaBuilder.column(column); + } + return CatalogTable.of( + TableIdentifier.of(TEST_NAME, TEST_NAME, null, TEST_NAME), + schemaBuilder.build(), + new HashMap<>(), + new ArrayList<>(), + "It has column information."); + } +} From 2493650d49364d85a5d169ca3183a7f1119eecf1 Mon Sep 17 00:00:00 2001 From: hailin0 Date: Fri, 22 Mar 2024 18:50:10 +0800 Subject: [PATCH 26/59] [Chore] Remove unused configuration for log4j1.x (#6544) --- .../src/main/resources/log4j.properties | 22 ------------------- .../src/test/resources/log4j.properties | 22 ------------------- 2 files changed, 44 deletions(-) delete mode 100644 seatunnel-core/seatunnel-starter/src/main/resources/log4j.properties delete mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-iceberg-hadoop3-e2e/src/test/resources/log4j.properties diff --git a/seatunnel-core/seatunnel-starter/src/main/resources/log4j.properties b/seatunnel-core/seatunnel-starter/src/main/resources/log4j.properties deleted file mode 100644 index db5d9e51220..00000000000 --- a/seatunnel-core/seatunnel-starter/src/main/resources/log4j.properties +++ /dev/null @@ -1,22 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Set everything to be logged to the console -log4j.rootCategory=INFO, console -log4j.appender.console=org.apache.log4j.ConsoleAppender -log4j.appender.console.target=System.err -log4j.appender.console.layout=org.apache.log4j.PatternLayout -log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-iceberg-hadoop3-e2e/src/test/resources/log4j.properties b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-iceberg-hadoop3-e2e/src/test/resources/log4j.properties deleted file mode 100644 index db5d9e51220..00000000000 --- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-iceberg-hadoop3-e2e/src/test/resources/log4j.properties +++ /dev/null @@ -1,22 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Set everything to be logged to the console -log4j.rootCategory=INFO, console -log4j.appender.console=org.apache.log4j.ConsoleAppender -log4j.appender.console.target=System.err -log4j.appender.console.layout=org.apache.log4j.PatternLayout -log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n From bac19f210c1030b79ec2fae02c7812298bddbbed Mon Sep 17 00:00:00 2001 From: Leon Yoah <95527066+LeonYoah@users.noreply.github.com> Date: Sun, 24 Mar 2024 22:10:10 +0800 Subject: [PATCH 27/59] [bugfix][zeta] Fixed the problem that the [jvm_options] configuration file G1GC does not take effect (#6564) --- config/jvm_options | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/jvm_options b/config/jvm_options index b695d66f110..c8a7218d1c0 100644 --- a/config/jvm_options +++ b/config/jvm_options @@ -27,4 +27,4 @@ -XX:MaxMetaspaceSize=2g # G1GC --XX:+UseG1GC \ No newline at end of file +-XX:+UseG1GC From ddca95f32c8cfec8ef22d087ce4ae5ccf78fcdd7 Mon Sep 17 00:00:00 2001 From: ZhilinLi Date: Wed, 27 Mar 2024 09:59:51 +0800 Subject: [PATCH 28/59] [Improve][JDBC] Optimized code style for getting jdbc field types (#6583) --- .../converter/AbstractJdbcRowConverter.java | 28 ++++---- .../kingbase/KingbaseJdbcRowConverter.java | 28 ++++---- .../psql/PostgresJdbcRowConverter.java | 28 ++++---- .../sqlserver/SqlserverJdbcRowConverter.java | 4 +- ...JdbcUtils.java => JdbcFieldTypeUtils.java} | 64 ++++++++----------- 5 files changed, 72 insertions(+), 80 deletions(-) rename 
seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/{JdbcUtils.java => JdbcFieldTypeUtils.java} (71%) diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/converter/AbstractJdbcRowConverter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/converter/AbstractJdbcRowConverter.java index 5a4a6b60d85..8ff8ac47d7c 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/converter/AbstractJdbcRowConverter.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/converter/AbstractJdbcRowConverter.java @@ -23,7 +23,7 @@ import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; import org.apache.seatunnel.connectors.seatunnel.jdbc.exception.JdbcConnectorException; -import org.apache.seatunnel.connectors.seatunnel.jdbc.utils.JdbcUtils; +import org.apache.seatunnel.connectors.seatunnel.jdbc.utils.JdbcFieldTypeUtils; import lombok.extern.slf4j.Slf4j; @@ -56,34 +56,34 @@ public SeaTunnelRow toInternal(ResultSet rs, TableSchema tableSchema) throws SQL int resultSetIndex = fieldIndex + 1; switch (seaTunnelDataType.getSqlType()) { case STRING: - fields[fieldIndex] = JdbcUtils.getString(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getString(rs, resultSetIndex); break; case BOOLEAN: - fields[fieldIndex] = JdbcUtils.getBoolean(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getBoolean(rs, resultSetIndex); break; case TINYINT: - fields[fieldIndex] = JdbcUtils.getByte(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getByte(rs, resultSetIndex); break; case SMALLINT: - fields[fieldIndex] = JdbcUtils.getShort(rs, resultSetIndex); + fields[fieldIndex] = 
JdbcFieldTypeUtils.getShort(rs, resultSetIndex); break; case INT: - fields[fieldIndex] = JdbcUtils.getInt(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getInt(rs, resultSetIndex); break; case BIGINT: - fields[fieldIndex] = JdbcUtils.getLong(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getLong(rs, resultSetIndex); break; case FLOAT: - fields[fieldIndex] = JdbcUtils.getFloat(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getFloat(rs, resultSetIndex); break; case DOUBLE: - fields[fieldIndex] = JdbcUtils.getDouble(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getDouble(rs, resultSetIndex); break; case DECIMAL: - fields[fieldIndex] = JdbcUtils.getBigDecimal(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getBigDecimal(rs, resultSetIndex); break; case DATE: - Date sqlDate = JdbcUtils.getDate(rs, resultSetIndex); + Date sqlDate = JdbcFieldTypeUtils.getDate(rs, resultSetIndex); fields[fieldIndex] = Optional.ofNullable(sqlDate).map(e -> e.toLocalDate()).orElse(null); break; @@ -91,14 +91,14 @@ public SeaTunnelRow toInternal(ResultSet rs, TableSchema tableSchema) throws SQL fields[fieldIndex] = readTime(rs, resultSetIndex); break; case TIMESTAMP: - Timestamp sqlTimestamp = JdbcUtils.getTimestamp(rs, resultSetIndex); + Timestamp sqlTimestamp = JdbcFieldTypeUtils.getTimestamp(rs, resultSetIndex); fields[fieldIndex] = Optional.ofNullable(sqlTimestamp) .map(e -> e.toLocalDateTime()) .orElse(null); break; case BYTES: - fields[fieldIndex] = JdbcUtils.getBytes(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getBytes(rs, resultSetIndex); break; case NULL: fields[fieldIndex] = null; @@ -116,7 +116,7 @@ public SeaTunnelRow toInternal(ResultSet rs, TableSchema tableSchema) throws SQL } protected LocalTime readTime(ResultSet rs, int resultSetIndex) throws SQLException { - Time sqlTime = JdbcUtils.getTime(rs, resultSetIndex); + Time sqlTime = JdbcFieldTypeUtils.getTime(rs, resultSetIndex); 
return Optional.ofNullable(sqlTime).map(e -> e.toLocalTime()).orElse(null); } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseJdbcRowConverter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseJdbcRowConverter.java index 4aa41c0f4c1..4a9411b99b5 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseJdbcRowConverter.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseJdbcRowConverter.java @@ -25,7 +25,7 @@ import org.apache.seatunnel.connectors.seatunnel.jdbc.exception.JdbcConnectorException; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.AbstractJdbcRowConverter; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; -import org.apache.seatunnel.connectors.seatunnel.jdbc.utils.JdbcUtils; +import org.apache.seatunnel.connectors.seatunnel.jdbc.utils.JdbcFieldTypeUtils; import java.math.BigDecimal; import java.sql.Date; @@ -56,51 +56,51 @@ public SeaTunnelRow toInternal(ResultSet rs, TableSchema tableSchema) throws SQL int resultSetIndex = fieldIndex + 1; switch (seaTunnelDataType.getSqlType()) { case STRING: - fields[fieldIndex] = JdbcUtils.getString(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getString(rs, resultSetIndex); break; case BOOLEAN: - fields[fieldIndex] = JdbcUtils.getBoolean(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getBoolean(rs, resultSetIndex); break; case TINYINT: - fields[fieldIndex] = JdbcUtils.getByte(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getByte(rs, resultSetIndex); break; case SMALLINT: - fields[fieldIndex] = JdbcUtils.getShort(rs, resultSetIndex); + 
fields[fieldIndex] = JdbcFieldTypeUtils.getShort(rs, resultSetIndex); break; case INT: - fields[fieldIndex] = JdbcUtils.getInt(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getInt(rs, resultSetIndex); break; case BIGINT: - fields[fieldIndex] = JdbcUtils.getLong(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getLong(rs, resultSetIndex); break; case FLOAT: - fields[fieldIndex] = JdbcUtils.getFloat(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getFloat(rs, resultSetIndex); break; case DOUBLE: - fields[fieldIndex] = JdbcUtils.getDouble(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getDouble(rs, resultSetIndex); break; case DECIMAL: - fields[fieldIndex] = JdbcUtils.getBigDecimal(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getBigDecimal(rs, resultSetIndex); break; case DATE: - Date sqlDate = JdbcUtils.getDate(rs, resultSetIndex); + Date sqlDate = JdbcFieldTypeUtils.getDate(rs, resultSetIndex); fields[fieldIndex] = Optional.ofNullable(sqlDate).map(Date::toLocalDate).orElse(null); break; case TIME: - Time sqlTime = JdbcUtils.getTime(rs, resultSetIndex); + Time sqlTime = JdbcFieldTypeUtils.getTime(rs, resultSetIndex); fields[fieldIndex] = Optional.ofNullable(sqlTime).map(Time::toLocalTime).orElse(null); break; case TIMESTAMP: - Timestamp sqlTimestamp = JdbcUtils.getTimestamp(rs, resultSetIndex); + Timestamp sqlTimestamp = JdbcFieldTypeUtils.getTimestamp(rs, resultSetIndex); fields[fieldIndex] = Optional.ofNullable(sqlTimestamp) .map(Timestamp::toLocalDateTime) .orElse(null); break; case BYTES: - fields[fieldIndex] = JdbcUtils.getBytes(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getBytes(rs, resultSetIndex); break; case NULL: fields[fieldIndex] = null; diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresJdbcRowConverter.java 
b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresJdbcRowConverter.java index 171ab406f53..f1cd4f8ec98 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresJdbcRowConverter.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresJdbcRowConverter.java @@ -26,7 +26,7 @@ import org.apache.seatunnel.connectors.seatunnel.jdbc.exception.JdbcConnectorException; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.AbstractJdbcRowConverter; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; -import org.apache.seatunnel.connectors.seatunnel.jdbc.utils.JdbcUtils; +import org.apache.seatunnel.connectors.seatunnel.jdbc.utils.JdbcFieldTypeUtils; import java.sql.Array; import java.sql.Date; @@ -65,52 +65,52 @@ public SeaTunnelRow toInternal(ResultSet rs, TableSchema tableSchema) throws SQL ? 
null : rs.getObject(resultSetIndex).toString(); } else { - fields[fieldIndex] = JdbcUtils.getString(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getString(rs, resultSetIndex); } break; case BOOLEAN: - fields[fieldIndex] = JdbcUtils.getBoolean(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getBoolean(rs, resultSetIndex); break; case TINYINT: - fields[fieldIndex] = JdbcUtils.getByte(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getByte(rs, resultSetIndex); break; case SMALLINT: - fields[fieldIndex] = JdbcUtils.getShort(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getShort(rs, resultSetIndex); break; case INT: - fields[fieldIndex] = JdbcUtils.getInt(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getInt(rs, resultSetIndex); break; case BIGINT: - fields[fieldIndex] = JdbcUtils.getLong(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getLong(rs, resultSetIndex); break; case FLOAT: - fields[fieldIndex] = JdbcUtils.getFloat(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getFloat(rs, resultSetIndex); break; case DOUBLE: - fields[fieldIndex] = JdbcUtils.getDouble(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getDouble(rs, resultSetIndex); break; case DECIMAL: - fields[fieldIndex] = JdbcUtils.getBigDecimal(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getBigDecimal(rs, resultSetIndex); break; case DATE: - Date sqlDate = JdbcUtils.getDate(rs, resultSetIndex); + Date sqlDate = JdbcFieldTypeUtils.getDate(rs, resultSetIndex); fields[fieldIndex] = Optional.ofNullable(sqlDate).map(e -> e.toLocalDate()).orElse(null); break; case TIME: - Time sqlTime = JdbcUtils.getTime(rs, resultSetIndex); + Time sqlTime = JdbcFieldTypeUtils.getTime(rs, resultSetIndex); fields[fieldIndex] = Optional.ofNullable(sqlTime).map(e -> e.toLocalTime()).orElse(null); break; case TIMESTAMP: - Timestamp sqlTimestamp = JdbcUtils.getTimestamp(rs, resultSetIndex); + 
Timestamp sqlTimestamp = JdbcFieldTypeUtils.getTimestamp(rs, resultSetIndex); fields[fieldIndex] = Optional.ofNullable(sqlTimestamp) .map(e -> e.toLocalDateTime()) .orElse(null); break; case BYTES: - fields[fieldIndex] = JdbcUtils.getBytes(rs, resultSetIndex); + fields[fieldIndex] = JdbcFieldTypeUtils.getBytes(rs, resultSetIndex); break; case NULL: fields[fieldIndex] = null; diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlserverJdbcRowConverter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlserverJdbcRowConverter.java index efb17292444..5ae0dec1afa 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlserverJdbcRowConverter.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlserverJdbcRowConverter.java @@ -25,7 +25,7 @@ import org.apache.seatunnel.connectors.seatunnel.jdbc.exception.JdbcConnectorException; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.AbstractJdbcRowConverter; import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; -import org.apache.seatunnel.connectors.seatunnel.jdbc.utils.JdbcUtils; +import org.apache.seatunnel.connectors.seatunnel.jdbc.utils.JdbcFieldTypeUtils; import java.math.BigDecimal; import java.sql.PreparedStatement; @@ -46,7 +46,7 @@ public String converterName() { @Override protected LocalTime readTime(ResultSet rs, int resultSetIndex) throws SQLException { - Timestamp sqlTime = JdbcUtils.getTimestamp(rs, resultSetIndex); + Timestamp sqlTime = JdbcFieldTypeUtils.getTimestamp(rs, resultSetIndex); return Optional.ofNullable(sqlTime) .map(e -> e.toLocalDateTime().toLocalTime()) .orElse(null); diff --git 
a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcUtils.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcFieldTypeUtils.java similarity index 71% rename from seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcUtils.java rename to seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcFieldTypeUtils.java index b9f7f1eac3f..ca8edb65769 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcUtils.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/utils/JdbcFieldTypeUtils.java @@ -23,61 +23,40 @@ import java.sql.Time; import java.sql.Timestamp; -public final class JdbcUtils { +public final class JdbcFieldTypeUtils { - private JdbcUtils() {} - - public static String getString(ResultSet resultSet, int columnIndex) throws SQLException { - return resultSet.getString(columnIndex); - } + private JdbcFieldTypeUtils() {} public static Boolean getBoolean(ResultSet resultSet, int columnIndex) throws SQLException { - if (null == resultSet.getObject(columnIndex)) { - return null; - } - return resultSet.getBoolean(columnIndex); + return getNullableValue(resultSet, columnIndex, ResultSet::getBoolean); } public static Byte getByte(ResultSet resultSet, int columnIndex) throws SQLException { - if (null == resultSet.getObject(columnIndex)) { - return null; - } - return resultSet.getByte(columnIndex); + return getNullableValue(resultSet, columnIndex, ResultSet::getByte); } public static Short getShort(ResultSet resultSet, int columnIndex) throws SQLException { - if (null == resultSet.getObject(columnIndex)) { - return null; - } - return resultSet.getShort(columnIndex); + return getNullableValue(resultSet, columnIndex, 
ResultSet::getShort); } public static Integer getInt(ResultSet resultSet, int columnIndex) throws SQLException { - if (null == resultSet.getObject(columnIndex)) { - return null; - } - return resultSet.getInt(columnIndex); + return getNullableValue(resultSet, columnIndex, ResultSet::getInt); } public static Long getLong(ResultSet resultSet, int columnIndex) throws SQLException { - if (null == resultSet.getObject(columnIndex)) { - return null; - } - return resultSet.getLong(columnIndex); + return getNullableValue(resultSet, columnIndex, ResultSet::getLong); } public static Float getFloat(ResultSet resultSet, int columnIndex) throws SQLException { - if (null == resultSet.getObject(columnIndex)) { - return null; - } - return resultSet.getFloat(columnIndex); + return getNullableValue(resultSet, columnIndex, ResultSet::getFloat); } public static Double getDouble(ResultSet resultSet, int columnIndex) throws SQLException { - if (null == resultSet.getObject(columnIndex)) { - return null; - } - return resultSet.getDouble(columnIndex); + return getNullableValue(resultSet, columnIndex, ResultSet::getDouble); + } + + public static String getString(ResultSet resultSet, int columnIndex) throws SQLException { + return resultSet.getString(columnIndex); } public static BigDecimal getBigDecimal(ResultSet resultSet, int columnIndex) @@ -98,9 +77,22 @@ public static Timestamp getTimestamp(ResultSet resultSet, int columnIndex) throw } public static byte[] getBytes(ResultSet resultSet, int columnIndex) throws SQLException { - if (null == resultSet.getObject(columnIndex)) { + return resultSet.getBytes(columnIndex); + } + + private static T getNullableValue( + ResultSet resultSet, + int columnIndex, + ThrowingFunction getter) + throws SQLException { + if (resultSet.getObject(columnIndex) == null) { return null; } - return resultSet.getBytes(columnIndex); + return getter.apply(resultSet, columnIndex); + } + + @FunctionalInterface + private interface ThrowingFunction { + R apply(T t, int 
columnIndex) throws E; } } From 809870a60ff0d852468e795b61c4dfd281bb7551 Mon Sep 17 00:00:00 2001 From: Jarvis Date: Wed, 27 Mar 2024 10:23:24 +0800 Subject: [PATCH 29/59] [Improve][RestAPI] return finished job info when job is finished (#6576) --- docs/en/seatunnel-engine/rest-api.md | 18 ++++-- .../seatunnel/engine/e2e/RestApiIT.java | 36 ++++++++++++ .../rest/RestHttpGetCommandProcessor.java | 56 +++++++++++++++++++ 3 files changed, 104 insertions(+), 6 deletions(-) diff --git a/docs/en/seatunnel-engine/rest-api.md b/docs/en/seatunnel-engine/rest-api.md index 4a56c7da7e2..3f1069b566b 100644 --- a/docs/en/seatunnel-engine/rest-api.md +++ b/docs/en/seatunnel-engine/rest-api.md @@ -92,8 +92,6 @@ network: "jobId": "", "jobName": "", "jobStatus": "", - "envOptions": { - }, "createTime": "", "jobDag": { "vertices": [ @@ -101,16 +99,24 @@ network: "edges": [ ] }, - "pluginJarsUrls": [ - ], - "isStartWithSavePoint": false, "metrics": { "sourceReceivedCount": "", "sinkWriteCount": "" - } + }, + "finishedTime": "", + "errorMsg": null, + "envOptions": { + }, + "pluginJarsUrls": [ + ], + "isStartWithSavePoint": false } ``` +`jobId`, `jobName`, `jobStatus`, `createTime`, `jobDag`, `metrics` always be returned. +`envOptions`, `pluginJarsUrls`, `isStartWithSavePoint` will return when job is running. +`finishedTime`, `errorMsg` will return when job is finished. 
+ When we can't get the job info, the response will be: ```json diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/RestApiIT.java b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/RestApiIT.java index 5427a8e1c2b..c7be274ad2a 100644 --- a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/RestApiIT.java +++ b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/RestApiIT.java @@ -52,6 +52,8 @@ public class RestApiIT { private static ClientJobProxy clientJobProxy; + private static ClientJobProxy batchJobProxy; + private static HazelcastInstanceImpl node1; private static HazelcastInstanceImpl node2; @@ -85,6 +87,19 @@ void beforeClass() throws Exception { () -> Assertions.assertEquals( JobStatus.RUNNING, clientJobProxy.getJobStatus())); + + String batchFilePath = TestUtils.getResource("fakesource_to_console.conf"); + JobConfig batchConf = new JobConfig(); + batchConf.setName("fake_to_console"); + ClientJobExecutionEnvironment batchJobExecutionEnv = + engineClient.createExecutionContext(batchFilePath, batchConf, seaTunnelConfig); + batchJobProxy = batchJobExecutionEnv.execute(); + Awaitility.await() + .atMost(5, TimeUnit.MINUTES) + .untilAsserted( + () -> + Assertions.assertEquals( + JobStatus.FINISHED, batchJobProxy.getJobStatus())); } @Test @@ -108,6 +123,27 @@ public void testGetRunningJobById() { }); } + @Test + public void testGetJobById() { + Arrays.asList(node2, node1) + .forEach( + instance -> { + given().get( + HOST + + instance.getCluster() + .getLocalMember() + .getAddress() + .getPort() + + RestConstant.RUNNING_JOB_URL + + "/" + + batchJobProxy.getJobId()) + .then() + .statusCode(200) + .body("jobName", equalTo("fake_to_console")) + .body("jobStatus", equalTo("FINISHED")); + }); + } + @Test public void 
testGetAnNotExistJobById() { Arrays.asList(node2, node1) diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpGetCommandProcessor.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpGetCommandProcessor.java index 81a1047c749..79f29575a14 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpGetCommandProcessor.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/rest/RestHttpGetCommandProcessor.java @@ -272,8 +272,36 @@ private void handleJobInfoById(HttpGetCommand command, String uri) { .getMap(Constant.IMAP_RUNNING_JOB_INFO) .get(Long.valueOf(jobId)); + JobState finishedJobState = + (JobState) + this.textCommandService + .getNode() + .getNodeEngine() + .getHazelcastInstance() + .getMap(Constant.IMAP_FINISHED_JOB_STATE) + .get(Long.valueOf(jobId)); if (!jobId.isEmpty() && jobInfo != null) { this.prepareResponse(command, convertToJson(jobInfo, Long.parseLong(jobId))); + } else if (!jobId.isEmpty() && finishedJobState != null) { + JobMetrics finishedJobMetrics = + (JobMetrics) + this.textCommandService + .getNode() + .getNodeEngine() + .getHazelcastInstance() + .getMap(Constant.IMAP_FINISHED_JOB_METRICS) + .get(Long.valueOf(jobId)); + JobDAGInfo finishedJobDAGInfo = + (JobDAGInfo) + this.textCommandService + .getNode() + .getNodeEngine() + .getHazelcastInstance() + .getMap(Constant.IMAP_FINISHED_JOB_VERTEX_INFO) + .get(Long.valueOf(jobId)); + this.prepareResponse( + command, + convertToJson(finishedJobState, finishedJobMetrics, finishedJobDAGInfo)); } else { this.prepareResponse(command, new JsonObject().add(RestConstant.JOB_ID, jobId)); } @@ -411,6 +439,34 @@ private JsonObject convertToJson(JobInfo jobInfo, long jobId) { return jobInfoJson; } + private JsonObject convertToJson( + JobState finishedJobState, + JobMetrics 
finishedJobMetrics, + JobDAGInfo finishedJobDAGInfo) { + JsonObject jobInfoJson = new JsonObject(); + jobInfoJson + .add(RestConstant.JOB_ID, String.valueOf(finishedJobState.getJobId())) + .add(RestConstant.JOB_NAME, finishedJobState.getJobName()) + .add(RestConstant.JOB_STATUS, finishedJobState.getJobStatus().toString()) + .add(RestConstant.ERROR_MSG, finishedJobState.getErrorMessage()) + .add( + RestConstant.CREATE_TIME, + new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") + .format(new Date(finishedJobState.getSubmitTime()))) + .add( + RestConstant.FINISH_TIME, + new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") + .format(new Date(finishedJobState.getFinishTime()))) + .add( + RestConstant.JOB_DAG, + Json.parse(JsonUtils.toJsonString(finishedJobDAGInfo)).asObject()) + .add(RestConstant.PLUGIN_JARS_URLS, new JsonArray()) + .add( + RestConstant.METRICS, + JsonUtil.toJsonObject(getJobMetrics(finishedJobMetrics.toJsonString()))); + return jobInfoJson; + } + private JsonObject convertToJson( JobState jobState, String jobMetrics, JsonObject jobDAGInfo, long jobId) { JsonObject jobInfoJson = new JsonObject(); From 2b1234c7ae885bb26540d3e98d75c623db5d556d Mon Sep 17 00:00:00 2001 From: dailai Date: Wed, 27 Mar 2024 10:23:50 +0800 Subject: [PATCH 30/59] [Feature][Paimon] Support specify paimon table write properties, partition keys and primary keys (#6535) --- docs/en/concept/schema-feature.md | 1 + docs/en/connector-v2/sink/Paimon.md | 53 +++- docs/zh/concept/schema-feature.md | 1 + docs/zh/connector-v2/sink/Paimon.md | 53 +++- .../catalog/schema/ReadonlyConfigParser.java | 8 +- .../catalog/schema/TableSchemaOptions.java | 6 + .../paimon/catalog/PaimonCatalog.java | 34 +-- .../paimon/catalog/PaimonCatalogFactory.java | 7 +- .../paimon/config/PaimonSinkConfig.java | 49 +++- .../paimon/data/PaimonTypeMapper.java | 5 +- .../paimon/sink/PaimonSinkFactory.java | 5 +- .../paimon/sink/PaimonSinkWriter.java | 17 +- .../seatunnel/paimon/utils/RowConverter.java | 29 +- 
.../paimon/utils/RowTypeConverter.java | 81 +++++- .../seatunnel/paimon/utils/SchemaUtil.java | 37 ++- .../paimon/utils/RowConverterTest.java | 57 +++- .../paimon/utils/RowTypeConverterTest.java | 57 +++- .../e2e/connector/paimon/PaimonSinkCDCIT.java | 260 +++++++++++++++++- .../resources/fake_cdc_sink_paimon_case1.conf | 2 +- .../resources/fake_cdc_sink_paimon_case3.conf | 93 +++++++ .../resources/fake_cdc_sink_paimon_case4.conf | 91 ++++++ .../resources/fake_cdc_sink_paimon_case5.conf | 93 +++++++ .../resources/fake_cdc_sink_paimon_case6.conf | 93 +++++++ .../resources/fake_cdc_sink_paimon_case7.conf | 127 +++++++++ 24 files changed, 1159 insertions(+), 100 deletions(-) create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case3.conf create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case4.conf create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case5.conf create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case6.conf create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case7.conf diff --git a/docs/en/concept/schema-feature.md b/docs/en/concept/schema-feature.md index 15f8186cce1..9ae2c3d39e2 100644 --- a/docs/en/concept/schema-feature.md +++ b/docs/en/concept/schema-feature.md @@ -64,6 +64,7 @@ columns = [ | type | Yes | - | The data type of the column | | nullable | No | true | If the column can be nullable | | columnLength | No | 0 | The length of the column which will be useful when you need to define the length | +| columnScale | No | - | The scale of the column which will be useful when you need to define the scale | | defaultValue | No | null | The default value of the column | | comment | No | null | The comment of the 
column | diff --git a/docs/en/connector-v2/sink/Paimon.md b/docs/en/connector-v2/sink/Paimon.md index 5e9d3c431f7..707a0dc0dbd 100644 --- a/docs/en/connector-v2/sink/Paimon.md +++ b/docs/en/connector-v2/sink/Paimon.md @@ -12,14 +12,17 @@ Sink connector for Apache Paimon. It can support cdc mode 、auto create table. ## Options -| name | type | required | default value | Description | -|------------------|--------|----------|------------------------------|---------------------------------| -| warehouse | String | Yes | - | Paimon warehouse path | -| database | String | Yes | - | The database you want to access | -| table | String | Yes | - | The table you want to access | -| hdfs_site_path | String | No | - | | -| schema_save_mode | Enum | no | CREATE_SCHEMA_WHEN_NOT_EXIST | The schema save mode | -| data_save_mode | Enum | no | APPEND_DATA | The data save mode | +| name | type | required | default value | Description | +|-----------------------------|--------|----------|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| warehouse | String | Yes | - | Paimon warehouse path | +| database | String | Yes | - | The database you want to access | +| table | String | Yes | - | The table you want to access | +| hdfs_site_path | String | No | - | | +| schema_save_mode | Enum | No | CREATE_SCHEMA_WHEN_NOT_EXIST | The schema save mode | +| data_save_mode | Enum | No | APPEND_DATA | The data save mode | +| paimon.table.primary-keys | String | No | - | Default comma-separated list of columns (primary key) that identify a row in tables.(Notice: The partition field needs to be included in the primary key fields) | +| paimon.table.partition-keys | String | No | - | Default comma-separated list of partition fields to use when creating tables. 
| +| paimon.table.write-props | Map | No | - | Properties passed through to paimon table initialization, [reference](https://paimon.apache.org/docs/0.6/maintenance/configurations/#coreoptions). | ## Examples @@ -54,6 +57,40 @@ sink { } ``` +### Single table with write props of paimon + +```hocon +env { + parallelism = 1 + job.mode = "STREAMING" + checkpoint.interval = 5000 +} + +source { + Mysql-CDC { + base-url = "jdbc:mysql://127.0.0.1:3306/seatunnel" + username = "root" + password = "******" + table-names = ["seatunnel.role"] + } +} + +sink { + Paimon { + catalog_name="seatunnel_test" + warehouse="file:///tmp/seatunnel/paimon/hadoop-sink/" + database="seatunnel" + table="role" + paimon.table.write-props = { + bucket = 2 + file.format = "parquet" + } + paimon.table.partition-keys = "dt" + paimon.table.primary-keys = "pk_id,dt" + } +} +``` + ### Multiple table ```hocon diff --git a/docs/zh/concept/schema-feature.md b/docs/zh/concept/schema-feature.md index cc69b6d83ea..adb40892980 100644 --- a/docs/zh/concept/schema-feature.md +++ b/docs/zh/concept/schema-feature.md @@ -64,6 +64,7 @@ columns = [ | type | Yes | - | 列的数据类型 | | nullable | No | true | 列是否可空 | | columnLength | No | 0 | 列的长度,当您需要定义长度时将很有用 | +| columnScale | No | - | 列的精度,当您需要定义精度时将很有用 | | defaultValue | No | null | 列的默认值 | | comment | No | null | 列的注释 | diff --git a/docs/zh/connector-v2/sink/Paimon.md b/docs/zh/connector-v2/sink/Paimon.md index b1b4baef9b1..306bc12b562 100644 --- a/docs/zh/connector-v2/sink/Paimon.md +++ b/docs/zh/connector-v2/sink/Paimon.md @@ -12,14 +12,17 @@ Apache Paimon数据连接器。支持cdc写以及自动建表。 ## 连接器选项 -| 名称 | 类型 | 是否必须 | 默认值 | 描述 | -|------------------|--------|------|------------------------------|--------------------| -| warehouse | String | Yes | - | Paimon warehouse路径 | -| database | String | Yes | - | 数据库名称 | -| table | String | Yes | - | 表名 | -| hdfs_site_path | String | No | - | | -| schema_save_mode | Enum | no | CREATE_SCHEMA_WHEN_NOT_EXIST | schema保存模式 | -| data_save_mode | 
Enum | no | APPEND_DATA | 数据保存模式 | +| 名称 | 类型 | 是否必须 | 默认值 | 描述 | +|-----------------------------|-----|------|------------------------------|---------------------------------------------------------------------------------------------------| +| warehouse | 字符串 | 是 | - | Paimon warehouse路径 | +| database | 字符串 | 是 | - | 数据库名称 | +| table | 字符串 | 是 | - | 表名 | +| hdfs_site_path | 字符串 | 否 | - | | +| schema_save_mode | 枚举 | 否 | CREATE_SCHEMA_WHEN_NOT_EXIST | Schema保存模式 | +| data_save_mode | 枚举 | 否 | APPEND_DATA | 数据保存模式 | +| paimon.table.primary-keys | 字符串 | 否 | - | 主键字段列表,联合主键使用逗号分隔(注意:分区字段需要包含在主键字段中) | +| paimon.table.partition-keys | 字符串 | 否 | - | 分区字段列表,多字段使用逗号分隔 | +| paimon.table.write-props | Map | 否 | - | Paimon表初始化指定的属性, [参考](https://paimon.apache.org/docs/0.6/maintenance/configurations/#coreoptions) | ## 示例 @@ -54,6 +57,40 @@ sink { } ``` +### 指定paimon的写属性的单表 + +```hocon +env { + parallelism = 1 + job.mode = "STREAMING" + checkpoint.interval = 5000 +} + +source { + Mysql-CDC { + base-url = "jdbc:mysql://127.0.0.1:3306/seatunnel" + username = "root" + password = "******" + table-names = ["seatunnel.role"] + } +} + +sink { + Paimon { + catalog_name="seatunnel_test" + warehouse="file:///tmp/seatunnel/paimon/hadoop-sink/" + database="seatunnel" + table="role" + paimon.table.write-props = { + bucket = 2 + file.format = "parquet" + } + paimon.table.partition-keys = "dt" + paimon.table.primary-keys = "pk_id,dt" + } +} +``` + ### 多表 ```hocon diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/schema/ReadonlyConfigParser.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/schema/ReadonlyConfigParser.java index bac7f7b7a86..e043c0ecd72 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/schema/ReadonlyConfigParser.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/schema/ReadonlyConfigParser.java @@ -133,6 +133,11 @@ public List parse(ReadonlyConfig schemaConfig) { 
Integer columnLength = columnConfig.get( TableSchemaOptions.ColumnOptions.COLUMN_LENGTH); + + Integer columnScale = + columnConfig.get( + TableSchemaOptions.ColumnOptions.COLUMN_SCALE); + Boolean nullable = columnConfig.get(TableSchemaOptions.ColumnOptions.NULLABLE); Object defaultValue = @@ -143,7 +148,8 @@ public List parse(ReadonlyConfig schemaConfig) { return PhysicalColumn.of( name, seaTunnelDataType, - columnLength, + Long.valueOf(columnLength), + columnScale, nullable, defaultValue, comment); diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/schema/TableSchemaOptions.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/schema/TableSchemaOptions.java index 492fe1909c0..9ede187ea96 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/schema/TableSchemaOptions.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/schema/TableSchemaOptions.java @@ -86,6 +86,12 @@ public static class ColumnOptions { .noDefaultValue() .withDescription("SeaTunnel Schema Column Type"); + public static final Option COLUMN_SCALE = + Options.key("columnScale") + .intType() + .noDefaultValue() + .withDescription("SeaTunnel Schema Column scale"); + public static final Option COLUMN_LENGTH = Options.key("columnLength") .intType() diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalog.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalog.java index 7312ed28b06..8d3395af3c4 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalog.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalog.java @@ -17,10 +17,9 @@ package 
org.apache.seatunnel.connectors.seatunnel.paimon.catalog; -import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.table.catalog.Catalog; import org.apache.seatunnel.api.table.catalog.CatalogTable; -import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.catalog.Column; import org.apache.seatunnel.api.table.catalog.TablePath; import org.apache.seatunnel.api.table.catalog.TableSchema; import org.apache.seatunnel.api.table.catalog.exception.CatalogException; @@ -28,11 +27,11 @@ import org.apache.seatunnel.api.table.catalog.exception.DatabaseNotExistException; import org.apache.seatunnel.api.table.catalog.exception.TableAlreadyExistException; import org.apache.seatunnel.api.table.catalog.exception.TableNotExistException; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonSinkConfig; import org.apache.seatunnel.connectors.seatunnel.paimon.utils.SchemaUtil; import org.apache.paimon.catalog.Identifier; +import org.apache.paimon.schema.Schema; import org.apache.paimon.table.FileStoreTable; import org.apache.paimon.table.Table; import org.apache.paimon.types.DataField; @@ -48,14 +47,14 @@ public class PaimonCatalog implements Catalog, PaimonTable { private static final String DEFAULT_DATABASE = "default"; private String catalogName; - private ReadonlyConfig readonlyConfig; + private PaimonSinkConfig paimonSinkConfig; private PaimonCatalogLoader paimonCatalogLoader; private org.apache.paimon.catalog.Catalog catalog; - public PaimonCatalog(String catalogName, ReadonlyConfig readonlyConfig) { - this.readonlyConfig = readonlyConfig; + public PaimonCatalog(String catalogName, PaimonSinkConfig paimonSinkConfig) { + this.paimonSinkConfig = paimonSinkConfig; this.catalogName = catalogName; - this.paimonCatalogLoader = new PaimonCatalogLoader(new PaimonSinkConfig(readonlyConfig)); + this.paimonCatalogLoader = new 
PaimonCatalogLoader(paimonSinkConfig); } @Override @@ -135,10 +134,9 @@ public Table getPaimonTable(TablePath tablePath) public void createTable(TablePath tablePath, CatalogTable table, boolean ignoreIfExists) throws TableAlreadyExistException, DatabaseNotExistException, CatalogException { try { - catalog.createTable( - toIdentifier(tablePath), - SchemaUtil.toPaimonSchema(table.getTableSchema()), - ignoreIfExists); + Schema paimonSchema = + SchemaUtil.toPaimonSchema(table.getTableSchema(), this.paimonSinkConfig); + catalog.createTable(toIdentifier(tablePath), paimonSchema, ignoreIfExists); } catch (org.apache.paimon.catalog.Catalog.TableAlreadyExistException e) { throw new TableAlreadyExistException(this.catalogName, tablePath); } catch (org.apache.paimon.catalog.Catalog.DatabaseNotExistException e) { @@ -183,18 +181,8 @@ private CatalogTable toCatalogTable( TableSchema.Builder builder = TableSchema.builder(); dataFields.forEach( dataField -> { - String name = dataField.name(); - SeaTunnelDataType seaTunnelType = - SchemaUtil.toSeaTunnelType(dataField.type()); - PhysicalColumn physicalColumn = - PhysicalColumn.of( - name, - seaTunnelType, - (Long) null, - true, - null, - dataField.description()); - builder.column(physicalColumn); + Column column = SchemaUtil.toSeaTunnelType(dataField.type()); + builder.column(column); }); List partitionKeys = schema.partitionKeys(); diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalogFactory.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalogFactory.java index 4d94f385d9f..b8c8eb10880 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalogFactory.java +++ 
b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/catalog/PaimonCatalogFactory.java @@ -30,7 +30,7 @@ public class PaimonCatalogFactory implements CatalogFactory { @Override public Catalog createCatalog(String catalogName, ReadonlyConfig readonlyConfig) { - return new PaimonCatalog(catalogName, readonlyConfig); + return new PaimonCatalog(catalogName, new PaimonSinkConfig(readonlyConfig)); } @Override @@ -48,7 +48,10 @@ public OptionRule optionRule() { .optional( PaimonSinkConfig.HDFS_SITE_PATH, PaimonSinkConfig.SCHEMA_SAVE_MODE, - PaimonSinkConfig.DATA_SAVE_MODE) + PaimonSinkConfig.DATA_SAVE_MODE, + PaimonSinkConfig.PRIMARY_KEYS, + PaimonSinkConfig.PARTITION_KEYS, + PaimonSinkConfig.WRITE_PROPS) .build(); } } diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonSinkConfig.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonSinkConfig.java index 589fd948167..d369c74bca9 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonSinkConfig.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/config/PaimonSinkConfig.java @@ -17,17 +17,26 @@ package org.apache.seatunnel.connectors.seatunnel.paimon.config; +import org.apache.seatunnel.shade.com.google.common.annotations.VisibleForTesting; +import org.apache.seatunnel.shade.com.google.common.collect.ImmutableList; + import org.apache.seatunnel.api.configuration.Option; import org.apache.seatunnel.api.configuration.Options; import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.sink.DataSaveMode; import org.apache.seatunnel.api.sink.SchemaSaveMode; -import lombok.Getter; +import lombok.Data; + +import java.util.Arrays; +import java.util.HashMap; 
+import java.util.List; +import java.util.Map; +import static java.util.stream.Collectors.toList; import static org.apache.seatunnel.shade.com.google.common.base.Preconditions.checkNotNull; -@Getter +@Data public class PaimonSinkConfig extends PaimonConfig { public static final Option SCHEMA_SAVE_MODE = Options.key("schema_save_mode") @@ -41,6 +50,27 @@ public class PaimonSinkConfig extends PaimonConfig { .defaultValue(DataSaveMode.APPEND_DATA) .withDescription("data_save_mode"); + public static final Option PRIMARY_KEYS = + Options.key("paimon.table.primary-keys") + .stringType() + .noDefaultValue() + .withDescription( + "Default comma-separated list of columns that identify a row in tables (primary key)"); + + public static final Option PARTITION_KEYS = + Options.key("paimon.table.partition-keys") + .stringType() + .noDefaultValue() + .withDescription( + "Default comma-separated list of partition fields to use when creating tables."); + + public static final Option> WRITE_PROPS = + Options.key("paimon.table.write-props") + .mapType() + .defaultValue(new HashMap<>()) + .withDescription( + "Properties passed through to paimon table initialization, such as 'file.format', 'bucket'(org.apache.paimon.CoreOptions)"); + private String catalogName; private String warehouse; private String namespace; @@ -48,6 +78,10 @@ public class PaimonSinkConfig extends PaimonConfig { private String hdfsSitePath; private SchemaSaveMode schemaSaveMode; private DataSaveMode dataSaveMode; + private Integer bucket; + private List primaryKeys; + private List partitionKeys; + private Map writeProps; public PaimonSinkConfig(ReadonlyConfig readonlyConfig) { this.catalogName = checkArgumentNotNull(readonlyConfig.get(CATALOG_NAME)); @@ -57,10 +91,21 @@ public PaimonSinkConfig(ReadonlyConfig readonlyConfig) { this.hdfsSitePath = readonlyConfig.get(HDFS_SITE_PATH); this.schemaSaveMode = readonlyConfig.get(SCHEMA_SAVE_MODE); this.dataSaveMode = readonlyConfig.get(DATA_SAVE_MODE); + this.primaryKeys 
= stringToList(readonlyConfig.get(PRIMARY_KEYS), ","); + this.partitionKeys = stringToList(readonlyConfig.get(PARTITION_KEYS), ","); + this.writeProps = readonlyConfig.get(WRITE_PROPS); } protected T checkArgumentNotNull(T argument) { checkNotNull(argument); return argument; } + + @VisibleForTesting + public static List stringToList(String value, String regex) { + if (value == null || value.isEmpty()) { + return ImmutableList.of(); + } + return Arrays.stream(value.split(regex)).map(String::trim).collect(toList()); + } } diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/data/PaimonTypeMapper.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/data/PaimonTypeMapper.java index 1f8b1cff32f..cbf512f61dd 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/data/PaimonTypeMapper.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/data/PaimonTypeMapper.java @@ -18,7 +18,6 @@ package org.apache.seatunnel.connectors.seatunnel.paimon.data; import org.apache.seatunnel.api.table.catalog.Column; -import org.apache.seatunnel.api.table.catalog.PhysicalColumn; import org.apache.seatunnel.api.table.converter.TypeConverter; import org.apache.seatunnel.connectors.seatunnel.paimon.sink.PaimonSink; import org.apache.seatunnel.connectors.seatunnel.paimon.utils.RowTypeConverter; @@ -40,11 +39,11 @@ public String identifier() { @Override public Column convert(DataType dataType) { - return PhysicalColumn.builder().dataType(RowTypeConverter.convert(dataType)).build(); + return RowTypeConverter.convert(dataType); } @Override public DataType reconvert(Column column) { - return RowTypeConverter.reconvert(column.getDataType()); + return RowTypeConverter.reconvert(column); } } diff --git 
a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkFactory.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkFactory.java index c0b4d997ead..2f5b316dd56 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkFactory.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkFactory.java @@ -53,7 +53,10 @@ public OptionRule optionRule() { .optional( PaimonConfig.HDFS_SITE_PATH, PaimonSinkConfig.SCHEMA_SAVE_MODE, - PaimonSinkConfig.DATA_SAVE_MODE) + PaimonSinkConfig.DATA_SAVE_MODE, + PaimonSinkConfig.PRIMARY_KEYS, + PaimonSinkConfig.PARTITION_KEYS, + PaimonSinkConfig.WRITE_PROPS) .build(); } diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkWriter.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkWriter.java index 7b2e8327a99..a858c3ee7f1 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkWriter.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/sink/PaimonSinkWriter.java @@ -30,6 +30,8 @@ import org.apache.seatunnel.connectors.seatunnel.paimon.utils.RowConverter; import org.apache.paimon.data.InternalRow; +import org.apache.paimon.schema.TableSchema; +import org.apache.paimon.table.FileStoreTable; import org.apache.paimon.table.Table; import org.apache.paimon.table.sink.BatchTableCommit; import org.apache.paimon.table.sink.BatchTableWrite; @@ -74,6 +76,8 @@ public class PaimonSinkWriter private final JobContext jobContext; + private TableSchema tableSchema; + public 
PaimonSinkWriter( Context context, Table table, @@ -88,6 +92,7 @@ public PaimonSinkWriter( this.seaTunnelRowType = seaTunnelRowType; this.context = context; this.jobContext = jobContext; + this.tableSchema = ((FileStoreTable) table).schema(); } public PaimonSinkWriter( @@ -96,15 +101,7 @@ public PaimonSinkWriter( SeaTunnelRowType seaTunnelRowType, List states, JobContext jobContext) { - this.table = table; - this.tableWriteBuilder = - JobContextUtil.isBatchJob(jobContext) - ? this.table.newBatchWriteBuilder().withOverwrite() - : this.table.newStreamWriteBuilder(); - this.tableWrite = tableWriteBuilder.newWrite(); - this.seaTunnelRowType = seaTunnelRowType; - this.context = context; - this.jobContext = jobContext; + this(context, table, seaTunnelRowType, jobContext); if (Objects.isNull(states) || states.isEmpty()) { return; } @@ -132,7 +129,7 @@ public PaimonSinkWriter( @Override public void write(SeaTunnelRow element) throws IOException { - InternalRow rowData = RowConverter.convert(element, seaTunnelRowType); + InternalRow rowData = RowConverter.reconvert(element, seaTunnelRowType, tableSchema); try { tableWrite.write(rowData); } catch (Exception e) { diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowConverter.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowConverter.java index 6b9a6bf01c5..fe1c24da80a 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowConverter.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowConverter.java @@ -41,15 +41,19 @@ import org.apache.paimon.data.serializer.InternalArraySerializer; import org.apache.paimon.data.serializer.InternalMapSerializer; import org.apache.paimon.data.serializer.InternalRowSerializer; +import 
org.apache.paimon.schema.TableSchema; +import org.apache.paimon.types.DataField; import org.apache.paimon.types.DataType; import org.apache.paimon.types.DataTypes; import org.apache.paimon.types.RowType; +import org.apache.paimon.types.TimestampType; import java.math.BigDecimal; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; import java.util.HashMap; +import java.util.List; import java.util.Map; /** The converter for converting {@link InternalRow} and {@link SeaTunnelRow} */ @@ -129,7 +133,7 @@ public static Object convert(InternalArray array, SeaTunnelDataType dataType) * @param dataType SeaTunnel array data type * @return Paimon array object {@link BinaryArray} */ - public static BinaryArray convert(Object array, SeaTunnelDataType dataType) { + public static BinaryArray reconvert(Object array, SeaTunnelDataType dataType) { int length = ((Object[]) array).length; BinaryArray binaryArray = new BinaryArray(); BinaryArrayWriter binaryArrayWriter; @@ -327,10 +331,12 @@ public static SeaTunnelRow convert(InternalRow rowData, SeaTunnelRowType seaTunn * * @param seaTunnelRow SeaTunnel row object * @param seaTunnelRowType SeaTunnel row type + * @param tableSchema Paimon table schema * @return Paimon row object */ - public static InternalRow convert( - SeaTunnelRow seaTunnelRow, SeaTunnelRowType seaTunnelRowType) { + public static InternalRow reconvert( + SeaTunnelRow seaTunnelRow, SeaTunnelRowType seaTunnelRowType, TableSchema tableSchema) { + List fields = tableSchema.fields(); BinaryRow binaryRow = new BinaryRow(seaTunnelRowType.getTotalFields()); BinaryWriter binaryWriter = new BinaryRowWriter(binaryRow); // Convert SeaTunnel RowKind to Paimon RowKind @@ -390,8 +396,12 @@ public static InternalRow convert( i, Timestamp.fromLocalDateTime(date.atTime(time)), 3); break; case TIMESTAMP: + String fieldName = seaTunnelRowType.getFieldName(i); + DataField dataField = SchemaUtil.getDataField(fields, fieldName); + int precision = 
((TimestampType) dataField.type()).getPrecision(); LocalDateTime datetime = (LocalDateTime) seaTunnelRow.getField(i); - binaryWriter.writeTimestamp(i, Timestamp.fromLocalDateTime(datetime), 9); + binaryWriter.writeTimestamp( + i, Timestamp.fromLocalDateTime(datetime), precision); break; case MAP: MapType mapType = (MapType) seaTunnelRowType.getFieldType(i); @@ -404,13 +414,14 @@ public static InternalRow convert( Object[] values = field.values().toArray(new Object[0]); binaryWriter.writeMap( i, - BinaryMap.valueOf(convert(keys, keyType), convert(values, valueType)), + BinaryMap.valueOf( + reconvert(keys, keyType), reconvert(values, valueType)), new InternalMapSerializer(paimonKeyType, paimonValueType)); break; case ARRAY: ArrayType arrayType = (ArrayType) seaTunnelRowType.getFieldType(i); BinaryArray paimonArray = - convert(seaTunnelRow.getField(i), arrayType.getElementType()); + reconvert(seaTunnelRow.getField(i), arrayType.getElementType()); binaryWriter.writeArray( i, paimonArray, @@ -420,8 +431,10 @@ public static InternalRow convert( case ROW: SeaTunnelDataType rowType = seaTunnelRowType.getFieldType(i); Object row = seaTunnelRow.getField(i); - InternalRow paimonRow = convert((SeaTunnelRow) row, (SeaTunnelRowType) rowType); - RowType paimonRowType = RowTypeConverter.reconvert((SeaTunnelRowType) rowType); + InternalRow paimonRow = + reconvert((SeaTunnelRow) row, (SeaTunnelRowType) rowType, tableSchema); + RowType paimonRowType = + RowTypeConverter.reconvert((SeaTunnelRowType) rowType, tableSchema); binaryWriter.writeRow(i, paimonRow, new InternalRowSerializer(paimonRowType)); break; default: diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverter.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverter.java index 16863ebff5f..5a4f5dbe8a6 100644 --- 
a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverter.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverter.java @@ -17,6 +17,8 @@ package org.apache.seatunnel.connectors.seatunnel.paimon.utils; +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; import org.apache.seatunnel.api.table.type.BasicType; import org.apache.seatunnel.api.table.type.LocalTimeType; import org.apache.seatunnel.api.table.type.PrimitiveByteArrayType; @@ -25,6 +27,7 @@ import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; import org.apache.seatunnel.connectors.seatunnel.paimon.exception.PaimonConnectorException; +import org.apache.paimon.schema.TableSchema; import org.apache.paimon.types.ArrayType; import org.apache.paimon.types.BigIntType; import org.apache.paimon.types.BinaryType; @@ -33,6 +36,7 @@ import org.apache.paimon.types.DataField; import org.apache.paimon.types.DataType; import org.apache.paimon.types.DataTypeDefaultVisitor; +import org.apache.paimon.types.DataTypeRoot; import org.apache.paimon.types.DataTypes; import org.apache.paimon.types.DateType; import org.apache.paimon.types.DecimalType; @@ -50,6 +54,8 @@ import org.apache.paimon.types.VarCharType; import java.util.Arrays; +import java.util.List; +import java.util.Objects; /** The converter for converting {@link RowType} and {@link SeaTunnelRowType} */ public class RowTypeConverter { @@ -77,16 +83,21 @@ public static SeaTunnelRowType convert(RowType rowType) { * @param dataType Paimon data type * @return SeaTunnel data type {@link SeaTunnelDataType} */ - public static SeaTunnelDataType convert(DataType dataType) { + public static Column convert(DataType dataType) { + PhysicalColumn.PhysicalColumnBuilder physicalColumnBuilder = PhysicalColumn.builder(); SeaTunnelDataType seaTunnelDataType; 
PaimonToSeaTunnelTypeVisitor paimonToSeaTunnelTypeVisitor = PaimonToSeaTunnelTypeVisitor.INSTANCE; switch (dataType.getTypeRoot()) { case CHAR: - seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((CharType) dataType); + CharType charType = (CharType) dataType; + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit(charType); + physicalColumnBuilder.columnLength((long) charType.getLength()); break; case VARCHAR: - seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((VarCharType) dataType); + VarCharType varCharType = (VarCharType) dataType; + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit(varCharType); + physicalColumnBuilder.columnLength((long) varCharType.getLength()); break; case BOOLEAN: seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((BooleanType) dataType); @@ -95,10 +106,15 @@ public static SeaTunnelDataType convert(DataType dataType) { seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((BinaryType) dataType); break; case VARBINARY: - seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((VarBinaryType) dataType); + VarBinaryType varBinaryType = (VarBinaryType) dataType; + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit(varBinaryType); + physicalColumnBuilder.columnLength((long) varBinaryType.getLength()); break; case DECIMAL: - seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((DecimalType) dataType); + DecimalType decimalType = (DecimalType) dataType; + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit(decimalType); + physicalColumnBuilder.columnLength((long) decimalType.getPrecision()); + physicalColumnBuilder.scale(decimalType.getScale()); break; case TINYINT: seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((TinyIntType) dataType); @@ -122,14 +138,21 @@ public static SeaTunnelDataType convert(DataType dataType) { seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((DateType) dataType); break; case TIME_WITHOUT_TIME_ZONE: - seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((TimeType) dataType); 
+ TimeType timeType = (TimeType) dataType; + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit(timeType); + physicalColumnBuilder.scale(timeType.getPrecision()); break; case TIMESTAMP_WITHOUT_TIME_ZONE: - seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((TimestampType) dataType); + TimestampType timestampType = (TimestampType) dataType; + seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit(timestampType); + physicalColumnBuilder.scale(timestampType.getPrecision()); break; case TIMESTAMP_WITH_LOCAL_TIME_ZONE: + LocalZonedTimestampType localZonedTimestampType = + (LocalZonedTimestampType) dataType; seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((LocalZonedTimestampType) dataType); + physicalColumnBuilder.scale(localZonedTimestampType.getPrecision()); break; case ARRAY: seaTunnelDataType = paimonToSeaTunnelTypeVisitor.visit((ArrayType) dataType); @@ -148,7 +171,7 @@ public static SeaTunnelDataType convert(DataType dataType) { throw new PaimonConnectorException( CommonErrorCodeDeprecated.UNSUPPORTED_DATA_TYPE, errorMsg); } - return seaTunnelDataType; + return physicalColumnBuilder.dataType(seaTunnelDataType).build(); } /** @@ -157,20 +180,39 @@ public static SeaTunnelDataType convert(DataType dataType) { * @param seaTunnelRowType SeaTunnel row type {@link SeaTunnelRowType} * @return Paimon row type {@link RowType} */ - public static RowType reconvert(SeaTunnelRowType seaTunnelRowType) { + public static RowType reconvert(SeaTunnelRowType seaTunnelRowType, TableSchema tableSchema) { SeaTunnelDataType[] fieldTypes = seaTunnelRowType.getFieldTypes(); + List fields = tableSchema.fields(); DataType[] dataTypes = Arrays.stream(fieldTypes) .map(SeaTunnelTypeToPaimonVisitor.INSTANCE::visit) .toArray(DataType[]::new); DataField[] dataFields = new DataField[dataTypes.length]; for (int i = 0; i < dataTypes.length; i++) { - DataField dataField = new DataField(i, seaTunnelRowType.getFieldName(i), dataTypes[i]); + DataType dataType = dataTypes[i]; + DataTypeRoot 
typeRoot = dataType.getTypeRoot(); + String fieldName = seaTunnelRowType.getFieldName(i); + if (typeRoot.equals(DataTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE) + || typeRoot.equals(DataTypeRoot.TIMESTAMP_WITH_LOCAL_TIME_ZONE)) { + DataField dataField = SchemaUtil.getDataField(fields, fieldName); + dataType = new TimestampType(((TimestampType) dataField.type()).getPrecision()); + } + DataField dataField = new DataField(i, fieldName, dataType); dataFields[i] = dataField; } return DataTypes.ROW(dataFields); } + /** + * Mapping SeaTunnel data type of column {@link Column} to Paimon data type {@link DataType} + * + * @param column SeaTunnel data type {@link Column} + * @return Paimon data type {@link DataType} + */ + public static DataType reconvert(Column column) { + return SeaTunnelTypeToPaimonVisitor.INSTANCE.visit(column); + } + /** * Mapping SeaTunnel data type {@link SeaTunnelDataType} to Paimon data type {@link DataType} * @@ -192,6 +234,21 @@ private static class SeaTunnelTypeToPaimonVisitor { private SeaTunnelTypeToPaimonVisitor() {} + public DataType visit(Column column) { + SeaTunnelDataType dataType = column.getDataType(); + Integer scale = column.getScale(); + switch (dataType.getSqlType()) { + case TIMESTAMP: + return DataTypes.TIMESTAMP( + Objects.isNull(scale) ? TimestampType.DEFAULT_PRECISION : scale); + case TIME: + return DataTypes.TIME( + Objects.isNull(scale) ? 
TimeType.DEFAULT_PRECISION : scale); + default: + return visit(dataType); + } + } + public DataType visit(SeaTunnelDataType dataType) { switch (dataType.getSqlType()) { case TINYINT: @@ -220,8 +277,10 @@ public DataType visit(SeaTunnelDataType dataType) { return DataTypes.BOOLEAN(); case DATE: return DataTypes.DATE(); + case TIME: + return DataTypes.TIME(TimeType.MAX_PRECISION); case TIMESTAMP: - return DataTypes.TIMESTAMP(6); + return DataTypes.TIMESTAMP(TimestampType.MAX_PRECISION); case MAP: SeaTunnelDataType keyType = ((org.apache.seatunnel.api.table.type.MapType) dataType) diff --git a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/SchemaUtil.java b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/SchemaUtil.java index c03a77149c9..65129dc8b74 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/SchemaUtil.java +++ b/seatunnel-connectors-v2/connector-paimon/src/main/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/SchemaUtil.java @@ -18,14 +18,16 @@ package org.apache.seatunnel.connectors.seatunnel.paimon.utils; import org.apache.seatunnel.api.table.catalog.Column; -import org.apache.seatunnel.api.table.catalog.PrimaryKey; import org.apache.seatunnel.api.table.catalog.TableSchema; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.connectors.seatunnel.paimon.config.PaimonSinkConfig; import org.apache.seatunnel.connectors.seatunnel.paimon.data.PaimonTypeMapper; import org.apache.paimon.schema.Schema; +import org.apache.paimon.types.DataField; import org.apache.paimon.types.DataType; +import java.util.List; +import java.util.Map; import java.util.Objects; /** The util seatunnel schema to paimon schema */ @@ -35,20 +37,39 @@ public static DataType toPaimonType(Column column) { return 
PaimonTypeMapper.INSTANCE.reconvert(column); } - public static Schema toPaimonSchema(TableSchema tableSchema) { + public static Schema toPaimonSchema( + TableSchema tableSchema, PaimonSinkConfig paimonSinkConfig) { Schema.Builder paiSchemaBuilder = Schema.newBuilder(); for (int i = 0; i < tableSchema.getColumns().size(); i++) { Column column = tableSchema.getColumns().get(i); paiSchemaBuilder.column(column.getName(), toPaimonType(column)); } - PrimaryKey primaryKey = tableSchema.getPrimaryKey(); - if (Objects.nonNull(primaryKey) && primaryKey.getColumnNames().size() > 0) { - paiSchemaBuilder.primaryKey(primaryKey.getColumnNames()); + List primaryKeys = paimonSinkConfig.getPrimaryKeys(); + if (primaryKeys.isEmpty() && Objects.nonNull(tableSchema.getPrimaryKey())) { + primaryKeys = tableSchema.getPrimaryKey().getColumnNames(); + } + if (!primaryKeys.isEmpty()) { + paiSchemaBuilder.primaryKey(primaryKeys); + } + List partitionKeys = paimonSinkConfig.getPartitionKeys(); + if (!partitionKeys.isEmpty()) { + paiSchemaBuilder.partitionKeys(partitionKeys); + } + Map writeProps = paimonSinkConfig.getWriteProps(); + if (!writeProps.isEmpty()) { + paiSchemaBuilder.options(writeProps); } return paiSchemaBuilder.build(); } - public static SeaTunnelDataType toSeaTunnelType(DataType dataType) { - return PaimonTypeMapper.INSTANCE.convert(dataType).getDataType(); + public static Column toSeaTunnelType(DataType dataType) { + return PaimonTypeMapper.INSTANCE.convert(dataType); + } + + public static DataField getDataField(List fields, String fieldName) { + return fields.parallelStream() + .filter(field -> field.name().equals(fieldName)) + .findFirst() + .get(); } } diff --git a/seatunnel-connectors-v2/connector-paimon/src/test/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowConverterTest.java b/seatunnel-connectors-v2/connector-paimon/src/test/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowConverterTest.java index fcb9090a576..eec61aea6df 100644 --- 
a/seatunnel-connectors-v2/connector-paimon/src/test/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowConverterTest.java +++ b/seatunnel-connectors-v2/connector-paimon/src/test/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowConverterTest.java @@ -38,7 +38,10 @@ import org.apache.paimon.data.Timestamp; import org.apache.paimon.data.serializer.InternalArraySerializer; import org.apache.paimon.data.serializer.InternalMapSerializer; +import org.apache.paimon.schema.TableSchema; +import org.apache.paimon.types.DataType; import org.apache.paimon.types.DataTypes; +import org.apache.paimon.types.RowType; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; @@ -48,7 +51,10 @@ import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; /** Unit tests for {@link RowConverter} */ @@ -60,6 +66,45 @@ public class RowConverterTest { private SeaTunnelRowType seaTunnelRowType; + private TableSchema tableSchema; + + public static final RowType DEFAULT_ROW_TYPE = + RowType.of( + new DataType[] { + DataTypes.TINYINT(), + DataTypes.SMALLINT(), + DataTypes.INT(), + DataTypes.BIGINT(), + DataTypes.FLOAT(), + DataTypes.DOUBLE(), + DataTypes.DECIMAL(10, 10), + DataTypes.STRING(), + DataTypes.BYTES(), + DataTypes.BOOLEAN(), + DataTypes.DATE(), + DataTypes.TIMESTAMP(), + DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING()), + DataTypes.ARRAY(DataTypes.STRING()) + }, + new String[] { + "c_tinyint", + "c_smallint", + "c_int", + "c_bigint", + "c_float", + "c_double", + "c_decimal", + "c_string", + "c_bytes", + "c_boolean", + "c_date", + "c_timestamp", + "c_map", + "c_array" + }); + + public static final List KEY_NAME_LIST = Arrays.asList("c_tinyint"); + @BeforeEach public void before() { seaTunnelRowType = @@ -171,11 +216,21 @@ public void before() { binaryRowWriter.writeArray( 13, 
binaryArray2, new InternalArraySerializer(DataTypes.STRING())); internalRow = binaryRow; + + tableSchema = + new TableSchema( + 0, + TableSchema.newFields(DEFAULT_ROW_TYPE), + DEFAULT_ROW_TYPE.getFieldCount(), + Collections.EMPTY_LIST, + KEY_NAME_LIST, + Collections.EMPTY_MAP, + ""); } @Test public void seaTunnelToPaimon() { - InternalRow convert = RowConverter.convert(seaTunnelRow, seaTunnelRowType); + InternalRow convert = RowConverter.reconvert(seaTunnelRow, seaTunnelRowType, tableSchema); Assertions.assertEquals(convert, internalRow); } diff --git a/seatunnel-connectors-v2/connector-paimon/src/test/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverterTest.java b/seatunnel-connectors-v2/connector-paimon/src/test/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverterTest.java index f828be06505..5e614aeda53 100644 --- a/seatunnel-connectors-v2/connector-paimon/src/test/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverterTest.java +++ b/seatunnel-connectors-v2/connector-paimon/src/test/java/org/apache/seatunnel/connectors/seatunnel/paimon/utils/RowTypeConverterTest.java @@ -26,7 +26,9 @@ import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; +import org.apache.paimon.schema.TableSchema; import org.apache.paimon.types.DataField; +import org.apache.paimon.types.DataType; import org.apache.paimon.types.DataTypes; import org.apache.paimon.types.RowType; @@ -34,12 +36,55 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + public class RowTypeConverterTest { private SeaTunnelRowType seaTunnelRowType; private RowType rowType; + private TableSchema tableSchema; + + public static final RowType DEFAULT_ROW_TYPE = + RowType.of( + new DataType[] { + DataTypes.TINYINT(), + DataTypes.SMALLINT(), + DataTypes.INT(), + DataTypes.BIGINT(), 
+ DataTypes.FLOAT(), + DataTypes.DOUBLE(), + DataTypes.DECIMAL(10, 10), + DataTypes.STRING(), + DataTypes.BYTES(), + DataTypes.BOOLEAN(), + DataTypes.DATE(), + DataTypes.TIMESTAMP(), + DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING()), + DataTypes.ARRAY(DataTypes.STRING()) + }, + new String[] { + "c_tinyint", + "c_smallint", + "c_int", + "c_bigint", + "c_float", + "c_double", + "c_decimal", + "c_string", + "c_bytes", + "c_boolean", + "c_date", + "c_timestamp", + "c_map", + "c_array" + }); + + public static final List KEY_NAME_LIST = Arrays.asList("c_tinyint"); + @BeforeEach public void before() { seaTunnelRowType = @@ -93,6 +138,16 @@ public void before() { new DataField( 12, "c_map", DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING())), new DataField(13, "c_array", DataTypes.ARRAY(DataTypes.STRING()))); + + tableSchema = + new TableSchema( + 0, + TableSchema.newFields(DEFAULT_ROW_TYPE), + DEFAULT_ROW_TYPE.getFieldCount(), + Collections.EMPTY_LIST, + KEY_NAME_LIST, + Collections.EMPTY_MAP, + ""); } @Test @@ -103,7 +158,7 @@ public void paimonToSeaTunnel() { @Test public void seaTunnelToPaimon() { - RowType convert = RowTypeConverter.reconvert(seaTunnelRowType); + RowType convert = RowTypeConverter.reconvert(seaTunnelRowType, tableSchema); Assertions.assertEquals(convert, rowType); } } diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/java/org/apache/seatunnel/e2e/connector/paimon/PaimonSinkCDCIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/java/org/apache/seatunnel/e2e/connector/paimon/PaimonSinkCDCIT.java index a960f7d4d37..d2d88c1dbcc 100644 --- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/java/org/apache/seatunnel/e2e/connector/paimon/PaimonSinkCDCIT.java +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/java/org/apache/seatunnel/e2e/connector/paimon/PaimonSinkCDCIT.java @@ -25,17 +25,23 @@ import 
org.apache.seatunnel.e2e.common.container.TestContainer; import org.apache.seatunnel.e2e.common.junit.DisabledOnContainer; +import org.apache.commons.lang3.StringUtils; +import org.apache.paimon.CoreOptions; import org.apache.paimon.catalog.Catalog; import org.apache.paimon.catalog.CatalogContext; import org.apache.paimon.catalog.CatalogFactory; import org.apache.paimon.catalog.Identifier; import org.apache.paimon.data.InternalRow; +import org.apache.paimon.data.Timestamp; import org.apache.paimon.options.Options; import org.apache.paimon.reader.RecordReader; +import org.apache.paimon.table.FileStoreTable; import org.apache.paimon.table.Table; import org.apache.paimon.table.source.ReadBuilder; import org.apache.paimon.table.source.TableRead; import org.apache.paimon.table.source.TableScan; +import org.apache.paimon.types.DataField; +import org.apache.paimon.types.TimestampType; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assertions; @@ -52,6 +58,7 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.awaitility.Awaitility.given; @@ -67,7 +74,6 @@ public class PaimonSinkCDCIT extends TestSuiteBase implements TestResource { private static final String NAMESPACE_TAR = "paimon.tar.gz"; private static final String CATALOG_DIR = CATALOG_ROOT_DIR + NAMESPACE + "/"; private static final String TARGET_TABLE = "st_test"; - private static final String TARGET_DATABASE = "seatunnel_namespace"; private static final String FAKE_TABLE1 = "FakeTable1"; private static final String FAKE_DATABASE1 = "FakeDatabase1"; private static final String FAKE_TABLE2 = "FakeTable1"; @@ -95,7 +101,7 @@ public void testFakeCDCSinkPaimon(TestContainer container) throws Exception { // copy paimon to local container.executeExtraCommands(containerExtendedFactory); List paimonRecords = - loadPaimonData(TARGET_DATABASE, TARGET_TABLE); + loadPaimonData("seatunnel_namespace1", TARGET_TABLE); 
Assertions.assertEquals(2, paimonRecords.size()); paimonRecords.forEach( paimonRecord -> { @@ -107,8 +113,6 @@ public void testFakeCDCSinkPaimon(TestContainer container) throws Exception { } }); }); - - cleanPaimonTable(container); } @TestTemplate @@ -152,18 +156,221 @@ public void testFakeMultipleTableSinkPaimon(TestContainer container) throws Exce } }); }); + } + + @TestTemplate + public void testFakeCDCSinkPaimonWithMultipleBucket(TestContainer container) throws Exception { + Container.ExecResult execResult = container.executeJob("/fake_cdc_sink_paimon_case3.conf"); + Assertions.assertEquals(0, execResult.getExitCode()); + + given().ignoreExceptions() + .await() + .atLeast(100L, TimeUnit.MILLISECONDS) + .atMost(30L, TimeUnit.SECONDS) + .untilAsserted( + () -> { + // copy paimon to local + container.executeExtraCommands(containerExtendedFactory); + Table table = getTable("seatunnel_namespace3", TARGET_TABLE); + String bucket = table.options().get(CoreOptions.BUCKET.key()); + Assertions.assertTrue(StringUtils.isNoneBlank(bucket)); + Assertions.assertEquals(2, Integer.valueOf(bucket)); + List paimonRecords = + loadPaimonData("seatunnel_namespace3", TARGET_TABLE); + Assertions.assertEquals(2, paimonRecords.size()); + paimonRecords.forEach( + paimonRecord -> { + if (paimonRecord.getPkId() == 1) { + Assertions.assertEquals("A_1", paimonRecord.getName()); + } + if (paimonRecord.getPkId() == 3) { + Assertions.assertEquals("C", paimonRecord.getName()); + } + }); + }); + } + + @TestTemplate + public void testFakeCDCSinkPaimonWithPartition(TestContainer container) throws Exception { + Container.ExecResult execResult = container.executeJob("/fake_cdc_sink_paimon_case4.conf"); + Assertions.assertEquals(0, execResult.getExitCode()); + + given().ignoreExceptions() + .await() + .atLeast(100L, TimeUnit.MILLISECONDS) + .atMost(30L, TimeUnit.SECONDS) + .untilAsserted( + () -> { + // copy paimon to local + container.executeExtraCommands(containerExtendedFactory); + Table table = 
getTable("seatunnel_namespace4", TARGET_TABLE); + List partitionKeys = table.partitionKeys(); + List primaryKeys = table.primaryKeys(); + Assertions.assertTrue(partitionKeys.contains("dt")); + Assertions.assertEquals(2, primaryKeys.size()); + Assertions.assertTrue(primaryKeys.contains("pk_id")); + Assertions.assertTrue(primaryKeys.contains("dt")); + ReadBuilder readBuilder = table.newReadBuilder(); + TableScan.Plan plan = readBuilder.newScan().plan(); + TableRead tableRead = readBuilder.newRead(); + List result = new ArrayList<>(); + try (RecordReader reader = tableRead.createReader(plan)) { + reader.forEachRemaining( + row -> { + result.add( + new PaimonRecord( + row.getLong(0), + row.getString(1).toString(), + row.getString(2).toString())); + log.info( + "key_id:" + + row.getLong(0) + + ", name:" + + row.getString(1) + + ", dt:" + + row.getString(2)); + }); + } + Assertions.assertEquals(2, result.size()); + List filterRecords = + result.stream() + .filter(record -> record.pkId == 1) + .collect(Collectors.toList()); + Assertions.assertEquals(1, filterRecords.size()); + PaimonRecord paimonRecord = filterRecords.get(0); + Assertions.assertEquals("A_1", paimonRecord.getName()); + Assertions.assertEquals("2024-03-20", paimonRecord.getDt()); + }); + } + + @TestTemplate + public void testFakeCDCSinkPaimonWithParquet(TestContainer container) throws Exception { + Container.ExecResult execResult = container.executeJob("/fake_cdc_sink_paimon_case5.conf"); + Assertions.assertEquals(0, execResult.getExitCode()); + + given().ignoreExceptions() + .await() + .atLeast(100L, TimeUnit.MILLISECONDS) + .atMost(30L, TimeUnit.SECONDS) + .untilAsserted( + () -> { + // copy paimon to local + container.executeExtraCommands(containerExtendedFactory); + Table table = getTable("seatunnel_namespace5", TARGET_TABLE); + String fileFormat = table.options().get(CoreOptions.FILE_FORMAT.key()); + Assertions.assertTrue(StringUtils.isNoneBlank(fileFormat)); + Assertions.assertEquals("parquet", 
fileFormat); + List paimonRecords = + loadPaimonData("seatunnel_namespace5", TARGET_TABLE); + Assertions.assertEquals(2, paimonRecords.size()); + paimonRecords.forEach( + paimonRecord -> { + if (paimonRecord.getPkId() == 1) { + Assertions.assertEquals("A_1", paimonRecord.getName()); + } + if (paimonRecord.getPkId() == 3) { + Assertions.assertEquals("C", paimonRecord.getName()); + } + }); + }); + } - cleanPaimonTable(container); + @TestTemplate + public void testFakeCDCSinkPaimonWithAvro(TestContainer container) throws Exception { + Container.ExecResult execResult = container.executeJob("/fake_cdc_sink_paimon_case6.conf"); + Assertions.assertEquals(0, execResult.getExitCode()); + + given().ignoreExceptions() + .await() + .atLeast(100L, TimeUnit.MILLISECONDS) + .atMost(30L, TimeUnit.SECONDS) + .untilAsserted( + () -> { + // copy paimon to local + container.executeExtraCommands(containerExtendedFactory); + Table table = getTable("seatunnel_namespace6", TARGET_TABLE); + String fileFormat = table.options().get(CoreOptions.FILE_FORMAT.key()); + Assertions.assertTrue(StringUtils.isNoneBlank(fileFormat)); + Assertions.assertEquals("avro", fileFormat); + List paimonRecords = + loadPaimonData("seatunnel_namespace6", TARGET_TABLE); + Assertions.assertEquals(2, paimonRecords.size()); + paimonRecords.forEach( + paimonRecord -> { + if (paimonRecord.getPkId() == 1) { + Assertions.assertEquals("A_1", paimonRecord.getName()); + } + if (paimonRecord.getPkId() == 3) { + Assertions.assertEquals("C", paimonRecord.getName()); + } + }); + }); } - protected final ContainerExtendedFactory cleanContainerExtendedFactory = - genericContainer -> - genericContainer.execInContainer("sh", "-c", "rm -rf " + CATALOG_DIR + "**"); + @TestTemplate + public void testFakeCDCSinkPaimonWithTimestampN(TestContainer container) throws Exception { + Container.ExecResult execResult = container.executeJob("/fake_cdc_sink_paimon_case7.conf"); + Assertions.assertEquals(0, execResult.getExitCode()); - private void 
cleanPaimonTable(TestContainer container) - throws IOException, InterruptedException { - // clean table - container.executeExtraCommands(cleanContainerExtendedFactory); + given().ignoreExceptions() + .await() + .atLeast(100L, TimeUnit.MILLISECONDS) + .atMost(30L, TimeUnit.SECONDS) + .untilAsserted( + () -> { + // copy paimon to local + container.executeExtraCommands(containerExtendedFactory); + FileStoreTable table = + (FileStoreTable) getTable("seatunnel_namespace7", TARGET_TABLE); + List fields = table.schema().fields(); + for (DataField field : fields) { + if (field.name().equalsIgnoreCase("one_time")) { + Assertions.assertEquals( + 0, ((TimestampType) field.type()).getPrecision()); + } + if (field.name().equalsIgnoreCase("two_time")) { + Assertions.assertEquals( + 3, ((TimestampType) field.type()).getPrecision()); + } + if (field.name().equalsIgnoreCase("three_time")) { + Assertions.assertEquals( + 6, ((TimestampType) field.type()).getPrecision()); + } + if (field.name().equalsIgnoreCase("four_time")) { + Assertions.assertEquals( + 9, ((TimestampType) field.type()).getPrecision()); + } + } + ReadBuilder readBuilder = table.newReadBuilder(); + TableScan.Plan plan = readBuilder.newScan().plan(); + TableRead tableRead = readBuilder.newRead(); + List result = new ArrayList<>(); + try (RecordReader reader = tableRead.createReader(plan)) { + reader.forEachRemaining( + row -> + result.add( + new PaimonRecord( + row.getLong(0), + row.getString(1).toString(), + row.getTimestamp(2, 0), + row.getTimestamp(3, 3), + row.getTimestamp(4, 6), + row.getTimestamp(5, 9)))); + } + Assertions.assertEquals(2, result.size()); + for (PaimonRecord paimonRecord : result) { + Assertions.assertEquals( + paimonRecord.oneTime.toString(), "2024-03-10T10:00:12"); + Assertions.assertEquals( + paimonRecord.twoTime.toString(), "2024-03-10T10:00:00.123"); + Assertions.assertEquals( + paimonRecord.threeTime.toString(), + "2024-03-10T10:00:00.123456"); + Assertions.assertEquals( + 
paimonRecord.fourTime.toString(), + "2024-03-10T10:00:00.123456789"); + } + }); } protected final ContainerExtendedFactory containerExtendedFactory = @@ -256,5 +463,34 @@ private Catalog getCatalog() { public class PaimonRecord { private Long pkId; private String name; + private String dt; + private Timestamp oneTime; + private Timestamp twoTime; + private Timestamp threeTime; + private Timestamp fourTime; + + public PaimonRecord(Long pkId, String name) { + this.pkId = pkId; + this.name = name; + } + + public PaimonRecord(Long pkId, String name, String dt) { + this(pkId, name); + this.dt = dt; + } + + public PaimonRecord( + Long pkId, + String name, + Timestamp oneTime, + Timestamp twoTime, + Timestamp threeTime, + Timestamp fourTime) { + this(pkId, name); + this.oneTime = oneTime; + this.twoTime = twoTime; + this.threeTime = threeTime; + this.fourTime = fourTime; + } } } diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case1.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case1.conf index 59e3a0cf727..50ce13aa683 100644 --- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case1.conf +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case1.conf @@ -80,7 +80,7 @@ source { sink { Paimon { warehouse = "file:///tmp/paimon" - database = "seatunnel_namespace" + database = "seatunnel_namespace1" table = "st_test" } } diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case3.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case3.conf new file mode 100644 index 00000000000..f5db1c82538 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case3.conf @@ 
-0,0 +1,93 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FakeSource { + schema = { + fields { + pk_id = bigint + name = string + score = int + } + primaryKey { + name = "pk_id" + columnNames = [pk_id] + } + } + rows = [ + { + kind = INSERT + fields = [1, "A", 100] + }, + { + kind = INSERT + fields = [2, "B", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = UPDATE_BEFORE + fields = [1, "A", 100] + }, + { + kind = UPDATE_AFTER + fields = [1, "A_1", 19] + }, + { + kind = DELETE + fields = [2, "B", 100] + } + ] + } +} + +transform { + +} + +sink { + Paimon { + warehouse = "file:///tmp/paimon" + database = "seatunnel_namespace3" + table = "st_test" + paimon.table.write-props = { + bucket = 2 + } + } +} diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case4.conf 
b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case4.conf new file mode 100644 index 00000000000..9a287a61b16 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case4.conf @@ -0,0 +1,91 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FakeSource { + schema = { + fields { + pk_id = bigint + name = string + dt = string + } + } + rows = [ + { + kind = INSERT + fields = [1, "A", "2024-03-19"] + }, + { + kind = INSERT + fields = [2, "B", "2024-03-19"] + }, + { + kind = INSERT + fields = [3, "C", "2024-03-19"] + }, + { + kind = INSERT + fields = [3, "C", "2024-03-19"] + }, + { + kind = INSERT + fields = [3, "C", "2024-03-19"] + }, + { + kind = INSERT + fields = [3, "C", "2024-03-19"] + }, + { + kind = UPDATE_BEFORE + fields = [1, "A", "2024-03-19"] + }, + { + kind = UPDATE_AFTER + fields = [1, "A_1", "2024-03-20"] + }, + { + kind = DELETE + fields = [2, "B", "2024-03-19"] + } + ] + } +} + +transform { + +} + +sink { + Paimon { + warehouse = "file:///tmp/paimon" + database = "seatunnel_namespace4" + table = "st_test" + paimon.table.write-props = { + bucket = 2 + } + paimon.table.partition-keys = "dt" + paimon.table.primary-keys = "pk_id,dt" + } +} diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case5.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case5.conf new file mode 100644 index 00000000000..65df2115f4f --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case5.conf @@ -0,0 +1,93 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FakeSource { + schema = { + fields { + pk_id = bigint + name = string + score = int + } + primaryKey { + name = "pk_id" + columnNames = [pk_id] + } + } + rows = [ + { + kind = INSERT + fields = [1, "A", 100] + }, + { + kind = INSERT + fields = [2, "B", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = UPDATE_BEFORE + fields = [1, "A", 100] + }, + { + kind = UPDATE_AFTER + fields = [1, "A_1", 19] + }, + { + kind = DELETE + fields = [2, "B", 100] + } + ] + } +} + +transform { + +} + +sink { + Paimon { + warehouse = "file:///tmp/paimon" + database = "seatunnel_namespace5" + table = "st_test" + paimon.table.write-props = { + file.format = "parquet" + } + } +} diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case6.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case6.conf new file mode 100644 index 00000000000..102747ef0f7 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case6.conf @@ -0,0 +1,93 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FakeSource { + schema = { + fields { + pk_id = bigint + name = string + score = int + } + primaryKey { + name = "pk_id" + columnNames = [pk_id] + } + } + rows = [ + { + kind = INSERT + fields = [1, "A", 100] + }, + { + kind = INSERT + fields = [2, "B", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = INSERT + fields = [3, "C", 100] + }, + { + kind = UPDATE_BEFORE + fields = [1, "A", 100] + }, + { + kind = UPDATE_AFTER + fields = [1, "A_1", 19] + }, + { + kind = DELETE + fields = [2, "B", 100] + } + ] + } +} + +transform { + +} + +sink { + Paimon { + warehouse = "file:///tmp/paimon" + database = "seatunnel_namespace6" + table = "st_test" + paimon.table.write-props = { + file.format = "avro" + } + } +} diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case7.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case7.conf new file mode 100644 index 00000000000..6578c723588 --- /dev/null 
+++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-paimon-e2e/src/test/resources/fake_cdc_sink_paimon_case7.conf @@ -0,0 +1,127 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FakeSource { + schema = { + columns = [ + { + name = pk_id + type = bigint + nullable = false + comment = "primary key id" + }, + { + name = name + type = "string" + nullable = true + comment = "name" + }, + { + name = one_time + type = timestamp + nullable = false + comment = "one time" + columnScale = 0 + }, + { + name = two_time + type = timestamp + nullable = false + comment = "two time" + columnScale = 3 + }, + { + name = three_time + type = timestamp + nullable = false + comment = "three time" + columnScale = 6 + }, + { + name = four_time + type = timestamp + nullable = false + comment = "four time" + columnScale = 9 + } + ] + primaryKey { + name = "pk_id" + columnNames = [pk_id] + } + } + rows = [ + { + kind = INSERT + fields = [1, "A", "2024-03-10T10:00:12", "2024-03-10T10:00:00.123", "2024-03-10T10:00:00.123456", "2024-03-10T10:00:00.123456789"] + }, + { + kind = INSERT + 
fields = [2, "B", "2024-03-10T10:00:12", "2024-03-10T10:00:00.123", "2024-03-10T10:00:00.123456", "2024-03-10T10:00:00.123456789"] + }, + { + kind = INSERT + fields = [3, "C", "2024-03-10T10:00:12", "2024-03-10T10:00:00.123", "2024-03-10T10:00:00.123456", "2024-03-10T10:00:00.123456789"] + }, + { + kind = INSERT + fields = [3, "C", "2024-03-10T10:00:12", "2024-03-10T10:00:00.123", "2024-03-10T10:00:00.123456", "2024-03-10T10:00:00.123456789"] + }, + { + kind = INSERT + fields = [3, "C", "2024-03-10T10:00:12", "2024-03-10T10:00:00.123", "2024-03-10T10:00:00.123456", "2024-03-10T10:00:00.123456789"] + }, + { + kind = INSERT + fields = [3, "C", "2024-03-10T10:00:12", "2024-03-10T10:00:00.123", "2024-03-10T10:00:00.123456", "2024-03-10T10:00:00.123456789"] + }, + { + kind = UPDATE_BEFORE + fields = [1, "A", "2024-03-10T10:00:12", "2024-03-10T10:00:00.123", "2024-03-10T10:00:00.123456", "2024-03-10T10:00:00.123456789"] + }, + { + kind = UPDATE_AFTER + fields = [1, "A_1", "2024-03-10T10:00:12", "2024-03-10T10:00:00.123", "2024-03-10T10:00:00.123456", "2024-03-10T10:00:00.123456789"] + }, + { + kind = DELETE + fields = [2, "B", "2024-03-10T10:00:12", "2024-03-10T10:00:00.123", "2024-03-10T10:00:00.123456", "2024-03-10T10:00:00.123456789"] + } + ] + } +} + +transform { + +} + +sink { + Paimon { + warehouse = "file:///tmp/paimon" + database = "seatunnel_namespace7" + table = "st_test" + } +} From 5fd98eab9ec5b11de06ee1381fc6b771929dfb13 Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Wed, 27 Mar 2024 10:29:27 +0800 Subject: [PATCH 31/59] [Fix][Doc] Fixed typography error in starrocks sink document (#6579) --- docs/en/connector-v2/sink/Doris.md | 22 +++++--- docs/en/connector-v2/sink/StarRocks.md | 76 +++++++++++++------------- 2 files changed, 52 insertions(+), 46 deletions(-) diff --git a/docs/en/connector-v2/sink/Doris.md b/docs/en/connector-v2/sink/Doris.md index 7e8d9c6341f..453a410bbe6 100644 --- a/docs/en/connector-v2/sink/Doris.md +++ 
b/docs/en/connector-v2/sink/Doris.md @@ -73,16 +73,20 @@ We use templates to automatically create Doris tables, which will create corresponding table creation statements based on the type of upstream data and schema type, and the default template can be modified according to the situation. +Default template: + ```sql -CREATE TABLE IF NOT EXISTS `${database}`.`${table_name}` -( - ${rowtype_fields} -) ENGINE = OLAP UNIQUE KEY (${rowtype_primary_key}) - DISTRIBUTED BY HASH (${rowtype_primary_key}) - PROPERTIES -( - "replication_num" = "1" -); +CREATE TABLE IF NOT EXISTS `${database}`.`${table_name}` ( +${rowtype_fields} +) ENGINE=OLAP + UNIQUE KEY (${rowtype_primary_key}) +DISTRIBUTED BY HASH (${rowtype_primary_key}) + PROPERTIES ( +"replication_allocation" = "tag.location.default: 1", +"in_memory" = "false", +"storage_format" = "V2", +"disable_auto_compaction" = "false" +) ``` If a custom field is filled in the template, such as adding an `id` field diff --git a/docs/en/connector-v2/sink/StarRocks.md b/docs/en/connector-v2/sink/StarRocks.md index 03afca211b2..b6dc18e8eab 100644 --- a/docs/en/connector-v2/sink/StarRocks.md +++ b/docs/en/connector-v2/sink/StarRocks.md @@ -48,6 +48,45 @@ We use templates to automatically create starrocks tables, which will create corresponding table creation statements based on the type of upstream data and schema type, and the default template can be modified according to the situation. Only work on multi-table mode at now. 
+Default template: + +```sql +CREATE TABLE IF NOT EXISTS `${database}`.`${table_name}` ( +${rowtype_primary_key}, +${rowtype_fields} +) ENGINE=OLAP +PRIMARY KEY (${rowtype_primary_key}) +DISTRIBUTED BY HASH (${rowtype_primary_key})PROPERTIES ( +"replication_num" = "1" +) +``` + +If a custom field is filled in the template, such as adding an `id` field + +```sql +CREATE TABLE IF NOT EXISTS `${database}`.`${table_name}` +( + id, + ${rowtype_fields} +) ENGINE = OLAP DISTRIBUTED BY HASH (${rowtype_primary_key}) + PROPERTIES +( + "replication_num" = "1" +); +``` + +The connector will automatically obtain the corresponding type from the upstream to complete the filling, +and remove the id field from `rowtype_fields`. This method can be used to customize the modification of field types and attributes. + +You can use the following placeholders + +- database: Used to get the database in the upstream schema +- table_name: Used to get the table name in the upstream schema +- rowtype_fields: Used to get all the fields in the upstream schema, we will automatically map to the field + description of StarRocks +- rowtype_primary_key: Used to get the primary key in the upstream schema (maybe a list) +- rowtype_unique_key: Used to get the unique key in the upstream schema (maybe a list) + ### table [string] Use `database` and this `table-name` auto-generate sql and receive upstream input datas write to database. @@ -82,43 +121,6 @@ Option introduction: When data_save_mode selects CUSTOM_PROCESSING, you should fill in the CUSTOM_SQL parameter. This parameter usually fills in a SQL that can be executed. SQL will be executed before synchronization tasks. 
-```sql -CREATE TABLE IF NOT EXISTS `${database}`.`${table_name}` -( - ${rowtype_fields} -) ENGINE = OLAP DISTRIBUTED BY HASH (${rowtype_primary_key}) - PROPERTIES -( - "replication_num" = "1" -); -``` - -If a custom field is filled in the template, such as adding an `id` field - -```sql -CREATE TABLE IF NOT EXISTS `${database}`.`${table_name}` -( - id, - ${rowtype_fields} -) ENGINE = OLAP DISTRIBUTED BY HASH (${rowtype_primary_key}) - PROPERTIES -( - "replication_num" = "1" -); -``` - -The connector will automatically obtain the corresponding type from the upstream to complete the filling, -and remove the id field from `rowtype_fields`. This method can be used to customize the modification of field types and attributes. - -You can use the following placeholders - -- database: Used to get the database in the upstream schema -- table_name: Used to get the table name in the upstream schema -- rowtype_fields: Used to get all the fields in the upstream schema, we will automatically map to the field - description of StarRocks -- rowtype_primary_key: Used to get the primary key in the upstream schema (maybe a list) -- rowtype_unique_key: Used to get the unique key in the upstream schema (maybe a list) - ## Data Type Mapping | StarRocks Data type | SeaTunnel Data type | From 341615f488eb4f82c383debec738debc59dfb89d Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Wed, 27 Mar 2024 19:14:11 +0800 Subject: [PATCH 32/59] [Fix][Connector-V2] Fix doris sink can not be closed when stream load not read any data (#6570) --- .../doris/sink/writer/DorisSinkWriter.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/writer/DorisSinkWriter.java b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/writer/DorisSinkWriter.java index 323bac17ef9..4443d7a0a46 100644 --- 
a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/writer/DorisSinkWriter.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/sink/writer/DorisSinkWriter.java @@ -65,9 +65,9 @@ public class DorisSinkWriter private final int intervalTime; private final DorisSerializer serializer; private final CatalogTable catalogTable; - private final transient ScheduledExecutorService scheduledExecutorService; - private transient Thread executorThread; - private transient volatile Exception loadException = null; + private final ScheduledExecutorService scheduledExecutorService; + private Thread executorThread; + private volatile Exception loadException = null; public DorisSinkWriter( SinkWriter.Context context, @@ -114,8 +114,6 @@ private void initializeLoad() { } catch (Exception e) { throw new DorisConnectorException(DorisConnectorErrorCode.STREAM_LOAD_FAILED, e); } - // get main work thread. - executorThread = Thread.currentThread(); startLoad(labelGenerator.generateLabel(lastCheckpointId + 1)); // when uploading data in streaming mode, we need to regularly detect whether there are // exceptions. 
@@ -125,7 +123,7 @@ private void initializeLoad() { @Override public void write(SeaTunnelRow element) throws IOException { - checkLoadException(); + checkLoadExceptionAndResetThread(); byte[] serialize = serializer.serialize( dorisConfig.isNeedsUnsupportedTypeCasting() @@ -222,9 +220,11 @@ private void checkDone() { } } - private void checkLoadException() { + private void checkLoadExceptionAndResetThread() { if (loadException != null) { throw new RuntimeException("error while loading data.", loadException); + } else { + executorThread = Thread.currentThread(); } } From 2086b0e8a6b4b6c04089cb0507a38d357c33bd20 Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Wed, 27 Mar 2024 19:23:19 +0800 Subject: [PATCH 33/59] [Improve] Improve MultiTableSinkWriter prepare commit performance (#6495) * [Improve] Improve MultiTableSinkWriter prepare commit performance * update * update --- .github/workflows/backend.yml | 14 ++++ .../multitablesink/MultiTableSinkWriter.java | 69 +++++++++++++++---- .../jdbc/internal/JdbcOutputFormat.java | 5 ++ 3 files changed, 75 insertions(+), 13 deletions(-) diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml index 34a173cd984..5c8e75897d7 100644 --- a/.github/workflows/backend.yml +++ b/.github/workflows/backend.yml @@ -304,6 +304,8 @@ jobs: java-version: ${{ matrix.java }} distribution: 'temurin' cache: 'maven' + - name: free disk space + run: tools/github/free_disk_space.sh - name: run updated modules integration test (part-1) if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != '' run: | @@ -333,6 +335,8 @@ jobs: java-version: ${{ matrix.java }} distribution: 'temurin' cache: 'maven' + - name: free disk space + run: tools/github/free_disk_space.sh - name: run updated modules integration test (part-2) if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != '' run: | @@ -393,6 +397,8 @@ jobs: java-version: ${{ matrix.java }} distribution: 'temurin' cache: 'maven' + - name: 
free disk space + run: tools/github/free_disk_space.sh - name: run updated modules integration test (part-4) if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != '' run: | @@ -421,6 +427,8 @@ jobs: java-version: ${{ matrix.java }} distribution: 'temurin' cache: 'maven' + - name: free disk space + run: tools/github/free_disk_space.sh - name: run updated modules integration test (part-5) if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != '' run: | @@ -449,6 +457,8 @@ jobs: java-version: ${{ matrix.java }} distribution: 'temurin' cache: 'maven' + - name: free disk space + run: tools/github/free_disk_space.sh - name: run updated modules integration test (part-6) if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != '' run: | @@ -477,6 +487,8 @@ jobs: java-version: ${{ matrix.java }} distribution: 'temurin' cache: 'maven' + - name: free disk space + run: tools/github/free_disk_space.sh - name: run updated modules integration test (part-7) if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != '' run: | @@ -506,6 +518,8 @@ jobs: java-version: ${{ matrix.java }} distribution: 'temurin' cache: 'maven' + - name: free disk space + run: tools/github/free_disk_space.sh - name: run updated modules integration test (part-8) if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != '' run: | diff --git a/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/multitablesink/MultiTableSinkWriter.java b/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/multitablesink/MultiTableSinkWriter.java index c296d387f08..12163676d7d 100644 --- a/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/multitablesink/MultiTableSinkWriter.java +++ 
b/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/multitablesink/MultiTableSinkWriter.java @@ -20,6 +20,7 @@ import org.apache.seatunnel.api.sink.MultiTableResourceManager; import org.apache.seatunnel.api.sink.SinkWriter; import org.apache.seatunnel.api.sink.SupportMultiTableSinkWriter; +import org.apache.seatunnel.api.table.event.SchemaChangeEvent; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import lombok.extern.slf4j.Slf4j; @@ -34,6 +35,7 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -58,7 +60,9 @@ public MultiTableSinkWriter( AtomicInteger cnt = new AtomicInteger(0); executorService = Executors.newFixedThreadPool( - queueSize, + // we use it in `MultiTableWriterRunnable` and `prepare commit task`, so it + // should be double. 
+ queueSize * 2, runnable -> { Thread thread = new Thread(runnable); thread.setDaemon(true); @@ -71,9 +75,9 @@ public MultiTableSinkWriter( BlockingQueue queue = new LinkedBlockingQueue<>(1024); Map> tableIdWriterMap = new HashMap<>(); Map> sinkIdentifierMap = new HashMap<>(); - int finalI = i; + int queueIndex = i; sinkWriters.entrySet().stream() - .filter(entry -> entry.getKey().getIndex() % queueSize == finalI) + .filter(entry -> entry.getKey().getIndex() % queueSize == queueIndex) .forEach( entry -> { tableIdWriterMap.put( @@ -119,6 +123,24 @@ private void subSinkErrorCheck() { } } + @Override + public void applySchemaChange(SchemaChangeEvent event) throws IOException { + subSinkErrorCheck(); + for (int i = 0; i < sinkWritersWithIndex.size(); i++) { + for (Map.Entry> sinkWriterEntry : + sinkWritersWithIndex.get(i).entrySet()) { + if (sinkWriterEntry + .getKey() + .getTableIdentifier() + .equals(event.tablePath().getFullName())) { + synchronized (runnable.get(i)) { + sinkWriterEntry.getValue().applySchemaChange(event); + } + } + } + } + } + @Override public void write(SeaTunnelRow element) throws IOException { if (!submitted) { @@ -178,17 +200,38 @@ public Optional prepareCommit() throws IOException { checkQueueRemain(); subSinkErrorCheck(); MultiTableCommitInfo multiTableCommitInfo = new MultiTableCommitInfo(new HashMap<>()); + List> futures = new ArrayList<>(); for (int i = 0; i < sinkWritersWithIndex.size(); i++) { - for (Map.Entry> sinkWriterEntry : - sinkWritersWithIndex.get(i).entrySet()) { - synchronized (runnable.get(i)) { - Optional commit = sinkWriterEntry.getValue().prepareCommit(); - commit.ifPresent( - o -> - multiTableCommitInfo - .getCommitInfo() - .put(sinkWriterEntry.getKey(), o)); - } + int subWriterIndex = i; + futures.add( + executorService.submit( + () -> { + synchronized (runnable.get(subWriterIndex)) { + for (Map.Entry> + sinkWriterEntry : + sinkWritersWithIndex + .get(subWriterIndex) + .entrySet()) { + Optional commit; + try { + commit = 
sinkWriterEntry.getValue().prepareCommit(); + } catch (IOException e) { + throw new RuntimeException(e); + } + commit.ifPresent( + o -> + multiTableCommitInfo + .getCommitInfo() + .put(sinkWriterEntry.getKey(), o)); + } + } + })); + } + for (Future future : futures) { + try { + future.get(); + } catch (Exception e) { + throw new RuntimeException(e); } } return Optional.of(multiTableCommitInfo); diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/JdbcOutputFormat.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/JdbcOutputFormat.java index dafe5f9caf5..32dee1786b2 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/JdbcOutputFormat.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/JdbcOutputFormat.java @@ -125,6 +125,11 @@ public synchronized void flush() throws IOException { ExceptionUtils.getMessage(flushException))); return; } + if (batchCount == 0) { + LOG.debug("No data to flush."); + return; + } + final int sleepMs = 1000; for (int i = 0; i <= jdbcConnectionConfig.getMaxRetries(); i++) { try { From f2ed1fbde093217dcc5c09a4031b60bf063291be Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Wed, 27 Mar 2024 19:43:17 +0800 Subject: [PATCH 34/59] [Fix][Connector-V2] Fixed doris/starrocks create table sql parse error (#6580) --- .../doris/util/CreateTableParser.java | 3 ++ .../doris/catalog/DorisCreateTableTest.java | 41 +++++++++++++++++++ .../starrocks/util/CreateTableParser.java | 3 ++ .../catalog/StarRocksCreateTableTest.java | 36 ++++++++++++++++ 4 files changed, 83 insertions(+) diff --git a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/util/CreateTableParser.java 
b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/util/CreateTableParser.java index f607cc9e45f..a911f1e1a2c 100644 --- a/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/util/CreateTableParser.java +++ b/seatunnel-connectors-v2/connector-doris/src/main/java/org/apache/seatunnel/connectors/doris/util/CreateTableParser.java @@ -43,6 +43,9 @@ public static Map getColumnList(String createTableSql) { } else if ((c == ',' || c == ')') && !insideParentheses) { parseColumn(columnBuilder.toString(), columns, startIndex + i + 1); columnBuilder.setLength(0); + if (c == ')') { + break; + } } else if (c == ')') { insideParentheses = false; columnBuilder.append(c); diff --git a/seatunnel-connectors-v2/connector-doris/src/test/java/org/apache/seatunnel/connectors/doris/catalog/DorisCreateTableTest.java b/seatunnel-connectors-v2/connector-doris/src/test/java/org/apache/seatunnel/connectors/doris/catalog/DorisCreateTableTest.java index ae8cd51309c..d33e1747a12 100644 --- a/seatunnel-connectors-v2/connector-doris/src/test/java/org/apache/seatunnel/connectors/doris/catalog/DorisCreateTableTest.java +++ b/seatunnel-connectors-v2/connector-doris/src/test/java/org/apache/seatunnel/connectors/doris/catalog/DorisCreateTableTest.java @@ -305,4 +305,45 @@ public void testWithVarchar() { + " \"dynamic_partition.prefix\" = \"p\" \n" + ");"); } + + @Test + public void testWithThreePrimaryKeys() { + List columns = new ArrayList<>(); + + columns.add(PhysicalColumn.of("id", BasicType.LONG_TYPE, (Long) null, true, null, "")); + columns.add(PhysicalColumn.of("name", BasicType.STRING_TYPE, (Long) null, true, null, "")); + columns.add(PhysicalColumn.of("age", BasicType.INT_TYPE, (Long) null, true, null, "")); + columns.add(PhysicalColumn.of("comment", BasicType.STRING_TYPE, 500, true, null, "")); + columns.add(PhysicalColumn.of("description", BasicType.STRING_TYPE, 70000, true, null, "")); + + String result = + 
DorisCatalogUtil.getCreateTableStatement( + "create table '${database}'.'${table_name}'(\n" + + " ${rowtype_fields}\n" + + " )\n" + + " partitioned by ${rowtype_primary_key};", + TablePath.of("test1", "test2"), + CatalogTable.of( + TableIdentifier.of("test", "test1", "test2"), + TableSchema.builder() + .primaryKey( + PrimaryKey.of( + "test", Arrays.asList("id", "age", "name"))) + .columns(columns) + .build(), + Collections.emptyMap(), + Collections.emptyList(), + "")); + + Assertions.assertEquals( + "create table 'test1'.'test2'(\n" + + " `id` BIGINT(1) NULL ,\n" + + "`name` STRING NULL ,\n" + + "`age` INT(1) NULL ,\n" + + "`comment` VARCHAR(500) NULL ,\n" + + "`description` STRING NULL \n" + + " )\n" + + " partitioned by `id`,`age`,`name`;", + result); + } } diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/util/CreateTableParser.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/util/CreateTableParser.java index a13a88bf610..6986967f7a4 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/util/CreateTableParser.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/util/CreateTableParser.java @@ -43,6 +43,9 @@ public static Map getColumnList(String createTableSql) { } else if ((c == ',' || c == ')') && !insideParentheses) { parseColumn(columnBuilder.toString(), columns, startIndex + i + 1); columnBuilder.setLength(0); + if (c == ')') { + break; + } } else if (c == ')') { insideParentheses = false; columnBuilder.append(c); diff --git a/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCreateTableTest.java 
b/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCreateTableTest.java index 0a3f36196aa..2e15ea39113 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCreateTableTest.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/catalog/StarRocksCreateTableTest.java @@ -289,4 +289,40 @@ public void testWithVarchar() { + ");", result); } + + @Test + public void testWithThreePrimaryKeys() { + List columns = new ArrayList<>(); + + columns.add(PhysicalColumn.of("id", BasicType.LONG_TYPE, (Long) null, true, null, "")); + columns.add(PhysicalColumn.of("name", BasicType.STRING_TYPE, (Long) null, true, null, "")); + columns.add(PhysicalColumn.of("age", BasicType.INT_TYPE, (Long) null, true, null, "")); + columns.add(PhysicalColumn.of("comment", BasicType.STRING_TYPE, 500, true, null, "")); + columns.add(PhysicalColumn.of("description", BasicType.STRING_TYPE, 70000, true, null, "")); + + String result = + StarRocksSaveModeUtil.getCreateTableSql( + "create table '${database}'.'${table_name}'(\n" + + " ${rowtype_fields}\n" + + " )\n" + + " partitioned by ${rowtype_primary_key};", + "test1", + "test2", + TableSchema.builder() + .primaryKey( + PrimaryKey.of("test", Arrays.asList("id", "age", "name"))) + .columns(columns) + .build()); + + Assertions.assertEquals( + "create table 'test1'.'test2'(\n" + + " `id` BIGINT NULL ,\n" + + "`name` STRING NULL ,\n" + + "`age` INT NULL ,\n" + + "`comment` VARCHAR(500) NULL ,\n" + + "`description` STRING NULL \n" + + " )\n" + + " partitioned by `id`,`age`,`name`;", + result); + } } From e1211c73b0bfdc9e92c43b84b59fa5bce3aab86d Mon Sep 17 00:00:00 2001 From: Jarvis Date: Wed, 27 Mar 2024 20:21:22 +0800 Subject: [PATCH 35/59] [Doc] update rest api document (#6586) --- docs/zh/seatunnel-engine/rest-api.md | 26 
++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/docs/zh/seatunnel-engine/rest-api.md b/docs/zh/seatunnel-engine/rest-api.md index a3f8d10d190..28a81c548d2 100644 --- a/docs/zh/seatunnel-engine/rest-api.md +++ b/docs/zh/seatunnel-engine/rest-api.md @@ -91,8 +91,6 @@ network: "jobId": "", "jobName": "", "jobStatus": "", - "envOptions": { - }, "createTime": "", "jobDag": { "vertices": [ @@ -100,13 +98,29 @@ network: "edges": [ ] }, - "pluginJarsUrls": [ - ], - "isStartWithSavePoint": false, "metrics": { "sourceReceivedCount": "", "sinkWriteCount": "" - } + }, + "finishedTime": "", + "errorMsg": null, + "envOptions": { + }, + "pluginJarsUrls": [ + ], + "isStartWithSavePoint": false +} +``` + +`jobId`, `jobName`, `jobStatus`, `createTime`, `jobDag`, `metrics` 字段总会返回. +`envOptions`, `pluginJarsUrls`, `isStartWithSavePoint` 字段在Job在RUNNING状态时会返回 +`finishedTime`, `errorMsg` 字段在Job结束时会返回,结束状态为不为RUNNING,可能为FINISHED,可能为CANCEL + +当我们查询不到这个Job时,返回结果为: + +```json +{ + "jobId" : "" } ``` From cdfb5735acfbd281e01bf34393870fa573c5516c Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Wed, 27 Mar 2024 22:45:11 +0800 Subject: [PATCH 36/59] [Improve][Test] Make classloader cache testing more stable (#6597) --- .../engine/e2e/classloader/ClassLoaderITBase.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/classloader/ClassLoaderITBase.java b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/classloader/ClassLoaderITBase.java index bdc6163c3be..6355f6523c7 100644 --- a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/classloader/ClassLoaderITBase.java +++ 
b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/classloader/ClassLoaderITBase.java @@ -75,9 +75,9 @@ public void testFakeSourceToInMemorySink() throws IOException, InterruptedExcept Assertions.assertEquals(0, execResult.getExitCode()); Assertions.assertTrue(containsDaemonThread()); if (cacheMode()) { - Assertions.assertEquals(3, getClassLoaderCount()); + Assertions.assertTrue(3 >= getClassLoaderCount()); } else { - Assertions.assertEquals(2 + i, getClassLoaderCount()); + Assertions.assertTrue(2 + i >= getClassLoaderCount()); } } } @@ -165,9 +165,9 @@ public void testFakeSourceToInMemorySinkForRestApi() throws IOException, Interru Thread.sleep(5000); Assertions.assertTrue(containsDaemonThread()); if (cacheMode()) { - Assertions.assertEquals(3, getClassLoaderCount()); + Assertions.assertTrue(3 >= getClassLoaderCount()); } else { - Assertions.assertEquals(2 + i, getClassLoaderCount()); + Assertions.assertTrue(2 + i >= getClassLoaderCount()); } } } From 831d0022eb00fee6822c1c0216a20c1744f26c12 Mon Sep 17 00:00:00 2001 From: hailin0 Date: Thu, 28 Mar 2024 21:37:13 +0800 Subject: [PATCH 37/59] [Feature][Core] Support event listener for job (#6419) --- docs/en/concept/event-listener.md | 116 ++++++++++++++ docs/sidebars.js | 3 +- .../api/event/DefaultEventProcessor.java | 61 +++++++ .../org/apache/seatunnel/api/event/Event.java | 31 ++++ .../seatunnel/api/event/EventHandler.java | 33 ++++ .../seatunnel/api/event/EventListener.java | 24 +++ .../seatunnel/api/event/EventProcessor.java | 47 ++++++ .../apache/seatunnel/api/event/EventType.java | 32 ++++ .../seatunnel/api/event/LifecycleEvent.java | 20 +++ .../api/event/LoggingEventHandler.java | 31 ++++ .../api/sink/DefaultSinkWriterContext.java | 17 ++ .../apache/seatunnel/api/sink/SinkWriter.java | 8 + .../api/sink/event/WriterCloseEvent.java | 40 +++++ .../seatunnel/api/source/SourceReader.java | 8 + .../api/source/SourceSplitEnumerator.java | 8 + 
.../source/event/EnumeratorCloseEvent.java | 40 +++++ .../api/source/event/EnumeratorOpenEvent.java | 40 +++++ .../api/source/event/ReaderCloseEvent.java | 40 +++++ .../api/source/event/ReaderOpenEvent.java | 40 +++++ .../table/event/AlterTableAddColumnEvent.java | 6 + .../event/AlterTableChangeColumnEvent.java | 6 + .../table/event/AlterTableColumnsEvent.java | 6 + .../event/AlterTableDropColumnEvent.java | 6 + .../event/AlterTableModifyColumnEvent.java | 6 + .../api/table/event/AlterTableNameEvent.java | 6 + .../api/table/event/SchemaChangeEvent.java | 5 +- .../seatunnel/api/table/event/TableEvent.java | 9 ++ .../cdc/base/source/IncrementalSource.java | 8 +- .../IncrementalSourceRecordEmitter.java | 14 +- .../multitablesink/SinkContextProxy.java | 6 + .../console/sink/ConsoleSinkWriter.java | 3 +- .../fake/source/FakeSourceReader.java | 6 +- .../source/FakeSourceSplitEnumerator.java | 6 +- .../spark/execution/SinkExecuteProcessor.java | 8 +- .../spark/execution/SinkExecuteProcessor.java | 8 +- .../seatunnel/SeaTunnelContainer.java | 3 +- .../connector-console-seatunnel-e2e/pom.xml | 7 + .../FakeSourceToConsoleWithEventReportIT.java | 124 +++++++++++++++ .../test/resources/fakesource_to_console.conf | 1 + .../seatunnel_config_with_event_report.yaml | 23 +++ .../src/test/resources/log4j2.properties | 3 + .../engine/common/config/EngineConfig.java | 16 ++ .../YamlSeaTunnelDomConfigProcessor.java | 20 +++ .../config/server/ServerConfigOptions.java | 4 + .../seatunnel-engine-server/pom.xml | 6 + .../engine/server/CoordinatorService.java | 56 +++++++ .../event/JobEventHttpReportHandler.java | 150 ++++++++++++++++++ .../engine/server/event/JobEventListener.java | 38 +++++ .../server/event/JobEventProcessor.java | 41 +++++ .../server/event/JobEventReportOperation.java | 78 +++++++++ .../serializable/TaskDataSerializerHook.java | 5 + .../task/SourceSplitEnumeratorTask.java | 6 +- .../SeaTunnelSplitEnumeratorContext.java | 11 +- .../task/context/SinkWriterContext.java | 11 
+- .../task/context/SourceReaderContext.java | 11 +- .../server/task/flow/SinkFlowLifeCycle.java | 12 +- .../server/task/flow/SourceFlowLifeCycle.java | 8 +- .../event/JobEventHttpReportHandlerTest.java | 133 ++++++++++++++++ .../source/CoordinatedEnumeratorContext.java | 12 +- .../source/CoordinatedReaderContext.java | 10 ++ .../translation/source/CoordinatedSource.java | 9 +- .../source/ParallelEnumeratorContext.java | 14 +- .../source/ParallelReaderContext.java | 14 +- .../translation/source/ParallelSource.java | 8 +- .../flink/sink/FlinkSinkWriterContext.java | 37 ++++- .../flink/sink/FlinkSinkWriterContext.java | 33 +++- .../source/FlinkSourceReaderContext.java | 40 ++++- .../FlinkSourceSplitEnumeratorContext.java | 76 +++++++++ .../translation/spark/sink/SparkSink.java | 9 +- .../spark/sink/SparkSinkInjector.java | 13 +- .../sink/writer/SparkDataSourceWriter.java | 7 +- .../sink/writer/SparkDataWriterFactory.java | 8 +- .../spark/sink/writer/SparkStreamWriter.java | 5 +- .../spark/source/SeaTunnelSourceSupport.java | 5 +- .../partition/batch/BatchPartition.java | 9 +- .../partition/micro/MicroBatchPartition.java | 5 + .../reader/batch/BatchSourceReader.java | 7 +- .../CoordinatedBatchPartitionReader.java | 10 +- .../batch/ParallelBatchPartitionReader.java | 8 +- .../CoordinatedMicroBatchPartitionReader.java | 9 +- .../reader/micro/MicroBatchSourceReader.java | 5 + .../ParallelMicroBatchPartitionReader.java | 5 +- .../spark/sink/SeaTunnelBatchWrite.java | 8 +- .../spark/sink/SeaTunnelSinkTable.java | 4 +- .../spark/sink/SparkSinkInjector.java | 14 +- .../SeaTunnelSparkDataWriterFactory.java | 8 +- .../spark/sink/write/SeaTunnelWrite.java | 9 +- .../sink/write/SeaTunnelWriteBuilder.java | 7 +- .../spark/source/SeaTunnelSourceTable.java | 5 +- .../CoordinatedBatchPartitionReader.java | 10 +- .../batch/ParallelBatchPartitionReader.java | 8 +- .../partition/batch/SeaTunnelBatch.java | 5 +- .../SeaTunnelBatchPartitionReaderFactory.java | 8 +- 
.../CoordinatedMicroBatchPartitionReader.java | 9 +- .../ParallelMicroBatchPartitionReader.java | 5 +- .../partition/micro/SeaTunnelMicroBatch.java | 5 +- ...unnelMicroBatchPartitionReaderFactory.java | 5 + .../spark/source/scan/SeaTunnelScan.java | 7 +- .../source/scan/SeaTunnelScanBuilder.java | 5 +- .../translation/spark/sink/SparkSinkTest.java | 3 +- 100 files changed, 1883 insertions(+), 115 deletions(-) create mode 100644 docs/en/concept/event-listener.md create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/event/DefaultEventProcessor.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/event/Event.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventHandler.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventListener.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventProcessor.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventType.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/event/LifecycleEvent.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/event/LoggingEventHandler.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/event/WriterCloseEvent.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/EnumeratorCloseEvent.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/EnumeratorOpenEvent.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/ReaderCloseEvent.java create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/ReaderOpenEvent.java create mode 100644 seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/java/org/apache/seatunnel/engine/e2e/console/FakeSourceToConsoleWithEventReportIT.java create mode 100644 
seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/resources/seatunnel_config_with_event_report.yaml create mode 100644 seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandler.java create mode 100644 seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventListener.java create mode 100644 seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventProcessor.java create mode 100644 seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventReportOperation.java create mode 100644 seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandlerTest.java diff --git a/docs/en/concept/event-listener.md b/docs/en/concept/event-listener.md new file mode 100644 index 00000000000..7ba4550205d --- /dev/null +++ b/docs/en/concept/event-listener.md @@ -0,0 +1,116 @@ +# Event Listener + +## Introduction + +The SeaTunnel provides a rich event listening feature that allows you to manage the status at which data is synchronized. +This functionality is crucial when you need to listen job running status(`org.apache.seatunnel.api.event`). +This document will guide you through the usage of these parameters and how to leverage them effectively. + +## Support Those Engines + +> SeaTunnel Zeta
+> Flink
+> Spark
+ +## API + +The event API is defined in the `org.apache.seatunnel.api.event` package. + +### Event Data API + +- `org.apache.seatunnel.api.event.Event` - The interface for event data. +- `org.apache.seatunnel.api.event.EventType` - The enum for event type. + +### Event Listener API + +You can customize event handler, such as sending events to external systems + +- `org.apache.seatunnel.api.event.EventHandler` - The interface for event handler, SPI will automatically load subclass from the classpath. + +### Event Collect API + +- `org.apache.seatunnel.api.source.SourceSplitEnumerator` - Attached event listener API to report events from `SourceSplitEnumerator`. + +```java +package org.apache.seatunnel.api.source; + +public interface SourceSplitEnumerator { + + interface Context { + + /** + * Get the {@link org.apache.seatunnel.api.event.EventListener} of this enumerator. + * + * @return + */ + EventListener getEventListener(); + } +} +``` + +- `org.apache.seatunnel.api.source.SourceReader` - Attached event listener API to report events from `SourceReader`. + +```java +package org.apache.seatunnel.api.source; + +public interface SourceReader { + + interface Context { + + /** + * Get the {@link org.apache.seatunnel.api.event.EventListener} of this reader. + * + * @return + */ + EventListener getEventListener(); + } +} +``` + +- `org.apache.seatunnel.api.sink.SinkWriter` - Attached event listener API to report events from `SinkWriter`. + +```java +package org.apache.seatunnel.api.sink; + +public interface SinkWriter { + + interface Context { + + /** + * Get the {@link org.apache.seatunnel.api.event.EventListener} of this writer. + * + * @return + */ + EventListener getEventListener(); + } +} +``` + +## Configuration Listener + +To use the event listening feature, you need to configure engine config. 
+ +### Zeta Engine + +Example config in your config file(seatunnel.yaml): + +``` +seatunnel: + engine: + event-report-http: + url: "http://example.com:1024/event/report" + headers: + Content-Type: application/json +``` + +### Flink Engine + +You can define the implementation class of `org.apache.seatunnel.api.event.EventHandler` interface and add to the classpath to automatically load it through SPI. + +Support flink version: 1.14.0+ + +Example: `org.apache.seatunnel.api.event.LoggingEventHandler` + +### Spark Engine + +You can define the implementation class of `org.apache.seatunnel.api.event.EventHandler` interface and add to the classpath to automatically load it through SPI. diff --git a/docs/sidebars.js b/docs/sidebars.js index a6d3bafe10f..76be7c7761a 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -90,7 +90,8 @@ const sidebars = { "concept/connector-v2-features", 'concept/schema-feature', 'concept/JobEnvConfig', - 'concept/speed-limit' + 'concept/speed-limit', + 'concept/event-listener' ] }, "Connector-v2-release-state", diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/DefaultEventProcessor.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/DefaultEventProcessor.java new file mode 100644 index 00000000000..f2cc6bf6768 --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/DefaultEventProcessor.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.api.event; + +import lombok.AllArgsConstructor; +import lombok.extern.slf4j.Slf4j; + +import java.util.List; + +@Slf4j +@AllArgsConstructor +public class DefaultEventProcessor implements EventListener, EventProcessor { + private final String jobId; + private final List handlers; + + public DefaultEventProcessor() { + this(DefaultEventProcessor.class.getClassLoader()); + } + + public DefaultEventProcessor(String jobId) { + this(jobId, EventProcessor.loadEventHandlers(DefaultEventProcessor.class.getClassLoader())); + } + + public DefaultEventProcessor(ClassLoader classLoader) { + this(null, EventProcessor.loadEventHandlers(classLoader)); + } + + @Override + public void process(Event event) { + handlers.forEach(listener -> listener.handle(event)); + } + + @Override + public void onEvent(Event event) { + if (jobId != null) { + event.setJobId(jobId); + } + process(event); + } + + @Override + public void close() throws Exception { + log.info("Closing event handlers."); + EventProcessor.close(handlers); + } +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/Event.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/Event.java new file mode 100644 index 00000000000..46198e510f7 --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/Event.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.api.event; + +import java.io.Serializable; + +public interface Event extends Serializable { + + long getCreatedTime(); + + void setJobId(String jobId); + + String getJobId(); + + EventType getEventType(); +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventHandler.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventHandler.java new file mode 100644 index 00000000000..de671bd3527 --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventHandler.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.api.event; + +import java.io.Serializable; + +public interface EventHandler extends Serializable, AutoCloseable { + + /** + * Receive and handle the event data. + * + * @param event + */ + void handle(Event event); + + @Override + default void close() throws Exception {} +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventListener.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventListener.java new file mode 100644 index 00000000000..bb1c4e85979 --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventListener.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.api.event; + +import java.io.Serializable; + +public interface EventListener extends Serializable { + void onEvent(Event event); +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventProcessor.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventProcessor.java new file mode 100644 index 00000000000..18b9c390027 --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventProcessor.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.api.event; + +import java.util.LinkedList; +import java.util.List; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader; + +public interface EventProcessor extends AutoCloseable { + void process(Event event); + + static List loadEventHandlers(ClassLoader classLoader) { + try { + List result = new LinkedList<>(); + ServiceLoader.load(EventHandler.class, classLoader) + .iterator() + .forEachRemaining(result::add); + return result; + } catch (ServiceConfigurationError e) { + throw new RuntimeException("Could not load service provider for event handlers.", e); + } + } + + static void close(List handlers) throws Exception { + if (handlers != null) { + for (EventHandler handler : handlers) { + handler.close(); + } + } + } +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventType.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventType.java new file mode 100644 index 00000000000..46acd316b4d --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventType.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.api.event; + +public enum EventType { + SCHEMA_CHANGE_ADD_COLUMN, + SCHEMA_CHANGE_DROP_COLUMN, + SCHEMA_CHANGE_MODIFY_COLUMN, + SCHEMA_CHANGE_CHANGE_COLUMN, + SCHEMA_CHANGE_UPDATE_COLUMNS, + SCHEMA_CHANGE_RENAME_TABLE, + LIFECYCLE_ENUMERATOR_OPEN, + LIFECYCLE_ENUMERATOR_CLOSE, + LIFECYCLE_READER_OPEN, + LIFECYCLE_READER_CLOSE, + LIFECYCLE_WRITER_CLOSE, +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/LifecycleEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/LifecycleEvent.java new file mode 100644 index 00000000000..c2b5c64f127 --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/LifecycleEvent.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.api.event; + +public interface LifecycleEvent extends Event {} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/LoggingEventHandler.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/LoggingEventHandler.java new file mode 100644 index 00000000000..8c0b083e8bd --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/LoggingEventHandler.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.api.event; + +import com.google.auto.service.AutoService; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@AutoService(EventHandler.class) +public class LoggingEventHandler implements EventHandler { + + @Override + public void handle(Event event) { + log.info("log event: {}", event); + } +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/DefaultSinkWriterContext.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/DefaultSinkWriterContext.java index 2aaab48238a..73af75f22ca 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/DefaultSinkWriterContext.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/DefaultSinkWriterContext.java @@ -19,13 +19,25 @@ import org.apache.seatunnel.api.common.metrics.AbstractMetricsContext; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.DefaultEventProcessor; +import org.apache.seatunnel.api.event.EventListener; /** The default {@link SinkWriter.Context} implement class. 
*/ public class DefaultSinkWriterContext implements SinkWriter.Context { private final int subtask; + private final EventListener eventListener; public DefaultSinkWriterContext(int subtask) { + this(subtask, new DefaultEventProcessor()); + } + + public DefaultSinkWriterContext(String jobId, int subtask) { + this(subtask, new DefaultEventProcessor(jobId)); + } + + public DefaultSinkWriterContext(int subtask, EventListener eventListener) { this.subtask = subtask; + this.eventListener = eventListener; } @Override @@ -39,4 +51,9 @@ public MetricsContext getMetricsContext() { // https://github.com/apache/seatunnel/issues/3431 return new AbstractMetricsContext() {}; } + + @Override + public EventListener getEventListener() { + return eventListener; + } } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SinkWriter.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SinkWriter.java index 3b1e715ebee..785f1065dd4 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SinkWriter.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/SinkWriter.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.api.sink; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.table.event.SchemaChangeEvent; import java.io.IOException; @@ -93,5 +94,12 @@ interface Context extends Serializable { /** @return metricsContext of this reader. */ MetricsContext getMetricsContext(); + + /** + * Get the {@link EventListener} of this writer. 
+ * + * @return + */ + EventListener getEventListener(); } } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/event/WriterCloseEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/event/WriterCloseEvent.java new file mode 100644 index 00000000000..2a11d45597f --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/sink/event/WriterCloseEvent.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.api.sink.event; + +import org.apache.seatunnel.api.event.EventType; +import org.apache.seatunnel.api.event.LifecycleEvent; + +import lombok.AllArgsConstructor; +import lombok.Getter; +import lombok.Setter; +import lombok.ToString; + +@Getter +@Setter +@ToString +@AllArgsConstructor +public class WriterCloseEvent implements LifecycleEvent { + private long createdTime; + private String jobId; + private EventType eventType = EventType.LIFECYCLE_WRITER_CLOSE; + + public WriterCloseEvent() { + this.createdTime = System.currentTimeMillis(); + } +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/SourceReader.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/SourceReader.java index 50ec0c137ec..d095e426efb 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/SourceReader.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/SourceReader.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.api.source; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.state.CheckpointListener; import java.io.IOException; @@ -111,5 +112,12 @@ interface Context { /** @return metricsContext of this reader. */ MetricsContext getMetricsContext(); + + /** + * Get the {@link EventListener} of this reader. 
+ * + * @return + */ + EventListener getEventListener(); } } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/SourceSplitEnumerator.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/SourceSplitEnumerator.java index 49d3c7c1fea..897d18ac5f0 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/SourceSplitEnumerator.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/SourceSplitEnumerator.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.api.source; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.state.CheckpointListener; import java.io.IOException; @@ -120,5 +121,12 @@ default void assignSplit(int subtaskId, SplitT split) { /** @return metricsContext of this reader. */ MetricsContext getMetricsContext(); + + /** + * Get the {@link EventListener} of this enumerator. + * + * @return + */ + EventListener getEventListener(); } } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/EnumeratorCloseEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/EnumeratorCloseEvent.java new file mode 100644 index 00000000000..1911b16b6ab --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/EnumeratorCloseEvent.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.api.source.event; + +import org.apache.seatunnel.api.event.EventType; +import org.apache.seatunnel.api.event.LifecycleEvent; + +import lombok.AllArgsConstructor; +import lombok.Getter; +import lombok.Setter; +import lombok.ToString; + +@Getter +@Setter +@ToString +@AllArgsConstructor +public class EnumeratorCloseEvent implements LifecycleEvent { + private long createdTime; + private String jobId; + private EventType eventType = EventType.LIFECYCLE_ENUMERATOR_CLOSE; + + public EnumeratorCloseEvent() { + this.createdTime = System.currentTimeMillis(); + } +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/EnumeratorOpenEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/EnumeratorOpenEvent.java new file mode 100644 index 00000000000..56383a9f56d --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/EnumeratorOpenEvent.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.api.source.event; + +import org.apache.seatunnel.api.event.EventType; +import org.apache.seatunnel.api.event.LifecycleEvent; + +import lombok.AllArgsConstructor; +import lombok.Getter; +import lombok.Setter; +import lombok.ToString; + +@Getter +@Setter +@ToString +@AllArgsConstructor +public class EnumeratorOpenEvent implements LifecycleEvent { + private long createdTime; + private String jobId; + private EventType eventType = EventType.LIFECYCLE_ENUMERATOR_OPEN; + + public EnumeratorOpenEvent() { + this.createdTime = System.currentTimeMillis(); + } +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/ReaderCloseEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/ReaderCloseEvent.java new file mode 100644 index 00000000000..fd584575110 --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/ReaderCloseEvent.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.api.source.event; + +import org.apache.seatunnel.api.event.EventType; +import org.apache.seatunnel.api.event.LifecycleEvent; + +import lombok.AllArgsConstructor; +import lombok.Getter; +import lombok.Setter; +import lombok.ToString; + +@Getter +@Setter +@ToString +@AllArgsConstructor +public class ReaderCloseEvent implements LifecycleEvent { + private long createdTime; + private String jobId; + private EventType eventType = EventType.LIFECYCLE_READER_CLOSE; + + public ReaderCloseEvent() { + this.createdTime = System.currentTimeMillis(); + } +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/ReaderOpenEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/ReaderOpenEvent.java new file mode 100644 index 00000000000..5f3454483fc --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/ReaderOpenEvent.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.api.source.event; + +import org.apache.seatunnel.api.event.EventType; +import org.apache.seatunnel.api.event.LifecycleEvent; + +import lombok.AllArgsConstructor; +import lombok.Getter; +import lombok.Setter; +import lombok.ToString; + +@Getter +@Setter +@ToString +@AllArgsConstructor +public class ReaderOpenEvent implements LifecycleEvent { + private long createdTime; + private String jobId; + private EventType eventType = EventType.LIFECYCLE_READER_OPEN; + + public ReaderOpenEvent() { + this.createdTime = System.currentTimeMillis(); + } +} diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableAddColumnEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableAddColumnEvent.java index 4c874ad2a31..7bb2218d885 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableAddColumnEvent.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableAddColumnEvent.java @@ -17,6 +17,7 @@ package org.apache.seatunnel.api.table.event; +import org.apache.seatunnel.api.event.EventType; import org.apache.seatunnel.api.table.catalog.Column; import org.apache.seatunnel.api.table.catalog.TableIdentifier; @@ -51,4 +52,9 @@ public static AlterTableAddColumnEvent addAfter( TableIdentifier tableIdentifier, Column column, String afterColumn) { return new AlterTableAddColumnEvent(tableIdentifier, column, false, afterColumn); } + + @Override + public EventType getEventType() { + return EventType.SCHEMA_CHANGE_ADD_COLUMN; + 
} } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableChangeColumnEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableChangeColumnEvent.java index d53f5886ea0..672f0998667 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableChangeColumnEvent.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableChangeColumnEvent.java @@ -17,6 +17,7 @@ package org.apache.seatunnel.api.table.event; +import org.apache.seatunnel.api.event.EventType; import org.apache.seatunnel.api.table.catalog.Column; import org.apache.seatunnel.api.table.catalog.TableIdentifier; @@ -53,4 +54,9 @@ public static AlterTableChangeColumnEvent changeAfter( return new AlterTableChangeColumnEvent( tableIdentifier, oldColumn, column, false, afterColumn); } + + @Override + public EventType getEventType() { + return EventType.SCHEMA_CHANGE_CHANGE_COLUMN; + } } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableColumnsEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableColumnsEvent.java index 3d47592ffaf..ce487681767 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableColumnsEvent.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableColumnsEvent.java @@ -17,6 +17,7 @@ package org.apache.seatunnel.api.table.event; +import org.apache.seatunnel.api.event.EventType; import org.apache.seatunnel.api.table.catalog.TableIdentifier; import lombok.Getter; @@ -44,4 +45,9 @@ public AlterTableColumnsEvent addEvent(AlterTableColumnEvent event) { events.add(event); return this; } + + @Override + public EventType getEventType() { + return EventType.SCHEMA_CHANGE_UPDATE_COLUMNS; + } } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableDropColumnEvent.java 
b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableDropColumnEvent.java index 3e1ddaf9578..ea4b204142a 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableDropColumnEvent.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableDropColumnEvent.java @@ -17,6 +17,7 @@ package org.apache.seatunnel.api.table.event; +import org.apache.seatunnel.api.event.EventType; import org.apache.seatunnel.api.table.catalog.TableIdentifier; import lombok.Getter; @@ -31,4 +32,9 @@ public AlterTableDropColumnEvent(TableIdentifier tableIdentifier, String column) super(tableIdentifier); this.column = column; } + + @Override + public EventType getEventType() { + return EventType.SCHEMA_CHANGE_DROP_COLUMN; + } } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableModifyColumnEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableModifyColumnEvent.java index 2a0208244da..342d24ce73f 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableModifyColumnEvent.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableModifyColumnEvent.java @@ -17,6 +17,7 @@ package org.apache.seatunnel.api.table.event; +import org.apache.seatunnel.api.event.EventType; import org.apache.seatunnel.api.table.catalog.Column; import org.apache.seatunnel.api.table.catalog.TableIdentifier; @@ -45,4 +46,9 @@ public static AlterTableModifyColumnEvent modifyAfter( TableIdentifier tableIdentifier, Column column, String afterColumn) { return new AlterTableModifyColumnEvent(tableIdentifier, column, false, afterColumn); } + + @Override + public EventType getEventType() { + return EventType.SCHEMA_CHANGE_MODIFY_COLUMN; + } } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableNameEvent.java 
b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableNameEvent.java index 4ed02509aa6..9454f6a5469 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableNameEvent.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/AlterTableNameEvent.java @@ -17,6 +17,7 @@ package org.apache.seatunnel.api.table.event; +import org.apache.seatunnel.api.event.EventType; import org.apache.seatunnel.api.table.catalog.TableIdentifier; import org.apache.seatunnel.api.table.catalog.TablePath; @@ -37,4 +38,9 @@ public AlterTableNameEvent( public TablePath getNewTablePath() { return newTableIdentifier.toTablePath(); } + + @Override + public EventType getEventType() { + return EventType.SCHEMA_CHANGE_RENAME_TABLE; + } } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/SchemaChangeEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/SchemaChangeEvent.java index b696a33b9da..b3d73db9f15 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/SchemaChangeEvent.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/SchemaChangeEvent.java @@ -17,13 +17,12 @@ package org.apache.seatunnel.api.table.event; +import org.apache.seatunnel.api.event.Event; import org.apache.seatunnel.api.table.catalog.TableIdentifier; import org.apache.seatunnel.api.table.catalog.TablePath; -import java.io.Serializable; - /** Represents a structural change to a table schema. 
*/ -public interface SchemaChangeEvent extends Serializable { +public interface SchemaChangeEvent extends Event { /** * Path of the change table object diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/TableEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/TableEvent.java index 5ba8127e306..4a1235bb671 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/TableEvent.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/event/TableEvent.java @@ -22,13 +22,17 @@ import lombok.Getter; import lombok.RequiredArgsConstructor; +import lombok.Setter; import lombok.ToString; @Getter @ToString @RequiredArgsConstructor public abstract class TableEvent implements SchemaChangeEvent { + private long createdTime = System.currentTimeMillis(); protected final TableIdentifier tableIdentifier; + @Getter @Setter private String jobId; + @Getter @Setter private String statement; @Override public TableIdentifier tableIdentifier() { @@ -38,4 +42,9 @@ public TableIdentifier tableIdentifier() { public TablePath getTablePath() { return tablePath(); } + + @Override + public long getCreatedTime() { + return createdTime; + } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/IncrementalSource.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/IncrementalSource.java index d49aff05922..087dc177761 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/IncrementalSource.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/IncrementalSource.java @@ -17,7 +17,6 @@ package org.apache.seatunnel.connectors.cdc.base.source; -import org.apache.seatunnel.api.common.metrics.MetricsContext; import 
org.apache.seatunnel.api.configuration.Option; import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.source.Boundedness; @@ -182,7 +181,7 @@ public SourceReader createReader(SourceReader.Context reader dataSourceDialect, elementsQueue, splitReaderSupplier, - createRecordEmitter(sourceConfig, readerContext.getMetricsContext()), + createRecordEmitter(sourceConfig, readerContext), new SourceReaderOptions(readonlyConfig), readerContext, sourceConfig, @@ -190,9 +189,8 @@ public SourceReader createReader(SourceReader.Context reader } protected RecordEmitter createRecordEmitter( - SourceConfig sourceConfig, MetricsContext metricsContext) { - return new IncrementalSourceRecordEmitter<>( - deserializationSchema, offsetFactory, metricsContext); + SourceConfig sourceConfig, SourceReader.Context context) { + return new IncrementalSourceRecordEmitter<>(deserializationSchema, offsetFactory, context); } @Override diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceRecordEmitter.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceRecordEmitter.java index 3fbbd744b9c..65474a0d9fd 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceRecordEmitter.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceRecordEmitter.java @@ -18,8 +18,9 @@ package org.apache.seatunnel.connectors.cdc.base.source.reader; import org.apache.seatunnel.api.common.metrics.Counter; -import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.source.Collector; +import 
org.apache.seatunnel.api.source.SourceReader; import org.apache.seatunnel.api.table.event.SchemaChangeEvent; import org.apache.seatunnel.connectors.cdc.base.source.offset.Offset; import org.apache.seatunnel.connectors.cdc.base.source.offset.OffsetFactory; @@ -66,16 +67,18 @@ public class IncrementalSourceRecordEmitter protected final Counter recordFetchDelay; protected final Counter recordEmitDelay; + protected final EventListener eventListener; public IncrementalSourceRecordEmitter( DebeziumDeserializationSchema debeziumDeserializationSchema, OffsetFactory offsetFactory, - MetricsContext metricsContext) { + SourceReader.Context context) { this.debeziumDeserializationSchema = debeziumDeserializationSchema; this.outputCollector = new OutputCollector<>(); this.offsetFactory = offsetFactory; - this.recordFetchDelay = metricsContext.counter(CDC_RECORD_FETCH_DELAY); - this.recordEmitDelay = metricsContext.counter(CDC_RECORD_EMIT_DELAY); + this.recordFetchDelay = context.getMetricsContext().counter(CDC_RECORD_FETCH_DELAY); + this.recordEmitDelay = context.getMetricsContext().counter(CDC_RECORD_EMIT_DELAY); + this.eventListener = context.getEventListener(); } @Override @@ -157,7 +160,7 @@ protected void emitElement(SourceRecord element, Collector output) throws Exc debeziumDeserializationSchema.deserialize(element, outputCollector); } - private static class OutputCollector implements Collector { + private class OutputCollector implements Collector { private Collector output; @Override @@ -167,6 +170,7 @@ public void collect(T record) { @Override public void collect(SchemaChangeEvent event) { + eventListener.onEvent(event); output.collect(event); } diff --git a/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/multitablesink/SinkContextProxy.java b/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/multitablesink/SinkContextProxy.java index 3f2ad31c81c..f7691ddedff 
100644 --- a/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/multitablesink/SinkContextProxy.java +++ b/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/multitablesink/SinkContextProxy.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.connectors.seatunnel.common.multitablesink; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.sink.SinkWriter; public class SinkContextProxy implements SinkWriter.Context { @@ -40,4 +41,9 @@ public int getIndexOfSubtask() { public MetricsContext getMetricsContext() { return context.getMetricsContext(); } + + @Override + public EventListener getEventListener() { + return context.getEventListener(); + } } diff --git a/seatunnel-connectors-v2/connector-console/src/main/java/org/apache/seatunnel/connectors/seatunnel/console/sink/ConsoleSinkWriter.java b/seatunnel-connectors-v2/connector-console/src/main/java/org/apache/seatunnel/connectors/seatunnel/console/sink/ConsoleSinkWriter.java index 07e7afe1b8a..4c9e6f47605 100644 --- a/seatunnel-connectors-v2/connector-console/src/main/java/org/apache/seatunnel/connectors/seatunnel/console/sink/ConsoleSinkWriter.java +++ b/seatunnel-connectors-v2/connector-console/src/main/java/org/apache/seatunnel/connectors/seatunnel/console/sink/ConsoleSinkWriter.java @@ -19,6 +19,7 @@ import org.apache.seatunnel.api.sink.SinkWriter; import org.apache.seatunnel.api.sink.SupportMultiTableSinkWriter; +import org.apache.seatunnel.api.sink.event.WriterCloseEvent; import org.apache.seatunnel.api.table.event.SchemaChangeEvent; import org.apache.seatunnel.api.table.event.handler.DataTypeChangeEventDispatcher; import org.apache.seatunnel.api.table.event.handler.DataTypeChangeEventHandler; @@ -99,7 +100,7 @@ public void write(SeaTunnelRow element) { @Override public void close() { - // nothing + 
context.getEventListener().onEvent(new WriterCloseEvent()); } private String fieldsInfo(SeaTunnelRowType seaTunnelRowType) { diff --git a/seatunnel-connectors-v2/connector-fake/src/main/java/org/apache/seatunnel/connectors/seatunnel/fake/source/FakeSourceReader.java b/seatunnel-connectors-v2/connector-fake/src/main/java/org/apache/seatunnel/connectors/seatunnel/fake/source/FakeSourceReader.java index 016f336d37e..95758cb971e 100644 --- a/seatunnel-connectors-v2/connector-fake/src/main/java/org/apache/seatunnel/connectors/seatunnel/fake/source/FakeSourceReader.java +++ b/seatunnel-connectors-v2/connector-fake/src/main/java/org/apache/seatunnel/connectors/seatunnel/fake/source/FakeSourceReader.java @@ -20,6 +20,8 @@ import org.apache.seatunnel.api.source.Boundedness; import org.apache.seatunnel.api.source.Collector; import org.apache.seatunnel.api.source.SourceReader; +import org.apache.seatunnel.api.source.event.ReaderCloseEvent; +import org.apache.seatunnel.api.source.event.ReaderOpenEvent; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.connectors.seatunnel.fake.config.FakeConfig; import org.apache.seatunnel.connectors.seatunnel.fake.config.MultipleTableFakeSourceConfig; @@ -72,12 +74,12 @@ public FakeSourceReader( @Override public void open() { - // nothing + context.getEventListener().onEvent(new ReaderOpenEvent()); } @Override public void close() { - // nothing + context.getEventListener().onEvent(new ReaderCloseEvent()); } @Override diff --git a/seatunnel-connectors-v2/connector-fake/src/main/java/org/apache/seatunnel/connectors/seatunnel/fake/source/FakeSourceSplitEnumerator.java b/seatunnel-connectors-v2/connector-fake/src/main/java/org/apache/seatunnel/connectors/seatunnel/fake/source/FakeSourceSplitEnumerator.java index 102957d26ea..ecd6d509149 100644 --- a/seatunnel-connectors-v2/connector-fake/src/main/java/org/apache/seatunnel/connectors/seatunnel/fake/source/FakeSourceSplitEnumerator.java +++ 
b/seatunnel-connectors-v2/connector-fake/src/main/java/org/apache/seatunnel/connectors/seatunnel/fake/source/FakeSourceSplitEnumerator.java @@ -18,6 +18,8 @@ package org.apache.seatunnel.connectors.seatunnel.fake.source; import org.apache.seatunnel.api.source.SourceSplitEnumerator; +import org.apache.seatunnel.api.source.event.EnumeratorCloseEvent; +import org.apache.seatunnel.api.source.event.EnumeratorOpenEvent; import org.apache.seatunnel.connectors.seatunnel.fake.config.FakeConfig; import org.apache.seatunnel.connectors.seatunnel.fake.config.MultipleTableFakeSourceConfig; import org.apache.seatunnel.connectors.seatunnel.fake.state.FakeSourceState; @@ -57,7 +59,7 @@ public FakeSourceSplitEnumerator( @Override public void open() { - // No connection needs to be opened + enumeratorContext.getEventListener().onEvent(new EnumeratorOpenEvent()); } @Override @@ -68,7 +70,7 @@ public void run() throws Exception { @Override public void close() throws IOException { - // nothing + enumeratorContext.getEventListener().onEvent(new EnumeratorCloseEvent()); } @Override diff --git a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java index 886f6d6a158..d080c21fa79 100644 --- a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java +++ b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java @@ -149,7 +149,13 @@ public List execute(List upstreamDataStreams } } } - SparkSinkInjector.inject(dataset.write(), sink, datasetTableInfo.getCatalogTable()) + String applicationId = + 
sparkRuntimeEnvironment.getSparkSession().sparkContext().applicationId(); + SparkSinkInjector.inject( + dataset.write(), + sink, + datasetTableInfo.getCatalogTable(), + applicationId) .option("checkpointLocation", "/tmp") .save(); } diff --git a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-starter-common/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-starter-common/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java index 654cfaa1815..08fe4162bcb 100644 --- a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-starter-common/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java +++ b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-starter-common/src/main/java/org/apache/seatunnel/core/starter/spark/execution/SinkExecuteProcessor.java @@ -150,7 +150,13 @@ public List execute(List upstreamDataStreams } } } - SparkSinkInjector.inject(dataset.write(), sink, datasetTableInfo.getCatalogTable()) + String applicationId = + sparkRuntimeEnvironment.getStreamingContext().sparkContext().applicationId(); + SparkSinkInjector.inject( + dataset.write(), + sink, + datasetTableInfo.getCatalogTable(), + applicationId) .option("checkpointLocation", "/tmp") .mode(SaveMode.Append) .save(); diff --git a/seatunnel-e2e/seatunnel-e2e-common/src/test/java/org/apache/seatunnel/e2e/common/container/seatunnel/SeaTunnelContainer.java b/seatunnel-e2e/seatunnel-e2e-common/src/test/java/org/apache/seatunnel/e2e/common/container/seatunnel/SeaTunnelContainer.java index b4dbf28da3d..aa4d62024f5 100644 --- a/seatunnel-e2e/seatunnel-e2e-common/src/test/java/org/apache/seatunnel/e2e/common/container/seatunnel/SeaTunnelContainer.java +++ b/seatunnel-e2e/seatunnel-e2e-common/src/test/java/org/apache/seatunnel/e2e/common/container/seatunnel/SeaTunnelContainer.java @@ -97,9 +97,10 @@ public void startUp() throws 
Exception { PROJECT_ROOT_PATH + "/seatunnel-shade/seatunnel-hadoop3-3.1.4-uber/target/seatunnel-hadoop3-3.1.4-uber.jar"), Paths.get(SEATUNNEL_HOME, "lib/seatunnel-hadoop3-3.1.4-uber.jar").toString()); - server.start(); // execute extra commands executeExtraCommands(server); + + server.start(); } @Override diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/pom.xml b/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/pom.xml index fa8080de1e6..d9a77c025a3 100644 --- a/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/pom.xml +++ b/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/pom.xml @@ -36,6 +36,13 @@ test-jar test + + com.squareup.okhttp + mockwebserver + 2.7.5 + test + + org.apache.seatunnel diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/java/org/apache/seatunnel/engine/e2e/console/FakeSourceToConsoleWithEventReportIT.java b/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/java/org/apache/seatunnel/engine/e2e/console/FakeSourceToConsoleWithEventReportIT.java new file mode 100644 index 00000000000..8389cb3c058 --- /dev/null +++ b/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/java/org/apache/seatunnel/engine/e2e/console/FakeSourceToConsoleWithEventReportIT.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.engine.e2e.console; + +import org.apache.seatunnel.shade.com.fasterxml.jackson.databind.JsonNode; +import org.apache.seatunnel.shade.com.fasterxml.jackson.databind.node.ArrayNode; + +import org.apache.seatunnel.api.event.EventType; +import org.apache.seatunnel.engine.e2e.SeaTunnelContainer; +import org.apache.seatunnel.engine.server.event.JobEventHttpReportHandler; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.testcontainers.Testcontainers; +import org.testcontainers.containers.Container; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.utility.MountableFile; + +import com.squareup.okhttp.mockwebserver.MockResponse; +import com.squareup.okhttp.mockwebserver.MockWebServer; +import com.squareup.okhttp.mockwebserver.RecordedRequest; +import lombok.extern.slf4j.Slf4j; +import okio.Buffer; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.apache.seatunnel.e2e.common.util.ContainerUtil.PROJECT_ROOT_PATH; +import static org.awaitility.Awaitility.given; + +@Slf4j +public class FakeSourceToConsoleWithEventReportIT extends SeaTunnelContainer { + private static final int MOCK_SERVER_PORT = 1024; + + private MockWebServer mockWebServer; + + @Override + @BeforeAll + 
public void startUp() throws Exception { + mockWebServer = new MockWebServer(); + mockWebServer.start(MOCK_SERVER_PORT); + mockWebServer.enqueue(new MockResponse().setResponseCode(200)); + Testcontainers.exposeHostPorts(MOCK_SERVER_PORT); + + super.startUp(); + log.info("The TestContainer[{}] is running.", identifier()); + } + + @Override + @AfterAll + public void tearDown() throws Exception { + super.tearDown(); + + mockWebServer.shutdown(); + log.info("The TestContainer[{}] is closed.", identifier()); + } + + @Override + protected void executeExtraCommands(GenericContainer container) + throws IOException, InterruptedException { + container.withCopyFileToContainer( + MountableFile.forHostPath( + PROJECT_ROOT_PATH + + "/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/resources/seatunnel_config_with_event_report.yaml"), + Paths.get(SEATUNNEL_HOME, "config", "seatunnel.yaml").toString()); + } + + @Test + public void testEventReport() throws IOException, InterruptedException { + Container.ExecResult execResult = executeSeaTunnelJob("/fakesource_to_console.conf"); + Assertions.assertEquals(0, execResult.getExitCode()); + + Thread.sleep(JobEventHttpReportHandler.REPORT_INTERVAL.toMillis()); + given().ignoreExceptions() + .await() + .atMost(60, TimeUnit.SECONDS) + .until(() -> mockWebServer.getRequestCount(), count -> count > 0); + + List events = new ArrayList<>(); + for (int i = 0; i < mockWebServer.getRequestCount(); i++) { + RecordedRequest request = mockWebServer.takeRequest(); + try (Buffer buffer = request.getBody()) { + String body = buffer.readUtf8(); + ArrayNode arrayNode = + (ArrayNode) JobEventHttpReportHandler.JSON_MAPPER.readTree(body); + arrayNode.elements().forEachRemaining(jsonNode -> events.add(jsonNode)); + } + } + Assertions.assertEquals(8, events.size()); + Set eventTypes = + events.stream().map(e -> e.get("eventType").asText()).collect(Collectors.toSet()); + Assertions.assertTrue( + eventTypes.containsAll( + 
Arrays.asList( + EventType.LIFECYCLE_ENUMERATOR_OPEN.name(), + EventType.LIFECYCLE_ENUMERATOR_CLOSE.name(), + EventType.LIFECYCLE_READER_OPEN.name(), + EventType.LIFECYCLE_READER_CLOSE.name(), + EventType.LIFECYCLE_WRITER_CLOSE.name()))); + } +} diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/resources/fakesource_to_console.conf b/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/resources/fakesource_to_console.conf index 42df3525d8e..520d9bd3ce3 100644 --- a/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/resources/fakesource_to_console.conf +++ b/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/resources/fakesource_to_console.conf @@ -19,6 +19,7 @@ ###### env { + parallelism = 2 job.mode = "BATCH" } diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/resources/seatunnel_config_with_event_report.yaml b/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/resources/seatunnel_config_with_event_report.yaml new file mode 100644 index 00000000000..5cad037ede4 --- /dev/null +++ b/seatunnel-e2e/seatunnel-engine-e2e/connector-console-seatunnel-e2e/src/test/resources/seatunnel_config_with_event_report.yaml @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +seatunnel: + engine: + event-report-http: + url: http://host.testcontainers.internal:1024/event/report + headers: + Content-Type: application/json diff --git a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/resources/log4j2.properties b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/resources/log4j2.properties index 6b6c6335ece..3b1f20dd67e 100644 --- a/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/resources/log4j2.properties +++ b/seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/resources/log4j2.properties @@ -32,6 +32,9 @@ logger.checkpoint.level=INFO logger.debezium.name=io.debezium.connector logger.debezium.level=WARN +logger.loggingEvent.name=org.apache.seatunnel.api.event.LoggingEventHandler +logger.loggingEvent.level=INFO + ############################ log output to console ############################# rootLogger.appenderRef.consoleStdout.ref = consoleStdoutAppender rootLogger.appenderRef.consoleStderr.ref = consoleStderrAppender diff --git a/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/config/EngineConfig.java b/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/config/EngineConfig.java index b9d7ed10790..8d1eff8e6fe 100644 --- a/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/config/EngineConfig.java +++ b/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/config/EngineConfig.java 
@@ -26,6 +26,9 @@ import lombok.Data; +import java.util.Collections; +import java.util.Map; + import static com.hazelcast.internal.util.Preconditions.checkBackupCount; import static com.hazelcast.internal.util.Preconditions.checkNotNull; import static com.hazelcast.internal.util.Preconditions.checkPositive; @@ -60,6 +63,9 @@ public class EngineConfig { private int historyJobExpireMinutes = ServerConfigOptions.HISTORY_JOB_EXPIRE_MINUTES.defaultValue(); + private String eventReportHttpApi; + private Map eventReportHttpHeaders = Collections.emptyMap(); + public void setBackupCount(int newBackupCount) { checkBackupCount(newBackupCount, 0); this.backupCount = newBackupCount; @@ -103,4 +109,14 @@ public EngineConfig setQueueType(QueueType queueType) { this.queueType = queueType; return this; } + + public EngineConfig setEventReportHttpApi(String eventReportHttpApi) { + this.eventReportHttpApi = eventReportHttpApi; + return this; + } + + public EngineConfig setEventReportHttpHeaders(Map eventReportHttpHeaders) { + this.eventReportHttpHeaders = eventReportHttpHeaders; + return this; + } } diff --git a/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/config/YamlSeaTunnelDomConfigProcessor.java b/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/config/YamlSeaTunnelDomConfigProcessor.java index ebc67f08a73..787f58ca90f 100644 --- a/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/config/YamlSeaTunnelDomConfigProcessor.java +++ b/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/config/YamlSeaTunnelDomConfigProcessor.java @@ -29,7 +29,9 @@ import org.apache.commons.lang3.StringUtils; +import org.w3c.dom.NamedNodeMap; import org.w3c.dom.Node; +import org.w3c.dom.NodeList; import com.hazelcast.config.InvalidConfigurationException; import com.hazelcast.internal.config.AbstractDomConfigProcessor; @@ -38,6 +40,7 @@ import 
java.util.Arrays; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.Locale; import java.util.Map; @@ -145,6 +148,23 @@ private void parseEngineConfig(Node engineNode, SeaTunnelConfig config) { engineConfig.setConnectorJarStorageConfig(parseConnectorJarStorageConfig(node)); } else if (ServerConfigOptions.CLASSLOADER_CACHE_MODE.key().equals(name)) { engineConfig.setClassloaderCacheMode(getBooleanValue(getTextContent(node))); + } else if (ServerConfigOptions.EVENT_REPORT_HTTP.equalsIgnoreCase(name)) { + NamedNodeMap attributes = node.getAttributes(); + Node urlNode = attributes.getNamedItem(ServerConfigOptions.EVENT_REPORT_HTTP_URL); + if (urlNode != null) { + engineConfig.setEventReportHttpApi(getTextContent(urlNode)); + Node headersNode = + attributes.getNamedItem(ServerConfigOptions.EVENT_REPORT_HTTP_HEADERS); + if (headersNode != null) { + Map headers = new LinkedHashMap<>(); + NodeList nodeList = headersNode.getChildNodes(); + for (int i = 0; i < nodeList.getLength(); i++) { + Node item = nodeList.item(i); + headers.put(cleanNodeName(item), getTextContent(item)); + } + engineConfig.setEventReportHttpHeaders(headers); + } + } } else { LOGGER.warning("Unrecognized element: " + name); } diff --git a/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/config/server/ServerConfigOptions.java b/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/config/server/ServerConfigOptions.java index ea9a188be6f..1316d2fec7c 100644 --- a/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/config/server/ServerConfigOptions.java +++ b/seatunnel-engine/seatunnel-engine-common/src/main/java/org/apache/seatunnel/engine/common/config/server/ServerConfigOptions.java @@ -203,4 +203,8 @@ public class ServerConfigOptions { .defaultValue(false) .withDescription( "Whether to use classloader cache mode. 
With cache mode, all jobs share the same classloader if the jars are the same"); + + public static final String EVENT_REPORT_HTTP = "event-report-http"; + public static final String EVENT_REPORT_HTTP_URL = "url"; + public static final String EVENT_REPORT_HTTP_HEADERS = "headers"; } diff --git a/seatunnel-engine/seatunnel-engine-server/pom.xml b/seatunnel-engine/seatunnel-engine-server/pom.xml index 4c0443d9110..b8321b56252 100644 --- a/seatunnel-engine/seatunnel-engine-server/pom.xml +++ b/seatunnel-engine/seatunnel-engine-server/pom.xml @@ -88,6 +88,12 @@ optional provided + + com.squareup.okhttp + mockwebserver + 2.7.5 + test + diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/CoordinatorService.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/CoordinatorService.java index a5028f3597a..0bf2c0de4dc 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/CoordinatorService.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/CoordinatorService.java @@ -19,6 +19,8 @@ import org.apache.seatunnel.api.common.metrics.JobMetrics; import org.apache.seatunnel.api.common.metrics.RawJobMetrics; +import org.apache.seatunnel.api.event.EventHandler; +import org.apache.seatunnel.api.event.EventProcessor; import org.apache.seatunnel.common.utils.ExceptionUtils; import org.apache.seatunnel.common.utils.SeaTunnelException; import org.apache.seatunnel.common.utils.StringFormatUtils; @@ -38,6 +40,8 @@ import org.apache.seatunnel.engine.server.dag.physical.PhysicalVertex; import org.apache.seatunnel.engine.server.dag.physical.PipelineLocation; import org.apache.seatunnel.engine.server.dag.physical.SubPlan; +import org.apache.seatunnel.engine.server.event.JobEventHttpReportHandler; +import org.apache.seatunnel.engine.server.event.JobEventProcessor; import 
org.apache.seatunnel.engine.server.execution.ExecutionState; import org.apache.seatunnel.engine.server.execution.TaskExecutionState; import org.apache.seatunnel.engine.server.execution.TaskGroupLocation; @@ -55,11 +59,13 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.hazelcast.cluster.Address; +import com.hazelcast.config.Config; import com.hazelcast.core.HazelcastInstanceNotActiveException; import com.hazelcast.internal.serialization.Data; import com.hazelcast.internal.services.MembershipServiceEvent; import com.hazelcast.logging.ILogger; import com.hazelcast.map.IMap; +import com.hazelcast.ringbuffer.Ringbuffer; import com.hazelcast.spi.impl.NodeEngineImpl; import lombok.NonNull; @@ -157,6 +163,8 @@ public class CoordinatorService { private ConnectorPackageService connectorPackageService; + private EventProcessor eventProcessor; + public CoordinatorService( @NonNull NodeEngineImpl nodeEngine, @NonNull SeaTunnelServer seaTunnelServer, @@ -175,6 +183,37 @@ public CoordinatorService( this::checkNewActiveMaster, 0, 100, TimeUnit.MILLISECONDS); } + private JobEventProcessor createJobEventProcessor( + String reportHttpEndpoint, + Map reportHttpHeaders, + NodeEngineImpl nodeEngine) { + List handlers = + EventProcessor.loadEventHandlers(Thread.currentThread().getContextClassLoader()); + + if (reportHttpEndpoint != null) { + String ringBufferName = "zeta-job-event"; + int maxBufferCapacity = 2000; + nodeEngine + .getHazelcastInstance() + .getConfig() + .addRingBufferConfig( + new Config() + .getRingbufferConfig(ringBufferName) + .setCapacity(maxBufferCapacity) + .setBackupCount(0) + .setAsyncBackupCount(1) + .setTimeToLiveSeconds(0)); + Ringbuffer ringbuffer = nodeEngine.getHazelcastInstance().getRingbuffer(ringBufferName); + JobEventHttpReportHandler httpReportHandler = + new JobEventHttpReportHandler( + reportHttpEndpoint, reportHttpHeaders, ringbuffer); + handlers.add(httpReportHandler); + } + logger.info("Loaded event handlers: " + 
handlers); + JobEventProcessor eventProcessor = new JobEventProcessor(handlers); + return eventProcessor; + } + public JobHistoryService getJobHistoryService() { return jobHistoryService; } @@ -183,6 +222,10 @@ public JobMaster getJobMaster(Long jobId) { return runningJobMasterMap.get(jobId); } + public EventProcessor getEventProcessor() { + return eventProcessor; + } + // On the new master node // 1. If runningJobStateIMap.get(jobId) == null and runningJobInfoIMap.get(jobId) != null. We // will do @@ -225,6 +268,11 @@ private void initCoordinatorService() { .getHazelcastInstance() .getMap(Constant.IMAP_FINISHED_JOB_VERTEX_INFO), engineConfig.getHistoryJobExpireMinutes()); + eventProcessor = + createJobEventProcessor( + engineConfig.getEventReportHttpApi(), + engineConfig.getEventReportHttpHeaders(), + nodeEngine); // If the user has configured the connector package service, create it on the master node. ConnectorJarStorageConfig connectorJarStorageConfig = @@ -363,6 +411,14 @@ public synchronized void clearCoordinatorService() { if (resourceManager != null) { resourceManager.close(); } + + try { + if (eventProcessor != null) { + eventProcessor.close(); + } + } catch (Exception e) { + throw new SeaTunnelEngineException("close event processor error", e); + } } /** Lazy load for resource manager */ diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandler.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandler.java new file mode 100644 index 00000000000..f1e0fe9ac8f --- /dev/null +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandler.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.engine.server.event; + +import org.apache.seatunnel.shade.com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.seatunnel.shade.com.google.common.util.concurrent.ThreadFactoryBuilder; + +import org.apache.seatunnel.api.event.Event; +import org.apache.seatunnel.api.event.EventHandler; + +import com.hazelcast.ringbuffer.OverflowPolicy; +import com.hazelcast.ringbuffer.ReadResultSet; +import com.hazelcast.ringbuffer.Ringbuffer; +import com.hazelcast.ringbuffer.impl.RingbufferProxy; +import com.squareup.okhttp.MediaType; +import com.squareup.okhttp.OkHttpClient; +import com.squareup.okhttp.Request; +import com.squareup.okhttp.RequestBody; +import com.squareup.okhttp.Response; +import lombok.extern.slf4j.Slf4j; + +import java.io.IOException; +import java.time.Duration; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.CompletionStage; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +@Slf4j +public class JobEventHttpReportHandler implements EventHandler { + public static final ObjectMapper JSON_MAPPER = new ObjectMapper(); + public static final Duration REPORT_INTERVAL = Duration.ofSeconds(10); + + private final String 
httpEndpoint; + private final Map httpHeaders; + private final OkHttpClient httpClient; + private final MediaType httpMediaType = MediaType.parse("application/json"); + private final Ringbuffer ringbuffer; + private volatile long committedEventIndex; + private final ScheduledExecutorService scheduledExecutorService; + + public JobEventHttpReportHandler(String httpEndpoint, Ringbuffer ringbuffer) { + this(httpEndpoint, REPORT_INTERVAL, ringbuffer); + } + + public JobEventHttpReportHandler( + String httpEndpoint, Map httpHeaders, Ringbuffer ringbuffer) { + this(httpEndpoint, httpHeaders, REPORT_INTERVAL, ringbuffer); + } + + public JobEventHttpReportHandler( + String httpEndpoint, Duration reportInterval, Ringbuffer ringbuffer) { + this(httpEndpoint, Collections.emptyMap(), reportInterval, ringbuffer); + } + + public JobEventHttpReportHandler( + String httpEndpoint, + Map httpHeaders, + Duration reportInterval, + Ringbuffer ringbuffer) { + this.httpEndpoint = httpEndpoint; + this.httpHeaders = httpHeaders; + this.ringbuffer = ringbuffer; + this.committedEventIndex = ringbuffer.headSequence(); + this.httpClient = createHttpClient(); + this.scheduledExecutorService = + Executors.newSingleThreadScheduledExecutor( + new ThreadFactoryBuilder() + .setNameFormat("http-report-event-scheduler-%d") + .build()); + scheduledExecutorService.scheduleAtFixedRate( + () -> { + try { + report(); + } catch (Throwable e) { + log.error("Failed to report event", e); + } + }, + 0, + reportInterval.getSeconds(), + TimeUnit.SECONDS); + } + + @Override + public void handle(Event event) { + CompletionStage completionStage = ringbuffer.addAsync(event, OverflowPolicy.OVERWRITE); + completionStage.toCompletableFuture().join(); + } + + private void report() throws IOException { + long headSequence = ringbuffer.headSequence(); + if (headSequence > committedEventIndex) { + log.warn( + "The head sequence {} is greater than the committed event index {}", + headSequence, + committedEventIndex); + 
committedEventIndex = headSequence; + } + CompletionStage> completionStage = + ringbuffer.readManyAsync( + committedEventIndex, 0, RingbufferProxy.MAX_BATCH_SIZE, null); + ReadResultSet resultSet = completionStage.toCompletableFuture().join(); + if (resultSet.size() <= 0) { + return; + } + + String events = JSON_MAPPER.writeValueAsString(resultSet.iterator()); + Request.Builder requestBuilder = + new Request.Builder() + .url(httpEndpoint) + .post(RequestBody.create(httpMediaType, events)); + httpHeaders.forEach(requestBuilder::header); + Response response = httpClient.newCall(requestBuilder.build()).execute(); + if (response.isSuccessful()) { + committedEventIndex += resultSet.readCount(); + } else { + log.error("Failed to request http server: {}", response); + } + } + + @Override + public void close() { + log.info("Close http report handler"); + scheduledExecutorService.shutdown(); + } + + private OkHttpClient createHttpClient() { + OkHttpClient client = new OkHttpClient(); + client.setConnectTimeout(30, TimeUnit.SECONDS); + client.setWriteTimeout(10, TimeUnit.SECONDS); + return client; + } +} diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventListener.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventListener.java new file mode 100644 index 00000000000..4e834b828c4 --- /dev/null +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventListener.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.engine.server.event; + +import org.apache.seatunnel.api.event.Event; +import org.apache.seatunnel.api.event.EventListener; +import org.apache.seatunnel.engine.server.execution.TaskExecutionContext; +import org.apache.seatunnel.engine.server.execution.TaskLocation; + +import lombok.AllArgsConstructor; + +@AllArgsConstructor +public class JobEventListener implements EventListener { + private final TaskLocation taskLocation; + private final TaskExecutionContext taskExecutionContext; + + @Override + public void onEvent(Event event) { + event.setJobId(String.valueOf(taskLocation.getJobId())); + JobEventReportOperation evenCollectOperation = new JobEventReportOperation(event); + taskExecutionContext.sendToMaster(evenCollectOperation).join(); + } +} diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventProcessor.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventProcessor.java new file mode 100644 index 00000000000..42f128aceed --- /dev/null +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventProcessor.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.engine.server.event; + +import org.apache.seatunnel.api.event.Event; +import org.apache.seatunnel.api.event.EventHandler; +import org.apache.seatunnel.api.event.EventProcessor; + +import lombok.AllArgsConstructor; + +import java.util.List; + +@AllArgsConstructor +public class JobEventProcessor implements EventProcessor { + private final List handlers; + + @Override + public void process(Event event) { + handlers.forEach(listener -> listener.handle(event)); + } + + @Override + public void close() throws Exception { + EventProcessor.close(handlers); + } +} diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventReportOperation.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventReportOperation.java new file mode 100644 index 00000000000..cc2500f3b31 --- /dev/null +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventReportOperation.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.engine.server.event; + +import org.apache.seatunnel.api.event.Event; +import org.apache.seatunnel.engine.server.SeaTunnelServer; +import org.apache.seatunnel.engine.server.serializable.TaskDataSerializerHook; + +import com.hazelcast.nio.ObjectDataInput; +import com.hazelcast.nio.ObjectDataOutput; +import com.hazelcast.nio.serialization.IdentifiedDataSerializable; +import com.hazelcast.spi.impl.operationservice.Operation; +import lombok.AllArgsConstructor; +import lombok.NoArgsConstructor; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; + +@NoArgsConstructor +@AllArgsConstructor +public class JobEventReportOperation extends Operation implements IdentifiedDataSerializable { + + private Event event; + + @Override + public void run() throws Exception { + SeaTunnelServer server = getService(); + server.getCoordinatorService().getEventProcessor().process(event); + } + + @Override + protected void writeInternal(ObjectDataOutput out) throws IOException { + try (ByteArrayOutputStream byteOut = new ByteArrayOutputStream(); + ObjectOutputStream objectOut = new ObjectOutputStream(byteOut)) { + objectOut.writeObject(event); + objectOut.flush(); + out.writeByteArray(byteOut.toByteArray()); + } + } + + @Override + protected void 
readInternal(ObjectDataInput in) throws IOException { + try (ByteArrayInputStream byteIn = new ByteArrayInputStream(in.readByteArray()); + ObjectInputStream objectIn = new ObjectInputStream(byteIn)) { + event = (Event) objectIn.readObject(); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + } + + @Override + public int getFactoryId() { + return TaskDataSerializerHook.FACTORY_ID; + } + + @Override + public int getClassId() { + return TaskDataSerializerHook.REPORT_JOB_EVENT; + } +} diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/serializable/TaskDataSerializerHook.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/serializable/TaskDataSerializerHook.java index 185c81ba9f0..7c298272c7d 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/serializable/TaskDataSerializerHook.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/serializable/TaskDataSerializerHook.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.engine.server.serializable; import org.apache.seatunnel.engine.common.serializeable.SeaTunnelFactoryIdConstant; +import org.apache.seatunnel.engine.server.event.JobEventReportOperation; import org.apache.seatunnel.engine.server.execution.TaskLocation; import org.apache.seatunnel.engine.server.task.Progress; import org.apache.seatunnel.engine.server.task.TaskGroupImmutableInformation; @@ -98,6 +99,8 @@ public class TaskDataSerializerHook implements DataSerializerHook { public static final int DELETE_CONNECTOR_JAR_IN_EXECUTION_NODE = 24; + public static final int REPORT_JOB_EVENT = 25; + public static final int FACTORY_ID = FactoryIdHelper.getFactoryId( SeaTunnelFactoryIdConstant.SEATUNNEL_TASK_DATA_SERIALIZER_FACTORY, @@ -166,6 +169,8 @@ public IdentifiedDataSerializable create(int typeId) { return new 
SendConnectorJarToMemberNodeOperation(); case DELETE_CONNECTOR_JAR_IN_EXECUTION_NODE: return new DeleteConnectorJarInExecutionNode(); + case REPORT_JOB_EVENT: + return new JobEventReportOperation(); default: throw new IllegalArgumentException("Unknown type id " + typeId); } diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/SourceSplitEnumeratorTask.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/SourceSplitEnumeratorTask.java index 7f36f8f26a1..e95684c1c51 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/SourceSplitEnumeratorTask.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/SourceSplitEnumeratorTask.java @@ -27,6 +27,7 @@ import org.apache.seatunnel.engine.server.checkpoint.ActionSubtaskState; import org.apache.seatunnel.engine.server.checkpoint.CheckpointBarrier; import org.apache.seatunnel.engine.server.checkpoint.operation.TaskAcknowledgeOperation; +import org.apache.seatunnel.engine.server.event.JobEventListener; import org.apache.seatunnel.engine.server.execution.ProgressState; import org.apache.seatunnel.engine.server.execution.TaskLocation; import org.apache.seatunnel.engine.server.task.context.SeaTunnelSplitEnumeratorContext; @@ -102,7 +103,10 @@ public void init() throws Exception { + source.getName()); enumeratorContext = new SeaTunnelSplitEnumeratorContext<>( - this.source.getParallelism(), this, getMetricsContext()); + this.source.getParallelism(), + this, + getMetricsContext(), + new JobEventListener(taskLocation, getExecutionContext())); enumeratorStateSerializer = this.source.getSource().getEnumeratorStateSerializer(); splitSerializer = this.source.getSource().getSplitSerializer(); taskMemberMapping = new ConcurrentHashMap<>(); diff --git 
a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/context/SeaTunnelSplitEnumeratorContext.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/context/SeaTunnelSplitEnumeratorContext.java index 110562e4944..7b587283d52 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/context/SeaTunnelSplitEnumeratorContext.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/context/SeaTunnelSplitEnumeratorContext.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.engine.server.task.context; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.source.SourceEvent; import org.apache.seatunnel.api.source.SourceSplit; import org.apache.seatunnel.api.source.SourceSplitEnumerator; @@ -43,14 +44,17 @@ public class SeaTunnelSplitEnumeratorContext private final SourceSplitEnumeratorTask task; private final MetricsContext metricsContext; + private final EventListener eventListener; public SeaTunnelSplitEnumeratorContext( int parallelism, SourceSplitEnumeratorTask task, - MetricsContext metricsContext) { + MetricsContext metricsContext, + EventListener eventListener) { this.parallelism = parallelism; this.task = task; this.metricsContext = metricsContext; + this.eventListener = eventListener; } @Override @@ -100,4 +104,9 @@ public void sendEventToSourceReader(int subtaskId, SourceEvent event) {} public MetricsContext getMetricsContext() { return metricsContext; } + + @Override + public EventListener getEventListener() { + return eventListener; + } } diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/context/SinkWriterContext.java 
b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/context/SinkWriterContext.java index 59cfa334241..747198d3eb8 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/context/SinkWriterContext.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/context/SinkWriterContext.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.engine.server.task.context; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.sink.SinkWriter; public class SinkWriterContext implements SinkWriter.Context { @@ -25,10 +26,13 @@ public class SinkWriterContext implements SinkWriter.Context { private static final long serialVersionUID = -3082515319043725121L; private final int indexID; private final MetricsContext metricsContext; + private final EventListener eventListener; - public SinkWriterContext(int indexID, MetricsContext metricsContext) { + public SinkWriterContext( + int indexID, MetricsContext metricsContext, EventListener eventListener) { this.indexID = indexID; this.metricsContext = metricsContext; + this.eventListener = eventListener; } @Override @@ -40,4 +44,9 @@ public int getIndexOfSubtask() { public MetricsContext getMetricsContext() { return metricsContext; } + + @Override + public EventListener getEventListener() { + return eventListener; + } } diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/context/SourceReaderContext.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/context/SourceReaderContext.java index 3a526930586..8531999e639 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/context/SourceReaderContext.java +++ 
b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/context/SourceReaderContext.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.engine.server.task.context; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.source.Boundedness; import org.apache.seatunnel.api.source.SourceEvent; import org.apache.seatunnel.api.source.SourceReader; @@ -32,16 +33,19 @@ public class SourceReaderContext implements SourceReader.Context { private final SourceFlowLifeCycle sourceActionLifeCycle; private final MetricsContext metricsContext; + private final EventListener eventListener; public SourceReaderContext( int index, Boundedness boundedness, SourceFlowLifeCycle sourceActionLifeCycle, - MetricsContext metricsContext) { + MetricsContext metricsContext, + EventListener eventListener) { this.index = index; this.boundedness = boundedness; this.sourceActionLifeCycle = sourceActionLifeCycle; this.metricsContext = metricsContext; + this.eventListener = eventListener; } @Override @@ -73,4 +77,9 @@ public void sendSourceEventToEnumerator(SourceEvent sourceEvent) { public MetricsContext getMetricsContext() { return metricsContext; } + + @Override + public EventListener getEventListener() { + return eventListener; + } } diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/flow/SinkFlowLifeCycle.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/flow/SinkFlowLifeCycle.java index b8c5f4f663e..1a2143b0271 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/flow/SinkFlowLifeCycle.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/flow/SinkFlowLifeCycle.java @@ -20,6 +20,7 @@ import org.apache.seatunnel.api.common.metrics.Counter; import 
org.apache.seatunnel.api.common.metrics.Meter; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.serialization.Serializer; import org.apache.seatunnel.api.sink.MultiTableResourceManager; import org.apache.seatunnel.api.sink.SinkCommitter; @@ -32,6 +33,7 @@ import org.apache.seatunnel.engine.core.dag.actions.SinkAction; import org.apache.seatunnel.engine.server.checkpoint.ActionStateKey; import org.apache.seatunnel.engine.server.checkpoint.ActionSubtaskState; +import org.apache.seatunnel.engine.server.event.JobEventListener; import org.apache.seatunnel.engine.server.execution.TaskLocation; import org.apache.seatunnel.engine.server.task.SeaTunnelTask; import org.apache.seatunnel.engine.server.task.context.SinkWriterContext; @@ -100,6 +102,8 @@ public class SinkFlowLifeCycle sinkAction, TaskLocation taskLocation, @@ -116,6 +120,7 @@ public SinkFlowLifeCycle( this.committerTaskLocation = committerTaskLocation; this.containAggCommitter = containAggCommitter; this.metricsContext = metricsContext; + this.eventListener = new JobEventListener(taskLocation, runningTask.getExecutionContext()); sinkWriteCount = metricsContext.counter(SINK_WRITE_COUNT); sinkWriteQPS = metricsContext.meter(SINK_WRITE_QPS); sinkWriteBytes = metricsContext.counter(SINK_WRITE_BYTES); @@ -294,12 +299,15 @@ public void restoreState(List actionStateList) throws Except this.writer = sinkAction .getSink() - .createWriter(new SinkWriterContext(indexID, metricsContext)); + .createWriter( + new SinkWriterContext(indexID, metricsContext, eventListener)); } else { this.writer = sinkAction .getSink() - .restoreWriter(new SinkWriterContext(indexID, metricsContext), states); + .restoreWriter( + new SinkWriterContext(indexID, metricsContext, eventListener), + states); } if (this.writer instanceof SupportResourceShare) { resourceManager = diff --git 
a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/flow/SourceFlowLifeCycle.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/flow/SourceFlowLifeCycle.java index 95e54980b48..64e5bfd22b8 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/flow/SourceFlowLifeCycle.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/task/flow/SourceFlowLifeCycle.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.engine.server.task.flow; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.serialization.Serializer; import org.apache.seatunnel.api.source.SourceEvent; import org.apache.seatunnel.api.source.SourceReader; @@ -29,6 +30,7 @@ import org.apache.seatunnel.engine.server.checkpoint.ActionStateKey; import org.apache.seatunnel.engine.server.checkpoint.ActionSubtaskState; import org.apache.seatunnel.engine.server.checkpoint.CheckpointBarrier; +import org.apache.seatunnel.engine.server.event.JobEventListener; import org.apache.seatunnel.engine.server.execution.TaskLocation; import org.apache.seatunnel.engine.server.task.SeaTunnelSourceCollector; import org.apache.seatunnel.engine.server.task.SeaTunnelTask; @@ -80,6 +82,7 @@ public class SourceFlowLifeCycle extends ActionFl private SeaTunnelSourceCollector collector; private final MetricsContext metricsContext; + private final EventListener eventListener; private final AtomicReference schemaChangePhase = new AtomicReference<>(); @@ -97,6 +100,8 @@ public SourceFlowLifeCycle( this.enumeratorTaskLocation = enumeratorTaskLocation; this.currentTaskLocation = currentTaskLocation; this.metricsContext = metricsContext; + this.eventListener = + new JobEventListener(currentTaskLocation, runningTask.getExecutionContext()); } public void 
setCollector(SeaTunnelSourceCollector collector) { @@ -114,7 +119,8 @@ public void init() throws Exception { indexID, sourceAction.getSource().getBoundedness(), this, - metricsContext)); + metricsContext, + eventListener)); this.enumeratorTaskAddress = getEnumeratorTaskAddress(); } diff --git a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandlerTest.java b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandlerTest.java new file mode 100644 index 00000000000..e5a978d486e --- /dev/null +++ b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandlerTest.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.engine.server.event; + +import org.apache.seatunnel.shade.com.fasterxml.jackson.core.type.TypeReference; + +import org.apache.seatunnel.api.event.Event; +import org.apache.seatunnel.api.event.EventType; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import com.hazelcast.config.Config; +import com.hazelcast.core.Hazelcast; +import com.hazelcast.core.HazelcastInstance; +import com.hazelcast.ringbuffer.Ringbuffer; +import com.squareup.okhttp.mockwebserver.MockResponse; +import com.squareup.okhttp.mockwebserver.MockWebServer; +import com.squareup.okhttp.mockwebserver.RecordedRequest; +import lombok.AllArgsConstructor; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.Setter; +import okio.Buffer; + +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.awaitility.Awaitility.given; + +public class JobEventHttpReportHandlerTest { + private static HazelcastInstance hazelcast; + private static MockWebServer mockWebServer; + + @BeforeAll + public static void before() throws IOException { + hazelcast = Hazelcast.newHazelcastInstance(); + mockWebServer = new MockWebServer(); + mockWebServer.start(); + mockWebServer.enqueue(new MockResponse().setResponseCode(200)); + } + + @AfterAll + public static void after() throws IOException { + hazelcast.shutdown(); + mockWebServer.shutdown(); + } + + @Test + public void testReportEvent() throws IOException, InterruptedException { + int maxEvents = 1000; + Ringbuffer ringbuffer = createRingBuffer(maxEvents); + JobEventHttpReportHandler handler = + new JobEventHttpReportHandler( + mockWebServer.url("/api").toString(), Duration.ofSeconds(1), ringbuffer); + for (int i = 0; i < maxEvents; i++) { + handler.handle(new TestEvent(i)); + } + 
given().ignoreExceptions() + .await() + .atMost(10, TimeUnit.SECONDS) + .until(() -> mockWebServer.getRequestCount(), count -> count > 0); + handler.close(); + + List events = new ArrayList<>(); + for (int i = 0; i < mockWebServer.getRequestCount(); i++) { + RecordedRequest request = mockWebServer.takeRequest(); + try (Buffer buffer = request.getBody()) { + String body = buffer.readUtf8(); + List data = + JobEventHttpReportHandler.JSON_MAPPER.readValue( + body, new TypeReference>() {}); + events.addAll(data); + } + } + + Assertions.assertEquals(maxEvents, events.size()); + for (int i = 0; i < maxEvents; i++) { + Assertions.assertEquals(String.valueOf(i), events.get(i).getJobId()); + } + } + + private Ringbuffer createRingBuffer(int capacity) { + String ringBufferName = "test"; + hazelcast + .getConfig() + .addRingBufferConfig( + new Config() + .getRingbufferConfig(ringBufferName) + .setCapacity(capacity) + .setBackupCount(0) + .setAsyncBackupCount(1) + .setTimeToLiveSeconds(0)); + Ringbuffer ringbuffer = hazelcast.getRingbuffer(ringBufferName); + return ringbuffer; + } + + @Getter + @Setter + @NoArgsConstructor + @AllArgsConstructor + static class TestEvent implements Event { + private long createdTime; + private String jobId; + private EventType eventType; + + public TestEvent(long test) { + this.createdTime = test; + this.jobId = String.valueOf(test); + this.eventType = EventType.SCHEMA_CHANGE_UPDATE_COLUMNS; + } + } +} diff --git a/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/CoordinatedEnumeratorContext.java b/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/CoordinatedEnumeratorContext.java index 853892da517..1f79f4691e4 100644 --- a/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/CoordinatedEnumeratorContext.java +++ 
b/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/CoordinatedEnumeratorContext.java @@ -19,6 +19,8 @@ import org.apache.seatunnel.api.common.metrics.AbstractMetricsContext; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.DefaultEventProcessor; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.source.SourceEvent; import org.apache.seatunnel.api.source.SourceSplit; import org.apache.seatunnel.api.source.SourceSplitEnumerator; @@ -30,9 +32,12 @@ public class CoordinatedEnumeratorContext implements SourceSplitEnumerator.Context { protected final CoordinatedSource coordinatedSource; + protected final EventListener eventListener; - public CoordinatedEnumeratorContext(CoordinatedSource coordinatedSource) { + public CoordinatedEnumeratorContext( + CoordinatedSource coordinatedSource, String jobId) { this.coordinatedSource = coordinatedSource; + this.eventListener = new DefaultEventProcessor(jobId); } @Override @@ -66,4 +71,9 @@ public MetricsContext getMetricsContext() { // https://github.com/apache/seatunnel/issues/3431 return new AbstractMetricsContext() {}; } + + @Override + public EventListener getEventListener() { + return eventListener; + } } diff --git a/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/CoordinatedReaderContext.java b/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/CoordinatedReaderContext.java index 723dc5ad2a3..2a60d3ff1f6 100644 --- a/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/CoordinatedReaderContext.java +++ b/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/CoordinatedReaderContext.java @@ -19,6 +19,8 @@ import org.apache.seatunnel.api.common.metrics.AbstractMetricsContext; import 
org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.DefaultEventProcessor; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.source.Boundedness; import org.apache.seatunnel.api.source.SourceEvent; import org.apache.seatunnel.api.source.SourceReader; @@ -28,14 +30,17 @@ public class CoordinatedReaderContext implements SourceReader.Context { protected final CoordinatedSource coordinatedSource; protected final Boundedness boundedness; protected final Integer subtaskId; + protected final EventListener eventListener; public CoordinatedReaderContext( CoordinatedSource coordinatedSource, Boundedness boundedness, + String jobId, Integer subtaskId) { this.coordinatedSource = coordinatedSource; this.boundedness = boundedness; this.subtaskId = subtaskId; + this.eventListener = new DefaultEventProcessor(jobId); } @Override @@ -69,4 +74,9 @@ public MetricsContext getMetricsContext() { // https://github.com/apache/seatunnel/issues/3431 return new AbstractMetricsContext() {}; } + + @Override + public EventListener getEventListener() { + return eventListener; + } } diff --git a/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/CoordinatedSource.java b/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/CoordinatedSource.java index 553bb345071..11b240dd993 100644 --- a/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/CoordinatedSource.java +++ b/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/CoordinatedSource.java @@ -49,6 +49,7 @@ public class CoordinatedSource source; protected final Map> restoredState; protected final Integer parallelism; + protected final String jobId; protected final Serializer splitSerializer; protected final Serializer enumeratorStateSerializer; @@ -69,14 +70,16 @@ 
public class CoordinatedSource source, Map> restoredState, - int parallelism) { + int parallelism, + String jobId) { this.source = source; this.restoredState = restoredState; this.parallelism = parallelism; + this.jobId = jobId; this.splitSerializer = source.getSplitSerializer(); this.enumeratorStateSerializer = source.getEnumeratorStateSerializer(); - this.coordinatedEnumeratorContext = new CoordinatedEnumeratorContext<>(this); + this.coordinatedEnumeratorContext = new CoordinatedEnumeratorContext<>(this, jobId); this.readerContextMap = new ConcurrentHashMap<>(parallelism); this.readerRunningMap = new ConcurrentHashMap<>(parallelism); try { @@ -119,7 +122,7 @@ private void createSplitEnumerator() throws Exception { private void createReaders() throws Exception { for (int subtaskId = 0; subtaskId < this.parallelism; subtaskId++) { CoordinatedReaderContext readerContext = - new CoordinatedReaderContext(this, source.getBoundedness(), subtaskId); + new CoordinatedReaderContext(this, source.getBoundedness(), jobId, subtaskId); readerContextMap.put(subtaskId, readerContext); readerRunningMap.put(subtaskId, new AtomicBoolean(true)); SourceReader reader = source.createReader(readerContext); diff --git a/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/ParallelEnumeratorContext.java b/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/ParallelEnumeratorContext.java index d9f1d768178..8d8c640a888 100644 --- a/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/ParallelEnumeratorContext.java +++ b/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/ParallelEnumeratorContext.java @@ -19,6 +19,8 @@ import org.apache.seatunnel.api.common.metrics.AbstractMetricsContext; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import 
org.apache.seatunnel.api.event.DefaultEventProcessor; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.source.SourceEvent; import org.apache.seatunnel.api.source.SourceSplit; import org.apache.seatunnel.api.source.SourceSplitEnumerator; @@ -33,13 +35,18 @@ public class ParallelEnumeratorContext protected final ParallelSource parallelSource; protected final Integer parallelism; protected final Integer subtaskId; + protected final EventListener eventListener; protected volatile boolean running = false; public ParallelEnumeratorContext( - ParallelSource parallelSource, int parallelism, int subtaskId) { + ParallelSource parallelSource, + int parallelism, + String jobId, + int subtaskId) { this.parallelSource = parallelSource; this.parallelism = parallelism; this.subtaskId = subtaskId; + this.eventListener = new DefaultEventProcessor(jobId); } @Override @@ -83,4 +90,9 @@ public MetricsContext getMetricsContext() { // https://github.com/apache/seatunnel/issues/3431 return new AbstractMetricsContext() {}; } + + @Override + public EventListener getEventListener() { + return eventListener; + } } diff --git a/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/ParallelReaderContext.java b/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/ParallelReaderContext.java index 704cecc1016..96e99cf5c7c 100644 --- a/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/ParallelReaderContext.java +++ b/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/ParallelReaderContext.java @@ -19,6 +19,8 @@ import org.apache.seatunnel.api.common.metrics.AbstractMetricsContext; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.DefaultEventProcessor; +import org.apache.seatunnel.api.event.EventListener; 
import org.apache.seatunnel.api.source.Boundedness; import org.apache.seatunnel.api.source.SourceEvent; import org.apache.seatunnel.api.source.SourceReader; @@ -28,12 +30,17 @@ public class ParallelReaderContext implements SourceReader.Context { protected final ParallelSource parallelSource; protected final Boundedness boundedness; protected final Integer subtaskId; + protected final EventListener eventListener; public ParallelReaderContext( - ParallelSource parallelSource, Boundedness boundedness, Integer subtaskId) { + ParallelSource parallelSource, + Boundedness boundedness, + String jobId, + Integer subtaskId) { this.parallelSource = parallelSource; this.boundedness = boundedness; this.subtaskId = subtaskId; + this.eventListener = new DefaultEventProcessor(jobId); } @Override @@ -69,4 +76,9 @@ public MetricsContext getMetricsContext() { // https://github.com/apache/seatunnel/issues/3431 return new AbstractMetricsContext() {}; } + + @Override + public EventListener getEventListener() { + return eventListener; + } } diff --git a/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/ParallelSource.java b/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/ParallelSource.java index c6934877a01..4cc1bfd1418 100644 --- a/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/ParallelSource.java +++ b/seatunnel-translation/seatunnel-translation-base/src/main/java/org/apache/seatunnel/translation/source/ParallelSource.java @@ -45,6 +45,7 @@ public class ParallelSource source; protected final ParallelEnumeratorContext parallelEnumeratorContext; protected final ParallelReaderContext readerContext; + protected final String jobId; protected final Integer subtaskId; protected final Integer parallelism; @@ -64,16 +65,19 @@ public ParallelSource( SeaTunnelSource source, Map> restoredState, int parallelism, + String jobId, int 
subtaskId) { this.source = source; + this.jobId = jobId; this.subtaskId = subtaskId; this.parallelism = parallelism; this.splitSerializer = source.getSplitSerializer(); this.enumeratorStateSerializer = source.getEnumeratorStateSerializer(); this.parallelEnumeratorContext = - new ParallelEnumeratorContext<>(this, parallelism, subtaskId); - this.readerContext = new ParallelReaderContext(this, source.getBoundedness(), subtaskId); + new ParallelEnumeratorContext<>(this, parallelism, jobId, subtaskId); + this.readerContext = + new ParallelReaderContext(this, source.getBoundedness(), jobId, subtaskId); // Create or restore split enumerator & reader try { diff --git a/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-13/src/main/java/org/apache/seatunnel/translation/flink/sink/FlinkSinkWriterContext.java b/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-13/src/main/java/org/apache/seatunnel/translation/flink/sink/FlinkSinkWriterContext.java index 95f72e8ed56..4a81b43c9a8 100644 --- a/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-13/src/main/java/org/apache/seatunnel/translation/flink/sink/FlinkSinkWriterContext.java +++ b/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-13/src/main/java/org/apache/seatunnel/translation/flink/sink/FlinkSinkWriterContext.java @@ -18,9 +18,12 @@ package org.apache.seatunnel.translation.flink.sink; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.DefaultEventProcessor; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.sink.SinkWriter; import org.apache.seatunnel.translation.flink.metric.FlinkMetricContext; +import org.apache.flink.api.connector.sink.Sink; import org.apache.flink.api.connector.sink.Sink.InitContext; import org.apache.flink.metrics.MetricGroup; import org.apache.flink.streaming.api.operators.StreamingRuntimeContext; @@ 
-35,9 +38,11 @@ public class FlinkSinkWriterContext implements SinkWriter.Context { private static final Logger LOGGER = LoggerFactory.getLogger(FlinkMetricContext.class); private final InitContext writerContext; + private final EventListener eventListener; public FlinkSinkWriterContext(InitContext writerContext) { this.writerContext = writerContext; + this.eventListener = new DefaultEventProcessor(getJobIdForV14(writerContext)); } @Override @@ -47,13 +52,9 @@ public int getIndexOfSubtask() { @Override public MetricsContext getMetricsContext() { - // In flink 1.14, it has contained runtimeContext in InitContext, so first step to detect if - // it is existed try { - Field field = writerContext.getClass().getDeclaredField("runtimeContext"); - field.setAccessible(true); StreamingRuntimeContext runtimeContext = - (StreamingRuntimeContext) field.get(writerContext); + getStreamingRuntimeContextForV14(writerContext); return new FlinkMetricContext(runtimeContext); } catch (Exception e) { LOGGER.info( @@ -71,4 +72,30 @@ public MetricsContext getMetricsContext() { throw new IllegalStateException("Initial sink metrics failed", e); } } + + @Override + public EventListener getEventListener() { + return eventListener; + } + + private static StreamingRuntimeContext getStreamingRuntimeContextForV14( + Sink.InitContext writerContext) throws NoSuchFieldException, IllegalAccessException { + // In flink 1.14, it has contained runtimeContext in InitContext, so first step to + // detect if + // it is existed + Field field = writerContext.getClass().getDeclaredField("runtimeContext"); + field.setAccessible(true); + return (StreamingRuntimeContext) field.get(writerContext); + } + + private static String getJobIdForV14(Sink.InitContext writerContext) { + try { + StreamingRuntimeContext runtimeContext = + getStreamingRuntimeContextForV14(writerContext); + return runtimeContext != null ? 
runtimeContext.getJobId().toString() : null; + } catch (Exception e) { + LOGGER.info("Flink version is not 1.14.x, will not initial job id"); + return null; + } + } } diff --git a/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-common/src/main/java/org/apache/seatunnel/translation/flink/sink/FlinkSinkWriterContext.java b/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-common/src/main/java/org/apache/seatunnel/translation/flink/sink/FlinkSinkWriterContext.java index 5f5a699a5ce..7969f445097 100644 --- a/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-common/src/main/java/org/apache/seatunnel/translation/flink/sink/FlinkSinkWriterContext.java +++ b/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-common/src/main/java/org/apache/seatunnel/translation/flink/sink/FlinkSinkWriterContext.java @@ -18,6 +18,8 @@ package org.apache.seatunnel.translation.flink.sink; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.DefaultEventProcessor; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.sink.SinkWriter; import org.apache.seatunnel.translation.flink.metric.FlinkMetricContext; @@ -25,14 +27,19 @@ import org.apache.flink.api.connector.sink.Sink.InitContext; import org.apache.flink.streaming.api.operators.StreamingRuntimeContext; +import lombok.extern.slf4j.Slf4j; + import java.lang.reflect.Field; +@Slf4j public class FlinkSinkWriterContext implements SinkWriter.Context { private final Sink.InitContext writerContext; + private final EventListener eventListener; public FlinkSinkWriterContext(InitContext writerContext) { this.writerContext = writerContext; + this.eventListener = new DefaultEventProcessor(getFlinkJobId(writerContext)); } @Override @@ -42,17 +49,35 @@ public int getIndexOfSubtask() { @Override public MetricsContext getMetricsContext() { + return new 
FlinkMetricContext(getStreamingRuntimeContextForV15(writerContext)); + } + + @Override + public EventListener getEventListener() { + return eventListener; + } + + private static String getFlinkJobId(Sink.InitContext writerContext) { + try { + return getStreamingRuntimeContextForV15(writerContext).getJobId().toString(); + } catch (Exception e) { + // ignore + log.warn("Get flink job id failed", e); + return null; + } + } + + private static StreamingRuntimeContext getStreamingRuntimeContextForV15( + Sink.InitContext writerContext) { try { Field contextImplField = writerContext.getClass().getDeclaredField("context"); contextImplField.setAccessible(true); Object contextImpl = contextImplField.get(writerContext); Field runtimeContextField = contextImpl.getClass().getDeclaredField("runtimeContext"); runtimeContextField.setAccessible(true); - StreamingRuntimeContext runtimeContext = - (StreamingRuntimeContext) runtimeContextField.get(contextImpl); - return new FlinkMetricContext(runtimeContext); + return (StreamingRuntimeContext) runtimeContextField.get(contextImpl); } catch (Exception e) { - throw new IllegalStateException("Initialize sink metrics failed", e); + throw new IllegalStateException("Initialize flink context failed", e); } } } diff --git a/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-common/src/main/java/org/apache/seatunnel/translation/flink/source/FlinkSourceReaderContext.java b/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-common/src/main/java/org/apache/seatunnel/translation/flink/source/FlinkSourceReaderContext.java index 576a35e0445..2b20e0d4047 100644 --- a/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-common/src/main/java/org/apache/seatunnel/translation/flink/source/FlinkSourceReaderContext.java +++ 
b/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-common/src/main/java/org/apache/seatunnel/translation/flink/source/FlinkSourceReaderContext.java @@ -18,6 +18,8 @@ package org.apache.seatunnel.translation.flink.source; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.DefaultEventProcessor; +import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.source.SeaTunnelSource; import org.apache.seatunnel.api.source.SourceEvent; import org.apache.seatunnel.api.source.SourceReader; @@ -30,6 +32,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import lombok.extern.slf4j.Slf4j; + import java.lang.reflect.Field; import java.util.concurrent.atomic.AtomicBoolean; @@ -37,6 +41,7 @@ * The implementation of {@link org.apache.seatunnel.api.source.SourceReader.Context} for flink * engine. */ +@Slf4j public class FlinkSourceReaderContext implements SourceReader.Context { private static final Logger LOGGER = LoggerFactory.getLogger(FlinkSourceReaderContext.class); @@ -46,10 +51,12 @@ public class FlinkSourceReaderContext implements SourceReader.Context { private final SourceReaderContext readerContext; private final SeaTunnelSource source; + protected final EventListener eventListener; public FlinkSourceReaderContext(SourceReaderContext readerContext, SeaTunnelSource source) { this.readerContext = readerContext; this.source = source; + this.eventListener = new DefaultEventProcessor(getFlinkJobId(readerContext)); } @Override @@ -87,19 +94,38 @@ public void sendSourceEventToEnumerator(SourceEvent sourceEvent) { @Override public MetricsContext getMetricsContext() { + return new FlinkMetricContext(getStreamingRuntimeContext(readerContext)); + } + + public boolean isSendNoMoreElementEvent() { + return isSendNoMoreElementEvent.get(); + } + + @Override + public EventListener getEventListener() { + return eventListener; + } + + private static String 
getFlinkJobId(SourceReaderContext readerContext) { + try { + return getStreamingRuntimeContext(readerContext).getJobId().toString(); + } catch (Exception e) { + // ignore + log.warn("Get flink job id failed", e); + return null; + } + } + + private static StreamingRuntimeContext getStreamingRuntimeContext( + SourceReaderContext readerContext) { try { Field field = readerContext.getClass().getDeclaredField("this$0"); field.setAccessible(true); AbstractStreamOperator operator = (AbstractStreamOperator) field.get(readerContext); - StreamingRuntimeContext runtimeContext = operator.getRuntimeContext(); - return new FlinkMetricContext(runtimeContext); + return operator.getRuntimeContext(); } catch (Exception e) { - throw new IllegalStateException("Initialize source metrics failed", e); + throw new IllegalStateException("Initialize flink context failed", e); } } - - public boolean isSendNoMoreElementEvent() { - return isSendNoMoreElementEvent.get(); - } } diff --git a/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-common/src/main/java/org/apache/seatunnel/translation/flink/source/FlinkSourceSplitEnumeratorContext.java b/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-common/src/main/java/org/apache/seatunnel/translation/flink/source/FlinkSourceSplitEnumeratorContext.java index e0d5ceb0156..f03d1e7e699 100644 --- a/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-common/src/main/java/org/apache/seatunnel/translation/flink/source/FlinkSourceSplitEnumeratorContext.java +++ b/seatunnel-translation/seatunnel-translation-flink/seatunnel-translation-flink-common/src/main/java/org/apache/seatunnel/translation/flink/source/FlinkSourceSplitEnumeratorContext.java @@ -19,13 +19,23 @@ import org.apache.seatunnel.api.common.metrics.AbstractMetricsContext; import org.apache.seatunnel.api.common.metrics.MetricsContext; +import org.apache.seatunnel.api.event.DefaultEventProcessor; +import 
org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.source.SourceEvent; import org.apache.seatunnel.api.source.SourceSplit; import org.apache.seatunnel.api.source.SourceSplitEnumerator; import org.apache.flink.api.connector.source.SplitEnumeratorContext; +import org.apache.flink.runtime.operators.coordination.OperatorCoordinator; +import org.apache.flink.runtime.scheduler.SchedulerBase; +import org.apache.flink.runtime.source.coordinator.SourceCoordinatorContext; +import lombok.extern.slf4j.Slf4j; + +import java.lang.reflect.Field; +import java.util.Arrays; import java.util.List; +import java.util.Optional; import java.util.Set; /** @@ -34,14 +44,17 @@ * * @param */ +@Slf4j public class FlinkSourceSplitEnumeratorContext implements SourceSplitEnumerator.Context { private final SplitEnumeratorContext> enumContext; + protected final EventListener eventListener; public FlinkSourceSplitEnumeratorContext( SplitEnumeratorContext> enumContext) { this.enumContext = enumContext; + this.eventListener = new DefaultEventProcessor(getFlinkJobId(enumContext)); } @Override @@ -76,4 +89,67 @@ public void sendEventToSourceReader(int subtaskId, SourceEvent event) { public MetricsContext getMetricsContext() { return new AbstractMetricsContext() {}; } + + @Override + public EventListener getEventListener() { + return eventListener; + } + + private static String getFlinkJobId(SplitEnumeratorContext enumContext) { + try { + return getJobIdForV15(enumContext); + } catch (Exception e) { + log.warn("Get flink job id failed", e); + return null; + } + } + + private static String getJobIdForV15(SplitEnumeratorContext enumContext) { + try { + SourceCoordinatorContext coordinatorContext = (SourceCoordinatorContext) enumContext; + Field field = + coordinatorContext.getClass().getDeclaredField("operatorCoordinatorContext"); + field.setAccessible(true); + OperatorCoordinator.Context operatorCoordinatorContext = + (OperatorCoordinator.Context) field.get(coordinatorContext); 
+ Field[] fields = operatorCoordinatorContext.getClass().getDeclaredFields(); + Optional fieldOptional = + Arrays.stream(fields) + .filter(f -> f.getName().equals("globalFailureHandler")) + .findFirst(); + if (!fieldOptional.isPresent()) { + // RecreateOnResetOperatorCoordinator.QuiesceableContext + fieldOptional = + Arrays.stream(fields) + .filter(f -> f.getName().equals("context")) + .findFirst(); + field = fieldOptional.get(); + field.setAccessible(true); + operatorCoordinatorContext = + (OperatorCoordinator.Context) field.get(operatorCoordinatorContext); + } + + // OperatorCoordinatorHolder.LazyInitializedCoordinatorContext + field = + Arrays.stream(operatorCoordinatorContext.getClass().getDeclaredFields()) + .filter(f -> f.getName().equals("globalFailureHandler")) + .findFirst() + .get(); + field.setAccessible(true); + + // SchedulerBase$xxx + Object obj = field.get(operatorCoordinatorContext); + fields = obj.getClass().getDeclaredFields(); + field = + Arrays.stream(fields) + .filter(f -> f.getName().equals("arg$1")) + .findFirst() + .get(); + field.setAccessible(true); + SchedulerBase schedulerBase = (SchedulerBase) field.get(obj); + return schedulerBase.getExecutionGraph().getJobID().toString(); + } catch (Exception e) { + throw new IllegalStateException("Initialize flink job-id failed", e); + } + } } diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/SparkSink.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/SparkSink.java index 59f17cd7a58..96438d3ac73 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/SparkSink.java +++ 
b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/SparkSink.java @@ -45,6 +45,8 @@ public class SparkSink private volatile CatalogTable catalogTable; + private volatile String jobId; + private void init(DataSourceOptions options) { if (sink == null) { this.sink = @@ -66,6 +68,9 @@ private void init(DataSourceOptions options) { "can not find sink " + "catalog table string in DataSourceOptions"))); } + if (jobId == null) { + this.jobId = options.get(SparkSinkInjector.JOB_ID).orElse(null); + } } @Override @@ -74,7 +79,7 @@ public StreamWriter createStreamWriter( init(options); try { - return new SparkStreamWriter<>(sink, catalogTable); + return new SparkStreamWriter<>(sink, catalogTable, jobId); } catch (IOException e) { throw new RuntimeException("find error when createStreamWriter", e); } @@ -86,7 +91,7 @@ public Optional createWriter( init(options); try { - return Optional.of(new SparkDataSourceWriter<>(sink, catalogTable)); + return Optional.of(new SparkDataSourceWriter<>(sink, catalogTable, jobId)); } catch (IOException e) { throw new RuntimeException("find error when createStreamWriter", e); } diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/SparkSinkInjector.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/SparkSinkInjector.java index e19957625c2..44aec8f70a2 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/SparkSinkInjector.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/SparkSinkInjector.java @@ -33,23 +33,28 @@ public class SparkSinkInjector { 
"org.apache.seatunnel.translation.spark.sink.SparkSink"; public static final String SINK_CATALOG_TABLE = "sink.catalog.table"; + public static final String JOB_ID = "jobId"; public static DataStreamWriter inject( DataStreamWriter dataset, SeaTunnelSink sink, - CatalogTable catalogTable) { + CatalogTable catalogTable, + String applicationId) { return dataset.format(SPARK_SINK_CLASS_NAME) .outputMode(OutputMode.Append()) .option(Constants.SINK_SERIALIZATION, SerializationUtils.objectToString(sink)) - .option(SINK_CATALOG_TABLE, SerializationUtils.objectToString(catalogTable)); + .option(SINK_CATALOG_TABLE, SerializationUtils.objectToString(catalogTable)) + .option(JOB_ID, applicationId); } public static DataFrameWriter inject( DataFrameWriter dataset, SeaTunnelSink sink, - CatalogTable catalogTable) { + CatalogTable catalogTable, + String applicationId) { return dataset.format(SPARK_SINK_CLASS_NAME) .option(Constants.SINK_SERIALIZATION, SerializationUtils.objectToString(sink)) - .option(SINK_CATALOG_TABLE, SerializationUtils.objectToString(catalogTable)); + .option(SINK_CATALOG_TABLE, SerializationUtils.objectToString(catalogTable)) + .option(JOB_ID, applicationId); } } diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/writer/SparkDataSourceWriter.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/writer/SparkDataSourceWriter.java index 5a5bb39f9a3..b02ae32fbc0 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/writer/SparkDataSourceWriter.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/writer/SparkDataSourceWriter.java @@ -48,15 +48,18 @@ public class 
SparkDataSourceWriter sinkAggregatedCommitter; protected final CatalogTable catalogTable; + protected final String jobId; private MultiTableResourceManager resourceManager; public SparkDataSourceWriter( SeaTunnelSink sink, - CatalogTable catalogTable) + CatalogTable catalogTable, + String jobId) throws IOException { this.sink = sink; this.catalogTable = catalogTable; + this.jobId = jobId; this.sinkAggregatedCommitter = sink.createAggregatedCommitter().orElse(null); if (sinkAggregatedCommitter != null) { // TODO close it @@ -75,7 +78,7 @@ public SparkDataSourceWriter( @Override public DataWriterFactory createWriterFactory() { - return new SparkDataWriterFactory<>(sink, catalogTable); + return new SparkDataWriterFactory<>(sink, catalogTable, jobId); } @Override diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/writer/SparkDataWriterFactory.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/writer/SparkDataWriterFactory.java index dc629c55126..a9c6cd445a1 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/writer/SparkDataWriterFactory.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/writer/SparkDataWriterFactory.java @@ -34,17 +34,21 @@ public class SparkDataWriterFactory implements DataWriterFa private final SeaTunnelSink sink; private final CatalogTable catalogTable; + private final String jobId; SparkDataWriterFactory( - SeaTunnelSink sink, CatalogTable catalogTable) { + SeaTunnelSink sink, + CatalogTable catalogTable, + String jobId) { this.sink = sink; this.catalogTable = catalogTable; + this.jobId = jobId; } @Override public DataWriter createDataWriter(int 
partitionId, long taskId, long epochId) { org.apache.seatunnel.api.sink.SinkWriter.Context context = - new DefaultSinkWriterContext((int) taskId); + new DefaultSinkWriterContext(jobId, (int) taskId); SinkWriter writer; SinkCommitter committer; try { diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/writer/SparkStreamWriter.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/writer/SparkStreamWriter.java index bf58f1160de..4d297454f8b 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/writer/SparkStreamWriter.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/sink/writer/SparkStreamWriter.java @@ -34,9 +34,10 @@ public class SparkStreamWriter public SparkStreamWriter( SeaTunnelSink sink, - CatalogTable catalogTable) + CatalogTable catalogTable, + String jobId) throws IOException { - super(sink, catalogTable); + super(sink, catalogTable, jobId); } @Override diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/SeaTunnelSourceSupport.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/SeaTunnelSourceSupport.java index 2716024bb85..be97f39752a 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/SeaTunnelSourceSupport.java +++ 
b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/SeaTunnelSourceSupport.java @@ -66,7 +66,8 @@ public DataSourceReader createReader(DataSourceOptions options) { SeaTunnelSource seaTunnelSource = getSeaTunnelSource(options); int parallelism = options.getInt(CommonOptions.PARALLELISM.key(), 1); Map envOptions = options.asMap(); - return new BatchSourceReader(seaTunnelSource, parallelism, envOptions); + String applicationId = SparkSession.getActiveSession().get().sparkContext().applicationId(); + return new BatchSourceReader(seaTunnelSource, applicationId, parallelism, envOptions); } @Override @@ -76,6 +77,7 @@ public MicroBatchReader createMicroBatchReader( DataSourceOptions options) { SeaTunnelSource seaTunnelSource = getSeaTunnelSource(options); Integer parallelism = options.getInt(CommonOptions.PARALLELISM.key(), 1); + String applicationId = SparkSession.getActiveSession().get().sparkContext().applicationId(); Integer checkpointInterval = options.getInt( EnvCommonOptions.CHECKPOINT_INTERVAL.key(), CHECKPOINT_INTERVAL_DEFAULT); @@ -92,6 +94,7 @@ public MicroBatchReader createMicroBatchReader( return new MicroBatchSourceReader( seaTunnelSource, parallelism, + applicationId, checkpointId, checkpointInterval, checkpointPath, diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/BatchPartition.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/BatchPartition.java index 650f7b442c2..965706024c2 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/BatchPartition.java +++ 
b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/BatchPartition.java @@ -33,16 +33,19 @@ public class BatchPartition implements InputPartition { protected final SeaTunnelSource source; protected final Integer parallelism; + protected final String jobId; protected final Integer subtaskId; private Map envOptions; public BatchPartition( SeaTunnelSource source, Integer parallelism, + String jobId, Integer subtaskId, Map envOptions) { this.source = source; this.parallelism = parallelism; + this.jobId = jobId; this.subtaskId = subtaskId; this.envOptions = envOptions; } @@ -52,10 +55,12 @@ public InputPartitionReader createPartitionReader() { ParallelBatchPartitionReader partitionReader; if (source instanceof SupportCoordinate) { partitionReader = - new CoordinatedBatchPartitionReader(source, parallelism, subtaskId, envOptions); + new CoordinatedBatchPartitionReader( + source, parallelism, jobId, subtaskId, envOptions); } else { partitionReader = - new ParallelBatchPartitionReader(source, parallelism, subtaskId, envOptions); + new ParallelBatchPartitionReader( + source, parallelism, jobId, subtaskId, envOptions); } return new SeaTunnelInputPartitionReader(partitionReader); } diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/MicroBatchPartition.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/MicroBatchPartition.java index 139c8050157..8f281603575 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/MicroBatchPartition.java +++ 
b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/MicroBatchPartition.java @@ -34,6 +34,7 @@ public class MicroBatchPartition implements InputPartition { protected final SeaTunnelSource source; protected final Integer parallelism; + protected final String jobId; protected final Integer subtaskId; protected final Integer checkpointId; protected final Integer checkpointInterval; @@ -45,6 +46,7 @@ public class MicroBatchPartition implements InputPartition { public MicroBatchPartition( SeaTunnelSource source, Integer parallelism, + String jobId, Integer subtaskId, Integer checkpointId, Integer checkpointInterval, @@ -54,6 +56,7 @@ public MicroBatchPartition( Map envOptions) { this.source = source; this.parallelism = parallelism; + this.jobId = jobId; this.subtaskId = subtaskId; this.checkpointId = checkpointId; this.checkpointInterval = checkpointInterval; @@ -71,6 +74,7 @@ public InputPartitionReader createPartitionReader() { new CoordinatedMicroBatchPartitionReader( source, parallelism, + jobId, subtaskId, checkpointId, checkpointInterval, @@ -83,6 +87,7 @@ public InputPartitionReader createPartitionReader() { new ParallelMicroBatchPartitionReader( source, parallelism, + jobId, subtaskId, checkpointId, checkpointInterval, diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/batch/BatchSourceReader.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/batch/BatchSourceReader.java index 4836b2a22c1..4f5528622d4 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/batch/BatchSourceReader.java +++ 
b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/batch/BatchSourceReader.java @@ -35,14 +35,17 @@ public class BatchSourceReader implements DataSourceReader { protected final SeaTunnelSource source; + protected final String jobId; protected final Integer parallelism; private Map envOptions; public BatchSourceReader( SeaTunnelSource source, + String jobId, Integer parallelism, Map envOptions) { this.source = source; + this.jobId = jobId; this.parallelism = parallelism; this.envOptions = envOptions; } @@ -57,12 +60,12 @@ public List> planInputPartitions() { List> virtualPartitions; if (source instanceof SupportCoordinate) { virtualPartitions = new ArrayList<>(1); - virtualPartitions.add(new BatchPartition(source, parallelism, 0, envOptions)); + virtualPartitions.add(new BatchPartition(source, parallelism, jobId, 0, envOptions)); } else { virtualPartitions = new ArrayList<>(parallelism); for (int subtaskId = 0; subtaskId < parallelism; subtaskId++) { virtualPartitions.add( - new BatchPartition(source, parallelism, subtaskId, envOptions)); + new BatchPartition(source, parallelism, jobId, subtaskId, envOptions)); } } return virtualPartitions; diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/batch/CoordinatedBatchPartitionReader.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/batch/CoordinatedBatchPartitionReader.java index 436e371acc2..bf7dd221425 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/batch/CoordinatedBatchPartitionReader.java +++ 
b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/batch/CoordinatedBatchPartitionReader.java @@ -39,9 +39,10 @@ public class CoordinatedBatchPartitionReader extends ParallelBatchPartitionReade public CoordinatedBatchPartitionReader( SeaTunnelSource source, Integer parallelism, + String jobId, Integer subtaskId, Map envOptions) { - super(source, parallelism, subtaskId, envOptions); + super(source, parallelism, jobId, subtaskId, envOptions); this.collectorMap = new HashMap<>(parallelism); for (int i = 0; i < parallelism; i++) { collectorMap.put( @@ -58,7 +59,7 @@ protected String getEnumeratorThreadName() { @Override protected BaseSourceFunction createInternalSource() { - return new InternalCoordinatedSource<>(source, null, parallelism); + return new InternalCoordinatedSource<>(source, null, parallelism, jobId); } public class InternalCoordinatedSource @@ -67,8 +68,9 @@ public class InternalCoordinatedSource source, Map> restoredState, - int parallelism) { - super(source, restoredState, parallelism); + int parallelism, + String jobId) { + super(source, restoredState, parallelism, jobId); } @Override diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/batch/ParallelBatchPartitionReader.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/batch/ParallelBatchPartitionReader.java index 056af8e748e..71ed2f7c224 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/batch/ParallelBatchPartitionReader.java +++ 
b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/batch/ParallelBatchPartitionReader.java @@ -43,6 +43,7 @@ public class ParallelBatchPartitionReader { protected final SeaTunnelSource source; protected final Integer parallelism; + protected final String jobId; protected final Integer subtaskId; protected final ExecutorService executorService; @@ -60,10 +61,12 @@ public class ParallelBatchPartitionReader { public ParallelBatchPartitionReader( SeaTunnelSource source, Integer parallelism, + String jobId, Integer subtaskId, Map envOptions) { this.source = source; this.parallelism = parallelism; + this.jobId = jobId; this.subtaskId = subtaskId; this.executorService = ThreadPoolExecutorFactory.createScheduledThreadPoolExecutor( @@ -118,7 +121,7 @@ protected void prepare() { } protected BaseSourceFunction createInternalSource() { - return new InternalParallelSource<>(source, null, parallelism, subtaskId); + return new InternalParallelSource<>(source, null, parallelism, jobId, subtaskId); } public InternalRow get() { @@ -148,8 +151,9 @@ public InternalParallelSource( SeaTunnelSource source, Map> restoredState, int parallelism, + String jobId, int subtaskId) { - super(source, restoredState, parallelism, subtaskId); + super(source, restoredState, parallelism, jobId, subtaskId); } @Override diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/micro/CoordinatedMicroBatchPartitionReader.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/micro/CoordinatedMicroBatchPartitionReader.java index 0cec65e6137..11e57cc4374 100644 --- 
a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/micro/CoordinatedMicroBatchPartitionReader.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/micro/CoordinatedMicroBatchPartitionReader.java @@ -40,6 +40,7 @@ public class CoordinatedMicroBatchPartitionReader extends ParallelMicroBatchPart public CoordinatedMicroBatchPartitionReader( SeaTunnelSource source, Integer parallelism, + String jobId, Integer subtaskId, Integer checkpointId, Integer checkpointInterval, @@ -50,6 +51,7 @@ public CoordinatedMicroBatchPartitionReader( super( source, parallelism, + jobId, subtaskId, checkpointId, checkpointInterval, @@ -126,7 +128,7 @@ protected String getEnumeratorThreadName() { @Override protected BaseSourceFunction createInternalSource() { - return new InternalCoordinatedSource<>(source, null, parallelism); + return new InternalCoordinatedSource<>(source, null, parallelism, jobId); } public class InternalCoordinatedSource @@ -135,8 +137,9 @@ public class InternalCoordinatedSource source, Map> restoredState, - int parallelism) { - super(source, restoredState, parallelism); + int parallelism, + String jobId) { + super(source, restoredState, parallelism, jobId); } @Override diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/micro/MicroBatchSourceReader.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/micro/MicroBatchSourceReader.java index 767a5ed3f13..d49e5bd6a62 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/micro/MicroBatchSourceReader.java 
+++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/micro/MicroBatchSourceReader.java @@ -40,6 +40,7 @@ public class MicroBatchSourceReader implements MicroBatchReader { protected final SeaTunnelSource source; protected final Integer parallelism; + protected final String jobId; protected final Integer checkpointInterval; protected final String checkpointPath; @@ -53,6 +54,7 @@ public class MicroBatchSourceReader implements MicroBatchReader { public MicroBatchSourceReader( SeaTunnelSource source, Integer parallelism, + String jobId, Integer checkpointId, Integer checkpointInterval, String checkpointPath, @@ -61,6 +63,7 @@ public MicroBatchSourceReader( Map envOptions) { this.source = source; this.parallelism = parallelism; + this.jobId = jobId; this.checkpointId = checkpointId; this.checkpointInterval = checkpointInterval; this.checkpointPath = checkpointPath; @@ -117,6 +120,7 @@ public List> planInputPartitions() { new MicroBatchPartition( source, parallelism, + jobId, 0, checkpointId, checkpointInterval, @@ -131,6 +135,7 @@ public List> planInputPartitions() { new MicroBatchPartition( source, parallelism, + jobId, subtaskId, checkpointId, checkpointInterval, diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/micro/ParallelMicroBatchPartitionReader.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/micro/ParallelMicroBatchPartitionReader.java index 7b8c662b604..d3457246f7c 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/micro/ParallelMicroBatchPartitionReader.java +++ 
b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-2.4/src/main/java/org/apache/seatunnel/translation/spark/source/reader/micro/ParallelMicroBatchPartitionReader.java @@ -58,6 +58,7 @@ public class ParallelMicroBatchPartitionReader extends ParallelBatchPartitionRea public ParallelMicroBatchPartitionReader( SeaTunnelSource source, Integer parallelism, + String jobId, Integer subtaskId, Integer checkpointId, Integer checkpointInterval, @@ -65,7 +66,7 @@ public ParallelMicroBatchPartitionReader( String hdfsRoot, String hdfsUser, Map envOptions) { - super(source, parallelism, subtaskId, envOptions); + super(source, parallelism, jobId, subtaskId, envOptions); this.checkpointId = checkpointId; this.checkpointInterval = checkpointInterval; this.checkpointPath = checkpointPath; @@ -75,7 +76,7 @@ public ParallelMicroBatchPartitionReader( @Override protected BaseSourceFunction createInternalSource() { - return new InternalParallelSource<>(source, restoredState, parallelism, subtaskId); + return new InternalParallelSource<>(source, restoredState, parallelism, jobId, subtaskId); } @Override diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/SeaTunnelBatchWrite.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/SeaTunnelBatchWrite.java index 9dd3194979e..a650854e2ac 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/SeaTunnelBatchWrite.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/SeaTunnelBatchWrite.java @@ -51,12 +51,16 @@ public class SeaTunnelBatchWrite private final CatalogTable catalogTable; + private final String jobId; + public 
SeaTunnelBatchWrite( SeaTunnelSink sink, - CatalogTable catalogTable) + CatalogTable catalogTable, + String jobId) throws IOException { this.sink = sink; this.catalogTable = catalogTable; + this.jobId = jobId; this.aggregatedCommitter = sink.createAggregatedCommitter().orElse(null); if (aggregatedCommitter != null) { if (this.aggregatedCommitter instanceof SupportResourceShare) { @@ -74,7 +78,7 @@ public SeaTunnelBatchWrite( @Override public DataWriterFactory createBatchWriterFactory(PhysicalWriteInfo info) { - return new SeaTunnelSparkDataWriterFactory<>(sink, catalogTable); + return new SeaTunnelSparkDataWriterFactory<>(sink, catalogTable, jobId); } @Override diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/SeaTunnelSinkTable.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/SeaTunnelSinkTable.java index be20846fffe..1fb8109eb5d 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/SeaTunnelSinkTable.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/SeaTunnelSinkTable.java @@ -47,6 +47,7 @@ public class SeaTunnelSinkTable implements Table, SupportsWrite { private final SeaTunnelSink sink; private final CatalogTable catalogTable; + private final String jobId; public SeaTunnelSinkTable(Map properties) { this.properties = properties; @@ -62,11 +63,12 @@ public SeaTunnelSinkTable(Map properties) { SparkSinkInjector.SINK_CATALOG_TABLE + " must be specified"); } this.catalogTable = SerializationUtils.stringToObject(sinkCatalogTableSerialization); + this.jobId = properties.getOrDefault(SparkSinkInjector.JOB_ID, null); } @Override public WriteBuilder 
newWriteBuilder(LogicalWriteInfo info) { - return new SeaTunnelWriteBuilder<>(sink, catalogTable); + return new SeaTunnelWriteBuilder<>(sink, catalogTable, jobId); } @Override diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/SparkSinkInjector.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/SparkSinkInjector.java index 41b0d7153bc..d6d148c3fed 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/SparkSinkInjector.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/SparkSinkInjector.java @@ -33,22 +33,28 @@ public class SparkSinkInjector { public static final String SINK_CATALOG_TABLE = "sink.catalog.table"; + public static final String JOB_ID = "jobId"; + public static DataStreamWriter inject( DataStreamWriter dataset, SeaTunnelSink sink, - CatalogTable catalogTable) { + CatalogTable catalogTable, + String applicationId) { return dataset.format(SINK_NAME) .outputMode(OutputMode.Append()) .option(Constants.SINK_SERIALIZATION, SerializationUtils.objectToString(sink)) - .option(SINK_CATALOG_TABLE, SerializationUtils.objectToString(catalogTable)); + .option(SINK_CATALOG_TABLE, SerializationUtils.objectToString(catalogTable)) + .option(JOB_ID, applicationId); } public static DataFrameWriter inject( DataFrameWriter dataset, SeaTunnelSink sink, - CatalogTable catalogTable) { + CatalogTable catalogTable, + String applicationId) { return dataset.format(SINK_NAME) .option(Constants.SINK_SERIALIZATION, SerializationUtils.objectToString(sink)) - .option(SINK_CATALOG_TABLE, SerializationUtils.objectToString(catalogTable)); + .option(SINK_CATALOG_TABLE, 
SerializationUtils.objectToString(catalogTable)) + .option(JOB_ID, applicationId); } } diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/write/SeaTunnelSparkDataWriterFactory.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/write/SeaTunnelSparkDataWriterFactory.java index 3b06468244f..5046c2deccc 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/write/SeaTunnelSparkDataWriterFactory.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/write/SeaTunnelSparkDataWriterFactory.java @@ -36,16 +36,20 @@ public class SeaTunnelSparkDataWriterFactory private final SeaTunnelSink sink; private final CatalogTable catalogTable; + private final String jobId; public SeaTunnelSparkDataWriterFactory( - SeaTunnelSink sink, CatalogTable catalogTable) { + SeaTunnelSink sink, + CatalogTable catalogTable, + String jobId) { this.sink = sink; this.catalogTable = catalogTable; + this.jobId = jobId; } @Override public DataWriter createWriter(int partitionId, long taskId) { - SinkWriter.Context context = new DefaultSinkWriterContext((int) taskId); + SinkWriter.Context context = new DefaultSinkWriterContext(jobId, (int) taskId); SinkWriter writer; SinkCommitter committer; try { diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/write/SeaTunnelWrite.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/write/SeaTunnelWrite.java index 9df74612e50..21ae52f1281 100644 --- 
a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/write/SeaTunnelWrite.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/write/SeaTunnelWrite.java @@ -32,18 +32,21 @@ public class SeaTunnelWrite implemen private final SeaTunnelSink sink; private final CatalogTable catalogTable; + private final String jobId; public SeaTunnelWrite( SeaTunnelSink sink, - CatalogTable catalogTable) { + CatalogTable catalogTable, + String jobId) { this.sink = sink; this.catalogTable = catalogTable; + this.jobId = jobId; } @Override public BatchWrite toBatch() { try { - return new SeaTunnelBatchWrite<>(sink, catalogTable); + return new SeaTunnelBatchWrite<>(sink, catalogTable, jobId); } catch (IOException e) { throw new RuntimeException("SeaTunnel Spark sink create batch failed", e); } @@ -52,7 +55,7 @@ public BatchWrite toBatch() { @Override public StreamingWrite toStreaming() { try { - return new SeaTunnelBatchWrite<>(sink, catalogTable); + return new SeaTunnelBatchWrite<>(sink, catalogTable, jobId); } catch (IOException e) { throw new RuntimeException("SeaTunnel Spark sink create batch failed", e); } diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/write/SeaTunnelWriteBuilder.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/write/SeaTunnelWriteBuilder.java index 6a8f0e12eed..d1ff3cfca1a 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/write/SeaTunnelWriteBuilder.java +++ 
b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/sink/write/SeaTunnelWriteBuilder.java @@ -29,16 +29,19 @@ public class SeaTunnelWriteBuilder private final SeaTunnelSink sink; private final CatalogTable catalogTable; + private final String jobId; public SeaTunnelWriteBuilder( SeaTunnelSink sink, - CatalogTable catalogTable) { + CatalogTable catalogTable, + String jobId) { this.sink = sink; this.catalogTable = catalogTable; + this.jobId = jobId; } @Override public Write build() { - return new SeaTunnelWrite<>(sink, catalogTable); + return new SeaTunnelWrite<>(sink, catalogTable, jobId); } } diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/SeaTunnelSourceTable.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/SeaTunnelSourceTable.java index 038f947b085..fb5eb6d6a9e 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/SeaTunnelSourceTable.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/SeaTunnelSourceTable.java @@ -26,6 +26,7 @@ import org.apache.seatunnel.translation.spark.utils.TypeConverterUtils; import org.apache.commons.lang3.StringUtils; +import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.connector.catalog.SupportsRead; import org.apache.spark.sql.connector.catalog.Table; import org.apache.spark.sql.connector.catalog.TableCapability; @@ -66,7 +67,9 @@ public SeaTunnelSourceTable(Map properties) { public ScanBuilder newScanBuilder(CaseInsensitiveStringMap caseInsensitiveStringMap) { int parallelism = 
Integer.parseInt(properties.getOrDefault(CommonOptions.PARALLELISM.key(), "1")); - return new SeaTunnelScanBuilder(source, parallelism, caseInsensitiveStringMap); + String applicationId = SparkSession.getActiveSession().get().sparkContext().applicationId(); + return new SeaTunnelScanBuilder( + source, parallelism, applicationId, caseInsensitiveStringMap); } /** A name to identify this table */ diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/CoordinatedBatchPartitionReader.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/CoordinatedBatchPartitionReader.java index 62ff5030f3e..746cf348386 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/CoordinatedBatchPartitionReader.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/CoordinatedBatchPartitionReader.java @@ -39,9 +39,10 @@ public class CoordinatedBatchPartitionReader extends ParallelBatchPartitionReade public CoordinatedBatchPartitionReader( SeaTunnelSource source, Integer parallelism, + String jobId, Integer subtaskId, Map envOptions) { - super(source, parallelism, subtaskId, envOptions); + super(source, parallelism, jobId, subtaskId, envOptions); this.collectorMap = new HashMap<>(parallelism); for (int i = 0; i < parallelism; i++) { collectorMap.put( @@ -58,7 +59,7 @@ protected String getEnumeratorThreadName() { @Override protected BaseSourceFunction createInternalSource() { - return new InternalCoordinatedSource<>(source, null, parallelism); + return new InternalCoordinatedSource<>(source, null, parallelism, jobId); } public class 
InternalCoordinatedSource @@ -67,8 +68,9 @@ public class InternalCoordinatedSource source, Map> restoredState, - int parallelism) { - super(source, restoredState, parallelism); + int parallelism, + String jobId) { + super(source, restoredState, parallelism, jobId); } @Override diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/ParallelBatchPartitionReader.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/ParallelBatchPartitionReader.java index 987663e2eed..8bbbcc71c17 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/ParallelBatchPartitionReader.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/ParallelBatchPartitionReader.java @@ -43,6 +43,7 @@ public class ParallelBatchPartitionReader { protected final SeaTunnelSource source; protected final Integer parallelism; + protected final String jobId; protected final Integer subtaskId; protected final ExecutorService executorService; @@ -60,10 +61,12 @@ public class ParallelBatchPartitionReader { public ParallelBatchPartitionReader( SeaTunnelSource source, Integer parallelism, + String jobId, Integer subtaskId, Map envOptions) { this.source = source; this.parallelism = parallelism; + this.jobId = jobId; this.subtaskId = subtaskId; this.executorService = ThreadPoolExecutorFactory.createScheduledThreadPoolExecutor( @@ -118,7 +121,7 @@ protected void prepare() { } protected BaseSourceFunction createInternalSource() { - return new InternalParallelSource<>(source, null, parallelism, subtaskId); + return new InternalParallelSource<>(source, null, parallelism, 
jobId, subtaskId); } public InternalRow get() { @@ -148,8 +151,9 @@ public InternalParallelSource( SeaTunnelSource source, Map> restoredState, int parallelism, + String jobId, int subtaskId) { - super(source, restoredState, parallelism, subtaskId); + super(source, restoredState, parallelism, jobId, subtaskId); } @Override diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/SeaTunnelBatch.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/SeaTunnelBatch.java index 284583af2fe..491555f32b2 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/SeaTunnelBatch.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/SeaTunnelBatch.java @@ -33,14 +33,17 @@ public class SeaTunnelBatch implements Batch { private final SeaTunnelSource source; private final int parallelism; + private final String jobId; private final Map envOptions; public SeaTunnelBatch( SeaTunnelSource source, int parallelism, + String jobId, Map envOptions) { this.source = source; this.parallelism = parallelism; + this.jobId = jobId; this.envOptions = envOptions; } @@ -61,6 +64,6 @@ public InputPartition[] planInputPartitions() { @Override public PartitionReaderFactory createReaderFactory() { - return new SeaTunnelBatchPartitionReaderFactory(source, parallelism, envOptions); + return new SeaTunnelBatchPartitionReaderFactory(source, parallelism, jobId, envOptions); } } diff --git 
a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/SeaTunnelBatchPartitionReaderFactory.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/SeaTunnelBatchPartitionReaderFactory.java index 92a92582b0d..f15ae051ec2 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/SeaTunnelBatchPartitionReaderFactory.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/batch/SeaTunnelBatchPartitionReaderFactory.java @@ -33,14 +33,17 @@ public class SeaTunnelBatchPartitionReaderFactory implements PartitionReaderFact private final SeaTunnelSource source; private final int parallelism; + private final String jobId; private final Map envOptions; public SeaTunnelBatchPartitionReaderFactory( SeaTunnelSource source, int parallelism, + String jobId, Map envOptions) { this.source = source; this.parallelism = parallelism; + this.jobId = jobId; this.envOptions = envOptions; } @@ -52,10 +55,11 @@ public PartitionReader createReader(InputPartition partition) { if (source instanceof SupportCoordinate) { partitionReader = new CoordinatedBatchPartitionReader( - source, parallelism, partitionId, envOptions); + source, parallelism, jobId, partitionId, envOptions); } else { partitionReader = - new ParallelBatchPartitionReader(source, parallelism, partitionId, envOptions); + new ParallelBatchPartitionReader( + source, parallelism, jobId, partitionId, envOptions); } return new SeaTunnelBatchPartitionReader(partitionReader); } diff --git 
a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/CoordinatedMicroBatchPartitionReader.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/CoordinatedMicroBatchPartitionReader.java index 77b427d6cf4..2df1b089264 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/CoordinatedMicroBatchPartitionReader.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/CoordinatedMicroBatchPartitionReader.java @@ -39,6 +39,7 @@ public class CoordinatedMicroBatchPartitionReader extends ParallelMicroBatchPart public CoordinatedMicroBatchPartitionReader( SeaTunnelSource source, Integer parallelism, + String jobId, Integer subtaskId, Integer checkpointId, Integer checkpointInterval, @@ -49,6 +50,7 @@ public CoordinatedMicroBatchPartitionReader( super( source, parallelism, + jobId, subtaskId, checkpointId, checkpointInterval, @@ -125,7 +127,7 @@ protected String getEnumeratorThreadName() { @Override protected BaseSourceFunction createInternalSource() { - return new InternalCoordinatedSource<>(source, null, parallelism); + return new InternalCoordinatedSource<>(source, null, parallelism, jobId); } public class InternalCoordinatedSource @@ -134,8 +136,9 @@ public class InternalCoordinatedSource source, Map> restoredState, - int parallelism) { - super(source, restoredState, parallelism); + int parallelism, + String jobId) { + super(source, restoredState, parallelism, jobId); } @Override diff --git 
a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/ParallelMicroBatchPartitionReader.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/ParallelMicroBatchPartitionReader.java index 60d9f9ae969..c62185d0995 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/ParallelMicroBatchPartitionReader.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/ParallelMicroBatchPartitionReader.java @@ -57,6 +57,7 @@ public class ParallelMicroBatchPartitionReader extends ParallelBatchPartitionRea public ParallelMicroBatchPartitionReader( SeaTunnelSource source, Integer parallelism, + String jobId, Integer subtaskId, Integer checkpointId, Integer checkpointInterval, @@ -64,7 +65,7 @@ public ParallelMicroBatchPartitionReader( String hdfsRoot, String hdfsUser, Map envOptions) { - super(source, parallelism, subtaskId, envOptions); + super(source, parallelism, jobId, subtaskId, envOptions); this.checkpointId = checkpointId; this.checkpointInterval = checkpointInterval; this.checkpointPath = checkpointPath; @@ -74,7 +75,7 @@ public ParallelMicroBatchPartitionReader( @Override protected BaseSourceFunction createInternalSource() { - return new InternalParallelSource<>(source, restoredState, parallelism, subtaskId); + return new InternalParallelSource<>(source, restoredState, parallelism, jobId, subtaskId); } @Override diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/SeaTunnelMicroBatch.java 
b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/SeaTunnelMicroBatch.java index c4d03d2bd76..0d9f6d4cdbf 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/SeaTunnelMicroBatch.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/SeaTunnelMicroBatch.java @@ -46,6 +46,7 @@ public class SeaTunnelMicroBatch implements MicroBatchStream { private final SeaTunnelSource source; private final int parallelism; + private final String jobId; private final String checkpointLocation; @@ -58,10 +59,12 @@ public class SeaTunnelMicroBatch implements MicroBatchStream { public SeaTunnelMicroBatch( SeaTunnelSource source, int parallelism, + String jobId, String checkpointLocation, CaseInsensitiveStringMap caseInsensitiveStringMap) { this.source = source; this.parallelism = parallelism; + this.jobId = jobId; this.checkpointLocation = checkpointLocation; this.caseInsensitiveStringMap = caseInsensitiveStringMap; } @@ -116,7 +119,7 @@ public InputPartition[] planInputPartitions(Offset start, Offset end) { @Override public PartitionReaderFactory createReaderFactory() { return new SeaTunnelMicroBatchPartitionReaderFactory( - source, parallelism, checkpointLocation, caseInsensitiveStringMap); + source, parallelism, jobId, checkpointLocation, caseInsensitiveStringMap); } @Override diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/SeaTunnelMicroBatchPartitionReaderFactory.java 
b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/SeaTunnelMicroBatchPartitionReaderFactory.java index 35006cdfce4..4742916894d 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/SeaTunnelMicroBatchPartitionReaderFactory.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/partition/micro/SeaTunnelMicroBatchPartitionReaderFactory.java @@ -35,6 +35,7 @@ public class SeaTunnelMicroBatchPartitionReaderFactory implements PartitionReade private final SeaTunnelSource source; private final int parallelism; + private final String jobId; private final String checkpointLocation; @@ -43,10 +44,12 @@ public class SeaTunnelMicroBatchPartitionReaderFactory implements PartitionReade public SeaTunnelMicroBatchPartitionReaderFactory( SeaTunnelSource source, int parallelism, + String jobId, String checkpointLocation, CaseInsensitiveStringMap caseInsensitiveStringMap) { this.source = source; this.parallelism = parallelism; + this.jobId = jobId; this.checkpointLocation = checkpointLocation; this.caseInsensitiveStringMap = caseInsensitiveStringMap; } @@ -67,6 +70,7 @@ public PartitionReader createReader(InputPartition partition) { new CoordinatedMicroBatchPartitionReader( source, parallelism, + jobId, subtaskId, checkpointId, checkpointInterval, @@ -79,6 +83,7 @@ public PartitionReader createReader(InputPartition partition) { new ParallelMicroBatchPartitionReader( source, parallelism, + jobId, subtaskId, checkpointId, checkpointInterval, diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/scan/SeaTunnelScan.java 
b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/scan/SeaTunnelScan.java index d38d2a0a64c..aa196994087 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/scan/SeaTunnelScan.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/scan/SeaTunnelScan.java @@ -36,15 +36,18 @@ public class SeaTunnelScan implements Scan { private final SeaTunnelSource source; private final int parallelism; + private final String jobId; private final CaseInsensitiveStringMap caseInsensitiveStringMap; public SeaTunnelScan( SeaTunnelSource source, int parallelism, + String jobId, CaseInsensitiveStringMap caseInsensitiveStringMap) { this.source = source; this.parallelism = parallelism; + this.jobId = jobId; this.caseInsensitiveStringMap = caseInsensitiveStringMap; } @@ -56,12 +59,12 @@ public StructType readSchema() { @Override public Batch toBatch() { Map envOptions = caseInsensitiveStringMap.asCaseSensitiveMap(); - return new SeaTunnelBatch(source, parallelism, envOptions); + return new SeaTunnelBatch(source, parallelism, jobId, envOptions); } @Override public MicroBatchStream toMicroBatchStream(String checkpointLocation) { return new SeaTunnelMicroBatch( - source, parallelism, checkpointLocation, caseInsensitiveStringMap); + source, parallelism, jobId, checkpointLocation, caseInsensitiveStringMap); } } diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/scan/SeaTunnelScanBuilder.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/scan/SeaTunnelScanBuilder.java index b4100c62875..efff7956c15 100644 --- 
a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/scan/SeaTunnelScanBuilder.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/main/java/org/apache/seatunnel/translation/spark/source/scan/SeaTunnelScanBuilder.java @@ -29,21 +29,24 @@ public class SeaTunnelScanBuilder implements ScanBuilder { private final SeaTunnelSource source; private final int parallelism; + private final String jobId; private final CaseInsensitiveStringMap caseInsensitiveStringMap; public SeaTunnelScanBuilder( SeaTunnelSource source, int parallelism, + String jobId, CaseInsensitiveStringMap caseInsensitiveStringMap) { this.source = source; this.parallelism = parallelism; + this.jobId = jobId; this.caseInsensitiveStringMap = caseInsensitiveStringMap; } /** Returns the {@link SeaTunnelScan} */ @Override public Scan build() { - return new SeaTunnelScan(source, parallelism, caseInsensitiveStringMap); + return new SeaTunnelScan(source, parallelism, jobId, caseInsensitiveStringMap); } } diff --git a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/test/java/org/apache/seatunnel/translation/spark/sink/SparkSinkTest.java b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/test/java/org/apache/seatunnel/translation/spark/sink/SparkSinkTest.java index 9f9292e20b1..b426f175480 100644 --- a/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/test/java/org/apache/seatunnel/translation/spark/sink/SparkSinkTest.java +++ b/seatunnel-translation/seatunnel-translation-spark/seatunnel-translation-spark-3.3/src/test/java/org/apache/seatunnel/translation/spark/sink/SparkSinkTest.java @@ -405,7 +405,8 @@ public void testSparkSinkWriteDataWithCopy() { SparkSinkInjector.inject( dataset.write(), new SeaTunnelSinkWithBuffer(), - CatalogTableUtil.getCatalogTable("test", "test", 
"test", "test", rowType)) + CatalogTableUtil.getCatalogTable("test", "test", "test", "test", rowType), + spark.sparkContext().applicationId()) .option("checkpointLocation", "/tmp") .mode(SaveMode.Append) .save(); From ebe7744221ca51e4c2258895702e05f2fd3fbaae Mon Sep 17 00:00:00 2001 From: xiaochen <598457447@qq.com> Date: Fri, 29 Mar 2024 10:36:34 +0800 Subject: [PATCH 38/59] fix typo (#6611) --- .../starter/flowcontrol/FlowControlGate.java | 6 ++-- .../flowcontrol/FlowControlStrategy.java | 30 +++++++++---------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlGate.java b/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlGate.java index 5d06366e3aa..fed891a5eb5 100644 --- a/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlGate.java +++ b/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlGate.java @@ -32,15 +32,15 @@ public class FlowControlGate { private FlowControlGate(FlowControlStrategy flowControlStrategy) { final int bytesPerSecond = flowControlStrategy.getBytesPerSecond(); - final int countPreSecond = flowControlStrategy.getCountPreSecond(); + final int countPerSecond = flowControlStrategy.getCountPerSecond(); this.bytesRateLimiter = bytesPerSecond == DEFAULT_VALUE ? Optional.empty() : Optional.of(RateLimiter.create(bytesPerSecond)); this.countRateLimiter = - countPreSecond == DEFAULT_VALUE + countPerSecond == DEFAULT_VALUE ? 
Optional.empty() - : Optional.of(RateLimiter.create(countPreSecond)); + : Optional.of(RateLimiter.create(countPerSecond)); } public void audit(SeaTunnelRow row) { diff --git a/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlStrategy.java b/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlStrategy.java index 923dccee0d7..2a413627244 100644 --- a/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlStrategy.java +++ b/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlStrategy.java @@ -29,23 +29,23 @@ public final class FlowControlStrategy { private final int bytesPerSecond; - private final int countPreSecond; + private final int countPerSecond; - FlowControlStrategy(int bytesPerSecond, int countPreSecond) { - if (bytesPerSecond <= 0 || countPreSecond <= 0) { + FlowControlStrategy(int bytesPerSecond, int countPerSecond) { + if (bytesPerSecond <= 0 || countPerSecond <= 0) { throw new IllegalArgumentException( - "bytesPerSecond and countPreSecond must be positive"); + "bytesPerSecond and countPerSecond must be positive"); } this.bytesPerSecond = bytesPerSecond; - this.countPreSecond = countPreSecond; + this.countPerSecond = countPerSecond; } public int getBytesPerSecond() { return bytesPerSecond; } - public int getCountPreSecond() { - return countPreSecond; + public int getCountPerSecond() { + return countPerSecond; } public static Builder builder() { @@ -56,7 +56,7 @@ public static class Builder { private int bytesPerSecond = Integer.MAX_VALUE; - private int countPreSecond = Integer.MAX_VALUE; + private int countPerSecond = Integer.MAX_VALUE; private Builder() {} @@ -65,20 +65,20 @@ public Builder bytesPerSecond(int bytesPerSecond) { return this; } - public Builder countPerSecond(int countPreSecond) { - this.countPreSecond = 
countPreSecond; + public Builder countPerSecond(int countPerSecond) { + this.countPerSecond = countPerSecond; return this; } public FlowControlStrategy build() { - return new FlowControlStrategy(bytesPerSecond, countPreSecond); + return new FlowControlStrategy(bytesPerSecond, countPerSecond); } } - public static FlowControlStrategy of(int bytesPerSecond, int countPreSecond) { + public static FlowControlStrategy of(int bytesPerSecond, int countPerSecond) { return FlowControlStrategy.builder() .bytesPerSecond(bytesPerSecond) - .countPerSecond(countPreSecond) + .countPerSecond(countPerSecond) .build(); } @@ -86,8 +86,8 @@ public static FlowControlStrategy ofBytes(int bytesPerSecond) { return FlowControlStrategy.builder().bytesPerSecond(bytesPerSecond).build(); } - public static FlowControlStrategy ofCount(int countPreSecond) { - return FlowControlStrategy.builder().countPerSecond(countPreSecond).build(); + public static FlowControlStrategy ofCount(int countPerSecond) { + return FlowControlStrategy.builder().countPerSecond(countPerSecond).build(); } public static FlowControlStrategy fromMap(Map envOption) { From 2599d3b73692d0fea9b97649f316c57c26646111 Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Fri, 29 Mar 2024 10:37:02 +0800 Subject: [PATCH 39/59] [Chore] Fix `file` spell errors (#6606) --- ...SinkFactory.java => BaseMultipleTableFileSinkFactory.java} | 2 +- .../seatunnel/file/local/sink/LocalFileSinkFactory.java | 4 ++-- .../seatunnel/file/oss/sink/OssFileSinkFactory.java | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) rename seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/factory/{BaseMultipleTableFinkSinkFactory.java => BaseMultipleTableFileSinkFactory.java} (98%) diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/factory/BaseMultipleTableFinkSinkFactory.java 
b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/factory/BaseMultipleTableFileSinkFactory.java similarity index 98% rename from seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/factory/BaseMultipleTableFinkSinkFactory.java rename to seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/factory/BaseMultipleTableFileSinkFactory.java index b8c60f0e197..9f9f5f382f6 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/factory/BaseMultipleTableFinkSinkFactory.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/factory/BaseMultipleTableFileSinkFactory.java @@ -31,7 +31,7 @@ import org.apache.seatunnel.connectors.seatunnel.file.sink.commit.FileCommitInfo; import org.apache.seatunnel.connectors.seatunnel.file.sink.state.FileSinkState; -public abstract class BaseMultipleTableFinkSinkFactory +public abstract class BaseMultipleTableFileSinkFactory implements TableSinkFactory< SeaTunnelRow, FileSinkState, FileCommitInfo, FileAggregatedCommitInfo> { diff --git a/seatunnel-connectors-v2/connector-file/connector-file-local/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/local/sink/LocalFileSinkFactory.java b/seatunnel-connectors-v2/connector-file/connector-file-local/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/local/sink/LocalFileSinkFactory.java index 770e8866b54..b2e744722fb 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-local/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/local/sink/LocalFileSinkFactory.java +++ 
b/seatunnel-connectors-v2/connector-file/connector-file-local/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/local/sink/LocalFileSinkFactory.java @@ -27,7 +27,7 @@ import org.apache.seatunnel.connectors.seatunnel.file.config.BaseSinkConfig; import org.apache.seatunnel.connectors.seatunnel.file.config.FileFormat; import org.apache.seatunnel.connectors.seatunnel.file.config.FileSystemType; -import org.apache.seatunnel.connectors.seatunnel.file.factory.BaseMultipleTableFinkSinkFactory; +import org.apache.seatunnel.connectors.seatunnel.file.factory.BaseMultipleTableFileSinkFactory; import org.apache.seatunnel.connectors.seatunnel.file.sink.commit.FileAggregatedCommitInfo; import org.apache.seatunnel.connectors.seatunnel.file.sink.commit.FileCommitInfo; import org.apache.seatunnel.connectors.seatunnel.file.sink.state.FileSinkState; @@ -35,7 +35,7 @@ import com.google.auto.service.AutoService; @AutoService(Factory.class) -public class LocalFileSinkFactory extends BaseMultipleTableFinkSinkFactory { +public class LocalFileSinkFactory extends BaseMultipleTableFileSinkFactory { @Override public String factoryIdentifier() { return FileSystemType.LOCAL.getFileSystemPluginName(); diff --git a/seatunnel-connectors-v2/connector-file/connector-file-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSinkFactory.java b/seatunnel-connectors-v2/connector-file/connector-file-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSinkFactory.java index 5931a46977b..38882175c78 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSinkFactory.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-oss/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/oss/sink/OssFileSinkFactory.java @@ -26,13 +26,13 @@ import org.apache.seatunnel.connectors.seatunnel.file.config.BaseSinkConfig; import 
org.apache.seatunnel.connectors.seatunnel.file.config.FileFormat; import org.apache.seatunnel.connectors.seatunnel.file.config.FileSystemType; -import org.apache.seatunnel.connectors.seatunnel.file.factory.BaseMultipleTableFinkSinkFactory; +import org.apache.seatunnel.connectors.seatunnel.file.factory.BaseMultipleTableFileSinkFactory; import org.apache.seatunnel.connectors.seatunnel.file.oss.config.OssConfigOptions; import com.google.auto.service.AutoService; @AutoService(Factory.class) -public class OssFileSinkFactory extends BaseMultipleTableFinkSinkFactory { +public class OssFileSinkFactory extends BaseMultipleTableFileSinkFactory { @Override public String factoryIdentifier() { return FileSystemType.OSS.getFileSystemPluginName(); From d1599f8ad97d9143bce24377bcc61195df232a13 Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Fri, 29 Mar 2024 10:39:35 +0800 Subject: [PATCH 40/59] [Improve][Connector-V2] Support read orc with schema config to cast type (#6531) --- .../seatunnel/api/table/type/TypeUtil.java | 44 +++++ .../file/config/BaseFileSourceConfig.java | 8 +- .../file/source/reader/OrcReadStrategy.java | 185 ++++++++++++++---- .../file/source/reader/ReadStrategy.java | 5 + .../e2e/connector/file/local/LocalFileIT.java | 5 + ...file_orc_to_assert_with_time_and_cast.conf | 126 ++++++++++++ .../src/test/resources/orc/orc_for_cast.orc | Bin 0 -> 4775 bytes 7 files changed, 334 insertions(+), 39 deletions(-) create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/TypeUtil.java create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/orc/local_file_orc_to_assert_with_time_and_cast.conf create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/orc/orc_for_cast.orc diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/TypeUtil.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/TypeUtil.java new file mode 100644 
index 00000000000..b8df6d80e5b --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/TypeUtil.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.api.table.type; + +public class TypeUtil { + + /** Check if the data type can be converted to another data type. 
*/ + public static boolean canConvert(SeaTunnelDataType from, SeaTunnelDataType to) { + // any type can be converted to string + if (from == to || to.getSqlType() == SqlType.STRING) { + return true; + } + if (from.getSqlType() == SqlType.TINYINT) { + return to.getSqlType() == SqlType.SMALLINT + || to.getSqlType() == SqlType.INT + || to.getSqlType() == SqlType.BIGINT; + } + if (from.getSqlType() == SqlType.SMALLINT) { + return to.getSqlType() == SqlType.INT || to.getSqlType() == SqlType.BIGINT; + } + if (from.getSqlType() == SqlType.INT) { + return to.getSqlType() == SqlType.BIGINT; + } + if (from.getSqlType() == SqlType.FLOAT) { + return to.getSqlType() == SqlType.DOUBLE; + } + return false; + } +} diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseFileSourceConfig.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseFileSourceConfig.java index c08a7a11def..5c16a7e28b3 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseFileSourceConfig.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/config/BaseFileSourceConfig.java @@ -80,7 +80,8 @@ private List parseFilePaths(ReadonlyConfig readonlyConfig) { private CatalogTable parseCatalogTable(ReadonlyConfig readonlyConfig) { final CatalogTable catalogTable; - if (readonlyConfig.getOptional(TableSchemaOptions.SCHEMA).isPresent()) { + boolean configSchema = readonlyConfig.getOptional(TableSchemaOptions.SCHEMA).isPresent(); + if (configSchema) { catalogTable = CatalogTableUtil.buildWithConfig(getPluginName(), readonlyConfig); } else { catalogTable = CatalogTableUtil.buildSimpleTextTable(); @@ -99,7 +100,10 @@ private CatalogTable parseCatalogTable(ReadonlyConfig readonlyConfig) { 
case ORC: case PARQUET: return newCatalogTable( - catalogTable, readStrategy.getSeaTunnelRowTypeInfo(filePaths.get(0))); + catalogTable, + readStrategy.getSeaTunnelRowTypeInfoWithUserConfigRowType( + filePaths.get(0), + configSchema ? catalogTable.getSeaTunnelRowType() : null)); default: throw new FileConnectorException( FileConnectorErrorCode.FORMAT_NOT_SUPPORT, diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/OrcReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/OrcReadStrategy.java index 79158a54232..77b02ab03a9 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/OrcReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/OrcReadStrategy.java @@ -28,6 +28,7 @@ import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; +import org.apache.seatunnel.api.table.type.SqlType; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; import org.apache.seatunnel.connectors.seatunnel.file.config.BaseSourceConfigOptions; import org.apache.seatunnel.connectors.seatunnel.file.exception.FileConnectorErrorCode; @@ -55,6 +56,8 @@ import lombok.extern.slf4j.Slf4j; +import javax.annotation.Nullable; + import java.io.IOException; import java.math.BigDecimal; import java.nio.ByteBuffer; @@ -62,12 +65,15 @@ import java.nio.charset.StandardCharsets; import java.sql.Timestamp; import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import 
java.util.List; import java.util.Map; +import static org.apache.seatunnel.api.table.type.TypeUtil.canConvert; import static org.apache.seatunnel.connectors.seatunnel.file.sink.writer.OrcWriteStrategy.buildFieldWithRowType; @Slf4j @@ -120,7 +126,12 @@ public void read(String path, String tableId, Collector output) if (cols[j] == null) { fields[j] = null; } else { - fields[j] = readColumn(cols[j], children.get(j), num); + fields[j] = + readColumn( + cols[j], + children.get(j), + seaTunnelRowType.getFieldType(j), + num); } } SeaTunnelRow seaTunnelRow = new SeaTunnelRow(fields); @@ -134,6 +145,12 @@ public void read(String path, String tableId, Collector output) @Override public SeaTunnelRowType getSeaTunnelRowTypeInfo(String path) throws FileConnectorException { + return getSeaTunnelRowTypeInfoWithUserConfigRowType(path, null); + } + + @Override + public SeaTunnelRowType getSeaTunnelRowTypeInfoWithUserConfigRowType( + String path, SeaTunnelRowType configRowType) throws FileConnectorException { try (Reader reader = hadoopFileSystemProxy.doWithHadoopAuth( ((configuration, userGroupInformation) -> { @@ -158,7 +175,12 @@ public SeaTunnelRowType getSeaTunnelRowTypeInfo(String path) throws FileConnecto "Column [%s] does not exists in table schema [%s]", readColumns.get(i), String.join(",", fieldNames))); } - types[i] = orcDataType2SeaTunnelDataType(schema.getChildren().get(index)); + types[i] = + orcDataType2SeaTunnelDataType( + schema.getChildren().get(index), + configRowType != null && configRowType.getTotalFields() > i + ? 
configRowType.getFieldType(i) + : null); } seaTunnelRowType = new SeaTunnelRowType(fields, types); seaTunnelRowTypeWithPartition = mergePartitionTypes(path, seaTunnelRowType); @@ -209,39 +231,57 @@ boolean checkFileType(String path) { } } - private SeaTunnelDataType orcDataType2SeaTunnelDataType(TypeDescription typeDescription) { + private SeaTunnelDataType getFinalType( + SeaTunnelDataType fileType, SeaTunnelDataType configType) { + if (configType == null) { + return fileType; + } + return canConvert(fileType, configType) ? configType : fileType; + } + + private SeaTunnelDataType orcDataType2SeaTunnelDataType( + TypeDescription typeDescription, SeaTunnelDataType configType) { switch (typeDescription.getCategory()) { case BOOLEAN: - return BasicType.BOOLEAN_TYPE; + return getFinalType(BasicType.BOOLEAN_TYPE, configType); case INT: - return BasicType.INT_TYPE; + return getFinalType(BasicType.INT_TYPE, configType); case BYTE: - return BasicType.BYTE_TYPE; + return getFinalType(BasicType.BYTE_TYPE, configType); case SHORT: - return BasicType.SHORT_TYPE; + return getFinalType(BasicType.SHORT_TYPE, configType); case LONG: - return BasicType.LONG_TYPE; + return getFinalType(BasicType.LONG_TYPE, configType); case FLOAT: - return BasicType.FLOAT_TYPE; + return getFinalType(BasicType.FLOAT_TYPE, configType); case DOUBLE: - return BasicType.DOUBLE_TYPE; + return getFinalType(BasicType.DOUBLE_TYPE, configType); case BINARY: - return PrimitiveByteArrayType.INSTANCE; + return getFinalType(PrimitiveByteArrayType.INSTANCE, configType); case STRING: case VARCHAR: case CHAR: - return BasicType.STRING_TYPE; + return getFinalType(BasicType.STRING_TYPE, configType); case DATE: - return LocalTimeType.LOCAL_DATE_TYPE; + return getFinalType(LocalTimeType.LOCAL_DATE_TYPE, configType); case TIMESTAMP: - return LocalTimeType.LOCAL_DATE_TIME_TYPE; + // Support only return time when the type is timestamps + if (configType != null && configType.getSqlType().equals(SqlType.TIME)) { + return 
LocalTimeType.LOCAL_TIME_TYPE; + } + return getFinalType(LocalTimeType.LOCAL_DATE_TIME_TYPE, configType); case DECIMAL: int precision = typeDescription.getPrecision(); int scale = typeDescription.getScale(); - return new DecimalType(precision, scale); + return getFinalType(new DecimalType(precision, scale), configType); case LIST: TypeDescription listType = typeDescription.getChildren().get(0); - SeaTunnelDataType seaTunnelDataType = orcDataType2SeaTunnelDataType(listType); + SeaTunnelDataType seaTunnelDataType = + orcDataType2SeaTunnelDataType(listType, null); + if (configType instanceof ArrayType) { + SeaTunnelDataType elementType = ((ArrayType) configType).getElementType(); + seaTunnelDataType = orcDataType2SeaTunnelDataType(listType, elementType); + } switch (seaTunnelDataType.getSqlType()) { case STRING: return ArrayType.STRING_ARRAY_TYPE; @@ -270,16 +310,35 @@ private SeaTunnelDataType orcDataType2SeaTunnelDataType(TypeDescription typeD case MAP: TypeDescription keyType = typeDescription.getChildren().get(0); TypeDescription valueType = typeDescription.getChildren().get(1); - return new MapType<>( - orcDataType2SeaTunnelDataType(keyType), - orcDataType2SeaTunnelDataType(valueType)); + if (configType instanceof MapType) { + SeaTunnelDataType keyDataType = ((MapType) configType).getKeyType(); + SeaTunnelDataType valueDataType = + ((MapType) configType).getValueType(); + keyDataType = orcDataType2SeaTunnelDataType(keyType, keyDataType); + valueDataType = orcDataType2SeaTunnelDataType(valueType, valueDataType); + return new MapType<>(keyDataType, valueDataType); + } else { + return new MapType<>( + orcDataType2SeaTunnelDataType(keyType, null), + orcDataType2SeaTunnelDataType(valueType, null)); + } case STRUCT: List children = typeDescription.getChildren(); String[] fieldNames = typeDescription.getFieldNames().toArray(TYPE_ARRAY_STRING); - SeaTunnelDataType[] fieldTypes = - children.stream() - .map(this::orcDataType2SeaTunnelDataType) - 
.toArray(SeaTunnelDataType[]::new); + SeaTunnelDataType[] fieldTypes = new SeaTunnelDataType[children.size()]; + if (configType instanceof SeaTunnelRowType) { + for (int i = 0; i < children.size(); i++) { + fieldTypes[i] = + orcDataType2SeaTunnelDataType( + children.get(i), + ((SeaTunnelRowType) configType).getFieldType(i)); + } + } else { + fieldTypes = + children.stream() + .map(f -> orcDataType2SeaTunnelDataType(f, null)) + .toArray(SeaTunnelDataType[]::new); + } return new SeaTunnelRowType(fieldNames, fieldTypes); default: // do nothing @@ -293,30 +352,37 @@ private SeaTunnelDataType orcDataType2SeaTunnelDataType(TypeDescription typeD } } - private Object readColumn(ColumnVector colVec, TypeDescription colType, int rowNum) { + private Object readColumn( + ColumnVector colVec, + TypeDescription colType, + @Nullable SeaTunnelDataType dataType, + int rowNum) { Object columnObj = null; if (!colVec.isNull[rowNum]) { switch (colVec.type) { case LONG: - columnObj = readLongVal(colVec, colType, rowNum); + columnObj = readLongVal(colVec, colType, dataType, rowNum); break; case DOUBLE: columnObj = ((DoubleColumnVector) colVec).vector[rowNum]; if (colType.getCategory() == TypeDescription.Category.FLOAT) { columnObj = ((Double) columnObj).floatValue(); } + if (dataType != null && dataType.getSqlType().equals(SqlType.STRING)) { + columnObj = columnObj.toString(); + } break; case BYTES: - columnObj = readBytesVal(colVec, colType, rowNum); + columnObj = readBytesVal(colVec, colType, dataType, rowNum); break; case DECIMAL: - columnObj = readDecimalVal(colVec, rowNum); + columnObj = readDecimalVal(colVec, dataType, rowNum); break; case TIMESTAMP: - columnObj = readTimestampVal(colVec, colType, rowNum); + columnObj = readTimestampVal(colVec, colType, dataType, rowNum); break; case STRUCT: - columnObj = readStructVal(colVec, colType, rowNum); + columnObj = readStructVal(colVec, colType, dataType, rowNum); break; case LIST: columnObj = readListVal(colVec, colType, rowNum); @@ 
-336,7 +402,11 @@ private Object readColumn(ColumnVector colVec, TypeDescription colType, int rowN return columnObj; } - private Object readLongVal(ColumnVector colVec, TypeDescription colType, int rowNum) { + private Object readLongVal( + ColumnVector colVec, + TypeDescription colType, + SeaTunnelDataType dataType, + int rowNum) { Object colObj = null; if (!colVec.isNull[rowNum]) { LongColumnVector longVec = (LongColumnVector) colVec; @@ -353,11 +423,18 @@ private Object readLongVal(ColumnVector colVec, TypeDescription colType, int row } else if (colType.getCategory() == TypeDescription.Category.SHORT) { colObj = (short) longVal; } + if (dataType != null && dataType.getSqlType().equals(SqlType.STRING)) { + colObj = colObj.toString(); + } } return colObj; } - private Object readBytesVal(ColumnVector colVec, TypeDescription typeDescription, int rowNum) { + private Object readBytesVal( + ColumnVector colVec, + TypeDescription typeDescription, + SeaTunnelDataType dataType, + int rowNum) { Charset charset = StandardCharsets.UTF_8; if (pluginConfig != null) { charset = @@ -375,6 +452,11 @@ private Object readBytesVal(ColumnVector colVec, TypeDescription typeDescription && bytesObj != null) { bytesObj = ((String) bytesObj).getBytes(charset); } + if (dataType != null + && dataType.getSqlType().equals(SqlType.STRING) + && bytesObj != null) { + bytesObj = bytesObj.toString(); + } } return bytesObj; } @@ -400,16 +482,25 @@ private Object bytesVectorToString(BytesColumnVector bytesVector, int row, Chars charset); } - private Object readDecimalVal(ColumnVector colVec, int rowNum) { + private Object readDecimalVal(ColumnVector colVec, SeaTunnelDataType dataType, int rowNum) { Object decimalObj = null; if (!colVec.isNull[rowNum]) { DecimalColumnVector decimalVec = (DecimalColumnVector) colVec; decimalObj = decimalVec.vector[rowNum].getHiveDecimal().bigDecimalValue(); + if (dataType != null + && dataType.getSqlType().equals(SqlType.STRING) + && decimalObj != null) { + decimalObj 
= decimalObj.toString(); + } } return decimalObj; } - private Object readTimestampVal(ColumnVector colVec, TypeDescription colType, int rowNum) { + private Object readTimestampVal( + ColumnVector colVec, + TypeDescription colType, + SeaTunnelDataType dataType, + int rowNum) { Object timestampVal = null; if (!colVec.isNull[rowNum]) { TimestampColumnVector timestampVec = (TimestampColumnVector) colVec; @@ -420,12 +511,28 @@ private Object readTimestampVal(ColumnVector colVec, TypeDescription colType, in timestampVal = timestamp.toLocalDateTime(); if (colType.getCategory() == TypeDescription.Category.DATE) { timestampVal = LocalDate.ofEpochDay(timestamp.getTime()); + } else if (dataType != null && dataType.getSqlType() == SqlType.TIME) { + timestampVal = + LocalTime.of( + ((LocalDateTime) timestampVal).getHour(), + ((LocalDateTime) timestampVal).getMinute(), + ((LocalDateTime) timestampVal).getSecond(), + ((LocalDateTime) timestampVal).getNano()); + } + if (dataType != null + && dataType.getSqlType().equals(SqlType.STRING) + && timestampVal != null) { + timestampVal = timestampVal.toString(); } } return timestampVal; } - private Object readStructVal(ColumnVector colVec, TypeDescription colType, int rowNum) { + private Object readStructVal( + ColumnVector colVec, + TypeDescription colType, + SeaTunnelDataType dataType, + int rowNum) { Object structObj = null; if (!colVec.isNull[rowNum]) { StructColumnVector structVector = (StructColumnVector) colVec; @@ -433,8 +540,12 @@ private Object readStructVal(ColumnVector colVec, TypeDescription colType, int r Object[] fieldValues = new Object[fieldVec.length]; List fieldTypes = colType.getChildren(); for (int i = 0; i < fieldVec.length; i++) { - Object fieldObj = readColumn(fieldVec[i], fieldTypes.get(i), rowNum); - fieldValues[i] = fieldObj; + if (dataType instanceof SeaTunnelRowType) { + SeaTunnelDataType fieldType = ((SeaTunnelRowType) dataType).getFieldType(i); + fieldValues[i] = readColumn(fieldVec[i], fieldTypes.get(i), 
fieldType, rowNum); + } else { + fieldValues[i] = readColumn(fieldVec[i], fieldTypes.get(i), null, rowNum); + } } structObj = new SeaTunnelRow(fieldValues); } @@ -521,7 +632,7 @@ private Object readUnionVal(ColumnVector colVec, TypeDescription colType, int ro TypeDescription fieldType = unionFieldTypes.get(tagVal); if (tagVal < unionVector.fields.length) { ColumnVector fieldVector = unionVector.fields[tagVal]; - Object unionValue = readColumn(fieldVector, fieldType, rowNum); + Object unionValue = readColumn(fieldVector, fieldType, null, rowNum); columnValuePair = Pair.of(fieldType, unionValue); } else { throw new FileConnectorException( diff --git a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/ReadStrategy.java b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/ReadStrategy.java index a269594e1f8..d3a210f56ef 100644 --- a/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/ReadStrategy.java +++ b/seatunnel-connectors-v2/connector-file/connector-file-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/file/source/reader/ReadStrategy.java @@ -38,6 +38,11 @@ void read(String path, String tableId, Collector output) SeaTunnelRowType getSeaTunnelRowTypeInfo(String path) throws FileConnectorException; + default SeaTunnelRowType getSeaTunnelRowTypeInfoWithUserConfigRowType( + String path, SeaTunnelRowType rowType) throws FileConnectorException { + return getSeaTunnelRowTypeInfo(path); + } + // todo: use CatalogTable void setSeaTunnelRowTypeInfo(SeaTunnelRowType seaTunnelRowType); diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/java/org/apache/seatunnel/e2e/connector/file/local/LocalFileIT.java 
b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/java/org/apache/seatunnel/e2e/connector/file/local/LocalFileIT.java index d06dc9f890b..46213dde0f4 100644 --- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/java/org/apache/seatunnel/e2e/connector/file/local/LocalFileIT.java +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/java/org/apache/seatunnel/e2e/connector/file/local/LocalFileIT.java @@ -98,6 +98,9 @@ public class LocalFileIT extends TestSuiteBase { "/seatunnel/read/orc/name=tyrantlucifer/hobby=coding/e2e.orc", container); + ContainerUtil.copyFileIntoContainers( + "/orc/orc_for_cast.orc", "/seatunnel/read/orc_cast/e2e.orc", container); + ContainerUtil.copyFileIntoContainers( "/parquet/e2e.parquet", "/seatunnel/read/parquet/name=tyrantlucifer/hobby=coding/e2e.parquet", @@ -150,6 +153,8 @@ public void testLocalFileReadAndWrite(TestContainer container) helper.execute("/orc/local_file_orc_to_assert.conf"); // test read local orc file with projection helper.execute("/orc/local_file_orc_projection_to_assert.conf"); + // test read local orc file with projection and type cast + helper.execute("/orc/local_file_orc_to_assert_with_time_and_cast.conf"); // test write local parquet file helper.execute("/parquet/fake_to_local_file_parquet.conf"); // test read local parquet file diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/orc/local_file_orc_to_assert_with_time_and_cast.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/orc/local_file_orc_to_assert_with_time_and_cast.conf new file mode 100644 index 00000000000..476e21d923a --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/orc/local_file_orc_to_assert_with_time_and_cast.conf @@ -0,0 +1,126 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license 
agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +env { + parallelism = 1 + job.mode = "BATCH" + + # You can set spark configuration here + spark.app.name = "SeaTunnel" + spark.executor.instances = 2 + spark.executor.cores = 1 + spark.executor.memory = "1g" + spark.master = local +} + +source { + LocalFile { + path = "/seatunnel/read/orc_cast" + file_format_type = "orc" + result_table_name = "fake" + schema = { + fields { + c_map = "map" + c_array = "array" + c_string = string + c_boolean = boolean + c_tinyint = tinyint + // change smallint to bigint + c_smallint = bigint + // change int to bigint + c_int = bigint + c_bigint = bigint + // change float value to string + c_float = string + c_double = double + c_bytes = bytes + c_date = date + c_decimal = "decimal(38, 18)" + // change timestamp value to time + c_timestamp = time + c_row = { + c_map = "map" + c_array = "array" + c_string = string + c_boolean = boolean + c_tinyint = tinyint + c_smallint = smallint + // change int value to string in c_row + c_int = string + c_bigint = bigint + c_float = float + c_double = double + c_bytes = bytes + c_date = date + c_decimal = "decimal(38, 18)" + c_timestamp = timestamp + } + } + } + } +} + +sink { + Assert { + rules { + row_rules = [ + { + rule_type = MAX_ROW + rule_value = 5 + } + ], + field_rules = [ + { + field_name = c_string + 
field_type = string + field_value = [ + { + rule_type = NOT_NULL + } + ] + }, + { + field_name = c_float + field_type = string + field_value = [ + { + rule_type = NOT_NULL + } + ] + }, + { + field_name = c_boolean + field_type = boolean + field_value = [ + { + rule_type = NOT_NULL + } + ] + }, + { + field_name = c_timestamp + field_type = time + field_value = [ + { + rule_type = NOT_NULL + } + ] + } + ] + } + } +} \ No newline at end of file diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/orc/orc_for_cast.orc b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-file-local-e2e/src/test/resources/orc/orc_for_cast.orc new file mode 100644 index 0000000000000000000000000000000000000000..9ccbdc02cbe58836991274165f8cef70ec7a3493 GIT binary patch literal 4775 zcmb8wcT^K?z5w8%1OkK@A|)UyAiZ}{5Rl%9G-=XAsv-gk0ul^O5JjYiDjk&~A}t{G zqlpv^NDn2_dyyV*#JzXVch@~<_n(}}yzlw7_n8?(BUJ_fprwLSf>zSK{K1e8g+U(TK6mVlb^kcpZ(-^HhK zkaFktJyD>^0jK~zQWyqWN>aH1oCXXMq@X1uM*?D`yG*}>L%|?87_0(YCPPG#+Pg?v zVg0=VC#^uh;o)Skm7sn5X2(p1)c{9Xpj+zb;eIQV_#hsmZR;-8sK zu0Cz~%`8b_>4X`%0u$9Am~|`tTp!qvPTxTbrx!)prHlwh0L@sAYB%T;n=fN1Fq$+w-0JA+j!ULqq zGbOxlqSfh;&K~M|7sV6u!TZS~hq2E6IJS@D32Xbr&dz=%(lg$O00FbVQjfyX9E?EY z-A~sTI_a{q>KP=m=@Zy55SB*iAH|AV0#?}@k5aFdMqID;tua@%w|J)WBu?9Z7J1q+ zF;lWGg?o2E^Z|XQ!Kg4>4i(+0TWrZ>0XGC>ZEKP$o16Ilzc4`F4@+?*1c}YD~mA&fFMfAb!eSm2^bhD z`}j?9wFBUl+ErdDRR)@;=+oOns)1>VK+&l>Bp?6)U@Ak@SeP$rqcbZAMbQv6dL0^c z4FE=CEGC=SafB^cy+;e z!uvZJnGsnZ3*!3YXCoSaa#~iH1Vv5T{W4j21i1uBwt|-EA<$UP3{H3Ya zEh{+8D0F+``0dPg?DOc|_j7MUQ%BaA=>UL?+W_D?na+ir$~2yNRN_@BmsYEZ} zMNcOCTlwFpg@mxE_-II9`sgabDQKSL0oP$($d}3agQz>4JxyDdZD0>w@Hn`1mSx+t^!n z&_*?+meD3hNDqD`=c4n*mIvRqfyBV?mv>1h=DQy3HMX1j9f4jKz6e+xsW`V;zJnsP{hxKB-C*4+p&wHeArA0_`%{vAIOZrrmr%MwH`v^MjHB?9x}EX!qo}*RQ;WSWHK?~PkfHL4g9${k*;J zSo%G1S5-Hu+c-#?XO5UJ~F8mWS5W+df&M*#6PsxC=aset*&iR~ zlAj+OWGA*Pk7m9bO{%pZCjQLr{kHS{Rb6iBhxYy-b9Tu?^}{83tI>;1xZs%7;GL>G z^4z-p4-MmQI``fYS1046Vs@2(W#m2?4cGf^Hy-JFVJ>GsE-uXqM;@|-6WxFcIZ*uK 
zvq1V0;w{a>S=^V5#xJXM>eONEqp~glBCj84)14kK#We^ z+txiV;OePw&?+=g>=m4m#>J;y}fF7 zC@FEh9vL2}vu`qjRn=b|xc;diw)d)Hao}^z(z?!%ZHZOU!G2%geMi}3NyGKe3ih(A z1~)VRN`aI)UqL<+3{Q@gBA&vJzu~?)7U#`K@Dsz2yN|uc1)@XsHkMt^s?IX)nrnU{ zK0Bz|9P5&AVe#f&vwjp-_i8uGU%+g0V&S;H&i&!VO~nrEK!(|t=I8sHt0rsT$6HGD z=WBY8HXgDBHJC{K{7_)25R?`XQSf!0=*8Bf^aTWjy>!5C$Lt%j!g-EN=>}E0JQf9# z)?Lw{+^wxI6VqwO4;F6Q+?&nF^0`O!fp`tAxXgQ;ed(U+5%|=kR*0|N|L9VC89LXG zRksK$`P}*O))d7sh!(^Hjkw_YRr2;bDq%;`>3og()XazrEqhi10Iq|jjiz_?E38#||-$WcG za8J=|MOmC-hQqyFe%ueJrDx6sr;j zVu+Sl2+#eDxFGOX6uLCHxrzh#%_U(?`Y;n7DQ59PP1loL3Suk+aVre>KxP}4$jdxC z9b#n47mq=oE*U*-FP5w!t#b(+f6r7ZP4E?!j7)QNk3OdylSE`Ztb{e6BiwHCtt9>J zG?hX$+!J{zxe=mHm~r7=u`ZIf-Y3pJGPu3Fa_Z|$4b1lE-DN9ZXXj-KIoBePCb2v{ z?n}>dhrE4KPhO2=B`A+hx9s|kg3imO{QLl}gvW!!DQ)UQ#@72qeKKMDHR{wXHv(RB2S?2-XhT(m*EvUrtm4%avDK0U)V;#cfHCy=Yy6j;Fp%prIe)$ldMWk+~ z20p*M+*7iwo^eRua#cZms_jpHCyT^7r5Ssd|V;t9^a~m>^*y> zX(wHM({ixc2%P>2rV@r`euIWpB#yw#%(bpS&-wC* zP*S^mPK|YiZQDlA$3W4Ph1mLy9&_1L9W^NfjG@y(TuR+C#g()Ad1>r}mrt3)b$`gQ zR{Io`_d65`Vfhkuiv4^6S&X0g*h@>)6ll(s|l(*{;y{UUd@qTHGV8?@-6{EKT(gLWGSO+xGka;@1bp6Xfi%lWzx&US7 z!BP9vLmP99+)slr=>W(1=_^@b%lcWSQ!4g>Qwi>z%^!-v8AY~1oE9$U)Q&0B*uQpNXvN8Rnn^R==Bh23 zj}H!MZlqi-i}b*<{G`Y2;Ad2h8v$_$+u#imZ;9I;scOzWp-i1MUn^YCumfekL9e3Q5+XoR@0pGre&uszkz=dO5M_iMPe;Y`1WA8PyzyrwF!w zpKP~8GCs7#jS@cQC%o}8aYcz5;52p~@&dYU#%!0UJc;~f-f1D6CbgkRAfhz=QqV$k oulnNdsg;qbgEL?PHH-`q98Atd4nX*qLE_ZtZ3+rQBURS_0Vl~8yZ`_I literal 0 HcmV?d00001 From 470bb97434fc51f7f5622eedc83dce80fcfe574b Mon Sep 17 00:00:00 2001 From: yulj <1435240630@qq.com> Date: Fri, 29 Mar 2024 10:40:19 +0800 Subject: [PATCH 41/59] [Bugfix][TDengine] Fix the issue of losing the driver due to multiple calls to the submit job REST API #6581 (#6596) --- .../connectors/seatunnel/tdengine/source/TDengineSource.java | 3 +++ .../seatunnel/tdengine/source/TDengineSourceReader.java | 4 ++++ 2 files changed, 7 insertions(+) diff --git 
a/seatunnel-connectors-v2/connector-tdengine/src/main/java/org/apache/seatunnel/connectors/seatunnel/tdengine/source/TDengineSource.java b/seatunnel-connectors-v2/connector-tdengine/src/main/java/org/apache/seatunnel/connectors/seatunnel/tdengine/source/TDengineSource.java index 17ef9176ca5..2f2e6a3f98f 100644 --- a/seatunnel-connectors-v2/connector-tdengine/src/main/java/org/apache/seatunnel/connectors/seatunnel/tdengine/source/TDengineSource.java +++ b/seatunnel-connectors-v2/connector-tdengine/src/main/java/org/apache/seatunnel/connectors/seatunnel/tdengine/source/TDengineSource.java @@ -56,6 +56,7 @@ import static org.apache.seatunnel.connectors.seatunnel.tdengine.config.TDengineSourceConfig.ConfigNames.URL; import static org.apache.seatunnel.connectors.seatunnel.tdengine.config.TDengineSourceConfig.ConfigNames.USERNAME; import static org.apache.seatunnel.connectors.seatunnel.tdengine.config.TDengineSourceConfig.buildSourceConfig; +import static org.apache.seatunnel.connectors.seatunnel.tdengine.utils.TDengineUtil.checkDriverExist; /** * TDengine source each split corresponds one subtable @@ -135,6 +136,8 @@ private StableMetadata getStableMetadata(TDengineSourceConfig config) throws SQL config.getUsername(), "&password=", config.getPassword()); + // check td driver whether exist and if not, try to register + checkDriverExist(jdbcUrl); try (Connection conn = DriverManager.getConnection(jdbcUrl)) { try (Statement statement = conn.createStatement()) { ResultSet metaResultSet = diff --git a/seatunnel-connectors-v2/connector-tdengine/src/main/java/org/apache/seatunnel/connectors/seatunnel/tdengine/source/TDengineSourceReader.java b/seatunnel-connectors-v2/connector-tdengine/src/main/java/org/apache/seatunnel/connectors/seatunnel/tdengine/source/TDengineSourceReader.java index 62c67fd0117..6782f085bd3 100644 --- a/seatunnel-connectors-v2/connector-tdengine/src/main/java/org/apache/seatunnel/connectors/seatunnel/tdengine/source/TDengineSourceReader.java +++ 
b/seatunnel-connectors-v2/connector-tdengine/src/main/java/org/apache/seatunnel/connectors/seatunnel/tdengine/source/TDengineSourceReader.java @@ -44,6 +44,8 @@ import java.util.Properties; import java.util.Set; +import static org.apache.seatunnel.connectors.seatunnel.tdengine.utils.TDengineUtil.checkDriverExist; + @Slf4j public class TDengineSourceReader implements SourceReader { @@ -108,6 +110,8 @@ public void open() { // @bobo (tdengine) connProps.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "false"); try { + // check td driver whether exist and if not, try to register + checkDriverExist(jdbcUrl); conn = DriverManager.getConnection(jdbcUrl, connProps); } catch (SQLException e) { throw new TDengineConnectorException( From 505c1252bd35ef4ec07ec31cdecb92b660000a00 Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Fri, 29 Mar 2024 10:44:34 +0800 Subject: [PATCH 42/59] [Improve][Connector-V2] Add ElasticSearch type converter (#6546) --- .../catalog/ElasticSearchCatalog.java | 11 +- .../ElasticSearchDataTypeConvertor.java | 89 ++--- .../catalog/ElasticSearchTypeConverter.java | 364 ++++++++++++++++++ .../elasticsearch/client/EsRestClient.java | 134 +++++-- .../elasticsearch/client/EsType.java | 76 ++++ .../elasticsearch/dto/IndexInfo.java | 4 +- .../serialize/ElasticsearchRowSerializer.java | 37 +- .../DefaultSeaTunnelRowDeserializer.java | 24 +- .../elasticsearch/sink/ElasticsearchSink.java | 4 +- .../sink/ElasticsearchSinkFactory.java | 17 +- .../sink/ElasticsearchSinkWriter.java | 10 +- .../source/ElasticsearchSource.java | 12 +- .../ElasticsearchRowSerializerTest.java | 8 +- .../elasticsearch/ElasticsearchIT.java | 47 ++- ...asticsearch_source_and_sink_full_type.conf | 97 +++++ .../st_index_full_type_data.json | 137 +++++++ .../st_index_full_type_mapping.json | 162 ++++++++ 17 files changed, 1104 insertions(+), 129 deletions(-) create mode 100644 
seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchTypeConverter.java create mode 100644 seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/client/EsType.java create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/resources/elasticsearch/elasticsearch_source_and_sink_full_type.conf create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/resources/elasticsearch/st_index_full_type_data.json create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/resources/elasticsearch/st_index_full_type_mapping.json diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchCatalog.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchCatalog.java index 066a69c2dc3..b1eb60e2899 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchCatalog.java +++ b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchCatalog.java @@ -32,7 +32,9 @@ import org.apache.seatunnel.api.table.catalog.exception.DatabaseNotExistException; import org.apache.seatunnel.api.table.catalog.exception.TableAlreadyExistException; import org.apache.seatunnel.api.table.catalog.exception.TableNotExistException; +import org.apache.seatunnel.api.table.converter.BasicTypeDefine; import org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsRestClient; +import org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType; import 
org.apache.seatunnel.connectors.seatunnel.elasticsearch.dto.ElasticsearchClusterInfo; import org.apache.seatunnel.connectors.seatunnel.elasticsearch.dto.source.IndexDocsCount; @@ -146,10 +148,8 @@ public CatalogTable getTable(TablePath tablePath) throws CatalogException, TableNotExistException { // Get the index mapping? checkNotNull(tablePath, "tablePath cannot be null"); - ElasticSearchDataTypeConvertor elasticSearchDataTypeConvertor = - new ElasticSearchDataTypeConvertor(); TableSchema.Builder builder = TableSchema.builder(); - Map fieldTypeMapping = + Map> fieldTypeMapping = esRestClient.getFieldTypeMapping(tablePath.getTableName(), Collections.emptyList()); buildColumnsWithErrorCheck( tablePath, @@ -159,8 +159,9 @@ public CatalogTable getTable(TablePath tablePath) // todo: we need to add a new type TEXT or add length in STRING type return PhysicalColumn.of( nameAndType.getKey(), - elasticSearchDataTypeConvertor.toSeaTunnelType( - nameAndType.getKey(), nameAndType.getValue()), + ElasticSearchTypeConverter.INSTANCE + .convert(nameAndType.getValue()) + .getDataType(), (Long) null, true, null, diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchDataTypeConvertor.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchDataTypeConvertor.java index 0e081f83efd..7aecdfb9ea6 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchDataTypeConvertor.java +++ b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchDataTypeConvertor.java @@ -17,11 +17,12 @@ package org.apache.seatunnel.connectors.seatunnel.elasticsearch.catalog; +import org.apache.seatunnel.api.table.catalog.Column; import 
org.apache.seatunnel.api.table.catalog.DataTypeConvertor; -import org.apache.seatunnel.api.table.type.BasicType; -import org.apache.seatunnel.api.table.type.LocalTimeType; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.converter.BasicTypeDefine; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; -import org.apache.seatunnel.api.table.type.SqlType; +import org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType; import com.google.auto.service.AutoService; @@ -29,22 +30,11 @@ import static com.google.common.base.Preconditions.checkNotNull; +/** @deprecated instead by {@link ElasticSearchTypeConverter} */ +@Deprecated @AutoService(DataTypeConvertor.class) public class ElasticSearchDataTypeConvertor implements DataTypeConvertor { - public static final String STRING = "string"; - public static final String KEYWORD = "keyword"; - public static final String TEXT = "text"; - public static final String BOOLEAN = "boolean"; - public static final String BYTE = "byte"; - public static final String SHORT = "short"; - public static final String INTEGER = "integer"; - public static final String LONG = "long"; - public static final String FLOAT = "float"; - public static final String HALF_FLOAT = "half_float"; - public static final String DOUBLE = "double"; - public static final String DATE = "date"; - @Override public SeaTunnelDataType toSeaTunnelType(String field, String connectorDataType) { return toSeaTunnelType(field, connectorDataType, null); @@ -54,34 +44,14 @@ public SeaTunnelDataType toSeaTunnelType(String field, String connectorDataTy public SeaTunnelDataType toSeaTunnelType( String field, String connectorDataType, Map dataTypeProperties) { checkNotNull(connectorDataType, "connectorDataType can not be null"); - switch (connectorDataType) { - case STRING: - return BasicType.STRING_TYPE; - case KEYWORD: - return BasicType.STRING_TYPE; - case TEXT: - return BasicType.STRING_TYPE; - case BOOLEAN: 
- return BasicType.BOOLEAN_TYPE; - case BYTE: - return BasicType.BYTE_TYPE; - case SHORT: - return BasicType.SHORT_TYPE; - case INTEGER: - return BasicType.INT_TYPE; - case LONG: - return BasicType.LONG_TYPE; - case FLOAT: - return BasicType.FLOAT_TYPE; - case HALF_FLOAT: - return BasicType.FLOAT_TYPE; - case DOUBLE: - return BasicType.DOUBLE_TYPE; - case DATE: - return LocalTimeType.LOCAL_DATE_TIME_TYPE; - default: - return BasicType.STRING_TYPE; - } + BasicTypeDefine typeDefine = + BasicTypeDefine.builder() + .name(field) + .columnType(connectorDataType) + .dataType(connectorDataType) + .build(); + + return ElasticSearchTypeConverter.INSTANCE.convert(typeDefine).getDataType(); } @Override @@ -90,29 +60,14 @@ public String toConnectorType( SeaTunnelDataType seaTunnelDataType, Map dataTypeProperties) { checkNotNull(seaTunnelDataType, "seaTunnelDataType can not be null"); - SqlType sqlType = seaTunnelDataType.getSqlType(); - switch (sqlType) { - case STRING: - return STRING; - case BOOLEAN: - return BOOLEAN; - case BYTES: - return BYTE; - case TINYINT: - return SHORT; - case INT: - return INTEGER; - case BIGINT: - return LONG; - case FLOAT: - return FLOAT; - case DOUBLE: - return DOUBLE; - case TIMESTAMP: - return DATE; - default: - return STRING; - } + Column column = + PhysicalColumn.builder() + .name(field) + .dataType(seaTunnelDataType) + .nullable(true) + .build(); + BasicTypeDefine typeDefine = ElasticSearchTypeConverter.INSTANCE.reconvert(column); + return typeDefine.getColumnType(); } @Override diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchTypeConverter.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchTypeConverter.java new file mode 100644 index 00000000000..c7e21d1385c --- /dev/null +++ 
b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/catalog/ElasticSearchTypeConverter.java @@ -0,0 +1,364 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.elasticsearch.catalog; + +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.converter.BasicTypeConverter; +import org.apache.seatunnel.api.table.converter.BasicTypeDefine; +import org.apache.seatunnel.api.table.converter.TypeConverter; +import org.apache.seatunnel.api.table.type.ArrayType; +import org.apache.seatunnel.api.table.type.BasicType; +import org.apache.seatunnel.api.table.type.DecimalType; +import org.apache.seatunnel.api.table.type.LocalTimeType; +import org.apache.seatunnel.api.table.type.MapType; +import org.apache.seatunnel.api.table.type.SeaTunnelDataType; +import org.apache.seatunnel.api.table.type.SeaTunnelRowType; +import org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType; + +import com.google.auto.service.AutoService; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + 
+import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.AGGREGATE_METRIC_DOUBLE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.BINARY; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.BOOLEAN; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.BYTE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.COMPLETION; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.DATE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.DATE_NANOS; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.DATE_RANGE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.DENSE_VECTOR; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.DOUBLE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.DOUBLE_RANGE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.FLATTENED; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.FLOAT; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.FLOAT_RANGE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.GEO_POINT; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.GEO_SHAPE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.HALF_FLOAT; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.HISTOGRAM; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.INTEGER; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.INTEGER_RANGE; +import static 
org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.IP; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.IP_RANGE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.JOIN; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.KEYWORD; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.LONG; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.LONG_RANGE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.MATCH_ONLY_TEXT; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.NESTED; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.OBJECT; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.PERCOLATOR; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.POINT; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.RANK_FEATURE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.RANK_FEATURES; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.SEARCH_AS_YOU_TYPE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.SHAPE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.SHORT; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.SPARSE_VECTOR; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.STRING; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.TEXT; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.TOKEN_COUNT; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.UNSIGNED_LONG; +import 
static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.VERSION; + +@AutoService(TypeConverter.class) +public class ElasticSearchTypeConverter implements BasicTypeConverter> { + public static final ElasticSearchTypeConverter INSTANCE = new ElasticSearchTypeConverter(); + + @Override + public String identifier() { + return "Elasticsearch"; + } + + @Override + public Column convert(BasicTypeDefine typeDefine) { + PhysicalColumn.PhysicalColumnBuilder builder = + PhysicalColumn.builder() + .name(typeDefine.getName()) + .sourceType(typeDefine.getColumnType()) + .nullable(typeDefine.isNullable()) + .defaultValue(typeDefine.getDefaultValue()) + .comment(typeDefine.getComment()); + String type = typeDefine.getDataType().toLowerCase(); + switch (type) { + case AGGREGATE_METRIC_DOUBLE: + List metrics = + (List) typeDefine.getNativeType().getOptions().get("metrics"); + builder.dataType( + new SeaTunnelRowType( + metrics.toArray(new String[0]), + metrics.stream() + .map(s -> BasicType.DOUBLE_TYPE) + .toArray(SeaTunnelDataType[]::new))); + break; + case DENSE_VECTOR: + String elementType = + typeDefine.getNativeType().getOptions().get("element_type").toString(); + if (elementType.equals("byte")) { + builder.dataType(ArrayType.BYTE_ARRAY_TYPE); + } else { + builder.dataType(ArrayType.FLOAT_ARRAY_TYPE); + } + break; + case BYTE: + builder.dataType(BasicType.BYTE_TYPE); + break; + case BOOLEAN: + builder.dataType(BasicType.BOOLEAN_TYPE); + break; + case DATE: + builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE); + builder.scale(3); + break; + case DATE_NANOS: + builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE); + builder.scale(9); + break; + case DOUBLE: + case RANK_FEATURE: + builder.dataType(BasicType.DOUBLE_TYPE); + break; + case FLOAT: + case HALF_FLOAT: + builder.dataType(BasicType.FLOAT_TYPE); + break; + case HISTOGRAM: + SeaTunnelRowType rowType = + new SeaTunnelRowType( + new String[] {"values", "counts"}, + new SeaTunnelDataType[] { + 
ArrayType.DOUBLE_ARRAY_TYPE, ArrayType.LONG_ARRAY_TYPE + }); + builder.dataType(rowType); + break; + case INTEGER: + case TOKEN_COUNT: + builder.dataType(BasicType.INT_TYPE); + break; + case LONG: + builder.dataType(BasicType.LONG_TYPE); + break; + case SHORT: + builder.dataType(BasicType.SHORT_TYPE); + break; + case OBJECT: + Map> typeInfo = + (Map) typeDefine.getNativeType().getOptions(); + SeaTunnelRowType object = + new SeaTunnelRowType( + typeInfo.keySet().toArray(new String[0]), + typeInfo.values().stream() + .map(this::convert) + .map(Column::getDataType) + .toArray(SeaTunnelDataType[]::new)); + builder.dataType(object); + break; + case INTEGER_RANGE: + builder.dataType(new MapType<>(BasicType.STRING_TYPE, BasicType.INT_TYPE)); + break; + case FLOAT_RANGE: + builder.dataType(new MapType<>(BasicType.STRING_TYPE, BasicType.FLOAT_TYPE)); + break; + case LONG_RANGE: + builder.dataType(new MapType<>(BasicType.STRING_TYPE, BasicType.LONG_TYPE)); + break; + case DOUBLE_RANGE: + builder.dataType(new MapType<>(BasicType.STRING_TYPE, BasicType.DOUBLE_TYPE)); + break; + case DATE_RANGE: + builder.dataType( + new MapType<>(BasicType.STRING_TYPE, LocalTimeType.LOCAL_DATE_TIME_TYPE)); + break; + case IP_RANGE: + builder.dataType(new MapType<>(BasicType.STRING_TYPE, BasicType.STRING_TYPE)); + break; + case UNSIGNED_LONG: + builder.dataType(new DecimalType(20, 0)); + builder.columnLength(20L); + builder.scale(0); + break; + case TEXT: + case BINARY: + case VERSION: + case IP: + case JOIN: + case KEYWORD: + case FLATTENED: + case GEO_POINT: + case COMPLETION: + case STRING: + case GEO_SHAPE: + case NESTED: + case PERCOLATOR: + case POINT: + case RANK_FEATURES: + case SEARCH_AS_YOU_TYPE: + case SPARSE_VECTOR: + case MATCH_ONLY_TEXT: + case SHAPE: + default: + builder.dataType(BasicType.STRING_TYPE); + break; + } + return builder.build(); + } + + @Override + public BasicTypeDefine reconvert(Column column) { + BasicTypeDefine.BasicTypeDefineBuilder builder = + 
BasicTypeDefine.builder() + .name(column.getName()) + .nullable(column.isNullable()) + .comment(column.getComment()) + .defaultValue(column.getDefaultValue()); + switch (column.getDataType().getSqlType()) { + case BOOLEAN: + builder.columnType(BOOLEAN); + builder.dataType(BOOLEAN); + builder.nativeType(new EsType(BOOLEAN, new HashMap<>())); + break; + case BYTES: + builder.columnType(BINARY); + builder.dataType(BINARY); + builder.nativeType(new EsType(BINARY, new HashMap<>())); + break; + case TINYINT: + builder.columnType(BYTE); + builder.dataType(BYTE); + builder.nativeType(new EsType(BYTE, new HashMap<>())); + break; + case SMALLINT: + builder.columnType(SHORT); + builder.dataType(SHORT); + builder.nativeType(new EsType(SHORT, new HashMap<>())); + break; + case INT: + builder.columnType(INTEGER); + builder.dataType(INTEGER); + builder.nativeType(new EsType(INTEGER, new HashMap<>())); + break; + case BIGINT: + builder.columnType(LONG); + builder.dataType(LONG); + builder.nativeType(new EsType(LONG, new HashMap<>())); + break; + case FLOAT: + builder.columnType(FLOAT); + builder.dataType(FLOAT); + builder.nativeType(new EsType(FLOAT, new HashMap<>())); + break; + case DOUBLE: + builder.columnType(DOUBLE); + builder.dataType(DOUBLE); + builder.nativeType(new EsType(DOUBLE, new HashMap<>())); + break; + case DATE: + case TIMESTAMP: + Map option = new HashMap<>(); + if (column.getScale() != null && column.getScale() > 3) { + option.put("format", "strict_date_optional_time||epoch_millis"); + builder.columnType(DATE_NANOS); + builder.dataType(DATE_NANOS); + builder.nativeType(new EsType(DATE_NANOS, option)); + } else { + option.put("format", "strict_date_optional_time_nanos||epoch_millis"); + builder.columnType(DATE); + builder.dataType(DATE); + builder.nativeType(new EsType(DATE, option)); + } + break; + case DECIMAL: + builder.columnType(TEXT); + builder.dataType(TEXT); + builder.nativeType(new EsType(TEXT, new HashMap<>())); + break; + case MAP: + 
builder.columnType(FLATTENED); + builder.dataType(FLATTENED); + builder.nativeType(new EsType(FLATTENED, new HashMap<>())); + break; + case ARRAY: + BasicType type = ((ArrayType) column.getDataType()).getElementType(); + if (type.equals(BasicType.BYTE_TYPE)) { + builder.columnType(BINARY); + builder.dataType(BINARY); + builder.nativeType(new EsType(BINARY, new HashMap<>())); + } else if (type.equals(BasicType.SHORT_TYPE)) { + builder.columnType(SHORT); + builder.dataType(SHORT); + builder.nativeType(new EsType(SHORT, new HashMap<>())); + } else if (type.equals(BasicType.INT_TYPE)) { + builder.columnType(INTEGER); + builder.dataType(INTEGER); + builder.nativeType(new EsType(INTEGER, new HashMap<>())); + } else if (type.equals(BasicType.LONG_TYPE)) { + builder.columnType(LONG); + builder.dataType(LONG); + builder.nativeType(new EsType(LONG, new HashMap<>())); + } else if (type.equals(BasicType.FLOAT_TYPE)) { + builder.columnType(FLOAT); + builder.dataType(FLOAT); + builder.nativeType(new EsType(FLOAT, new HashMap<>())); + } else if (type.equals(BasicType.DOUBLE_TYPE)) { + builder.columnType(DOUBLE); + builder.dataType(DOUBLE); + builder.nativeType(new EsType(DOUBLE, new HashMap<>())); + } else if (type.equals(BasicType.STRING_TYPE)) { + builder.columnType(TEXT); + builder.dataType(TEXT); + builder.nativeType(new EsType(TEXT, new HashMap<>())); + } else { + builder.columnType(TEXT); + builder.dataType(TEXT); + builder.nativeType(new EsType(TEXT, new HashMap<>())); + } + break; + case ROW: + builder.columnType(OBJECT); + builder.dataType(OBJECT); + SeaTunnelRowType row = (SeaTunnelRowType) column.getDataType(); + Map> typeInfo = new HashMap<>(); + for (int i = 0; i < row.getFieldNames().length; i++) { + typeInfo.put( + row.getFieldName(i), + reconvert( + PhysicalColumn.of( + row.getFieldName(i), + row.getFieldType(i), + (Long) null, + true, + null, + null))); + } + builder.nativeType(new EsType(OBJECT, (Map) typeInfo)); + break; + case TIME: + case NULL: + case STRING: 
+ default: + builder.columnType(TEXT); + builder.dataType(TEXT); + builder.nativeType(new EsType(TEXT, new HashMap<>())); + } + return builder.build(); + } +} diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/client/EsRestClient.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/client/EsRestClient.java index 50c47d1334f..18c9b7c109b 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/client/EsRestClient.java +++ b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/client/EsRestClient.java @@ -19,10 +19,12 @@ import org.apache.seatunnel.shade.com.fasterxml.jackson.databind.JsonNode; import org.apache.seatunnel.shade.com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.seatunnel.shade.com.fasterxml.jackson.databind.node.ArrayNode; import org.apache.seatunnel.shade.com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.seatunnel.shade.com.fasterxml.jackson.databind.node.TextNode; import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.table.converter.BasicTypeDefine; import org.apache.seatunnel.common.utils.JsonUtils; import org.apache.seatunnel.connectors.seatunnel.elasticsearch.config.EsClusterConnectionConfig; import org.apache.seatunnel.connectors.seatunnel.elasticsearch.dto.BulkResponse; @@ -34,6 +36,7 @@ import org.apache.seatunnel.connectors.seatunnel.elasticsearch.util.SSLUtils; import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.http.HttpHost; import org.apache.http.HttpStatus; import org.apache.http.auth.AuthScope; @@ -65,6 +68,13 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static 
org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.AGGREGATE_METRIC_DOUBLE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.ALIAS; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.DATE; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.DATE_NANOS; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.DENSE_VECTOR; +import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType.OBJECT; + @Slf4j public class EsRestClient { @@ -174,7 +184,7 @@ private static RestClientBuilder getRestClientBuilder( keystorePassword, truststorePath, truststorePassword); - sslContext.ifPresent(e -> httpClientBuilder.setSSLContext(e)); + sslContext.ifPresent(httpClientBuilder::setSSLContext); } else { SSLContext sslContext = SSLContexts.custom() @@ -237,7 +247,7 @@ public ElasticsearchClusterInfo getClusterInfo() { .clusterVersion(versionNode.get("number").asText()) .distribution( Optional.ofNullable(versionNode.get("distribution")) - .map(e -> e.asText()) + .map(JsonNode::asText) .orElse(null)) .build(); } catch (IOException e) { @@ -276,9 +286,7 @@ public ScrollResult searchByScroll( param.put("sort", new String[] {"_doc"}); param.put("size", scrollSize); String endpoint = "/" + index + "/_search?scroll=" + scrollTime; - ScrollResult scrollResult = - getDocsFromScrollRequest(endpoint, JsonUtils.toJsonString(param)); - return scrollResult; + return getDocsFromScrollRequest(endpoint, JsonUtils.toJsonString(param)); } /** @@ -291,9 +299,7 @@ public ScrollResult searchWithScrollId(String scrollId, String scrollTime) { Map param = new HashMap<>(); param.put("scroll_id", scrollId); param.put("scroll", scrollTime); - ScrollResult scrollResult = - getDocsFromScrollRequest("/_search/scroll", JsonUtils.toJsonString(param)); - return scrollResult; + return getDocsFromScrollRequest("/_search/scroll", 
JsonUtils.toJsonString(param)); } private ScrollResult getDocsFromScrollRequest(String endpoint, String requestBody) { @@ -319,8 +325,7 @@ private ScrollResult getDocsFromScrollRequest(String endpoint, String requestBod "POST %s,total shards(%d)!= successful shards(%d)", endpoint, totalShards, successful)); - ScrollResult scrollResult = getDocsFromScrollResponse(responseJson); - return scrollResult; + return getDocsFromScrollResponse(responseJson); } else { throw new ElasticsearchConnectorException( ElasticsearchConnectorErrorCode.SCROLL_REQUEST_ERROR, @@ -345,13 +350,11 @@ private ScrollResult getDocsFromScrollResponse(ObjectNode responseJson) { List> docs = new ArrayList<>(hitsNode.size()); scrollResult.setDocs(docs); - Iterator iter = hitsNode.iterator(); - while (iter.hasNext()) { + for (JsonNode jsonNode : hitsNode) { Map doc = new HashMap<>(); - JsonNode hitNode = iter.next(); - doc.put("_index", hitNode.get("_index").textValue()); - doc.put("_id", hitNode.get("_id").textValue()); - JsonNode source = hitNode.get("_source"); + doc.put("_index", jsonNode.get("_index").textValue()); + doc.put("_id", jsonNode.get("_id").textValue()); + JsonNode source = jsonNode.get("_source"); for (Iterator> iterator = source.fields(); iterator.hasNext(); ) { Map.Entry entry = iterator.next(); @@ -379,9 +382,7 @@ public List getIndexDocsCount(String index) { } if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) { String entity = EntityUtils.toString(response.getEntity()); - List indexDocsCounts = - JsonUtils.toList(entity, IndexDocsCount.class); - return indexDocsCounts; + return JsonUtils.toList(entity, IndexDocsCount.class); } else { throw new ElasticsearchConnectorException( ElasticsearchConnectorErrorCode.GET_INDEX_DOCS_COUNT_FAILED, @@ -423,10 +424,16 @@ public List listIndex() { } } - // todo: We don't support set the index mapping now. 
public void createIndex(String indexName) { + createIndex(indexName, null); + } + + public void createIndex(String indexName, String mapping) { String endpoint = String.format("/%s", indexName); Request request = new Request("PUT", endpoint); + if (StringUtils.isNotEmpty(mapping)) { + request.setJsonEntity(mapping); + } try { Response response = restClient.performRequest(request); if (response == null) { @@ -479,10 +486,11 @@ public void dropIndex(String tableName) { * @param index index name * @return {key-> field name,value->es type} */ - public Map getFieldTypeMapping(String index, List source) { + public Map> getFieldTypeMapping( + String index, List source) { String endpoint = String.format("/%s/_mappings", index); Request request = new Request("GET", endpoint); - Map mapping = new HashMap<>(); + Map> mapping = new HashMap<>(); try { Response response = restClient.performRequest(request); if (response == null) { @@ -525,9 +533,9 @@ public Map getFieldTypeMapping(String index, List source return mapping; } - private static Map getFieldTypeMappingFromProperties( + private static Map> getFieldTypeMappingFromProperties( JsonNode properties, List source) { - Map allElasticSearchFieldTypeInfoMap = new HashMap<>(); + Map> allElasticSearchFieldTypeInfoMap = new HashMap<>(); properties .fields() .forEachRemaining( @@ -535,26 +543,96 @@ private static Map getFieldTypeMappingFromProperties( String fieldName = entry.getKey(); JsonNode fieldProperty = entry.getValue(); if (fieldProperty.has("type")) { - allElasticSearchFieldTypeInfoMap.put( - fieldName, fieldProperty.get("type").asText()); + String type = fieldProperty.get("type").asText(); + BasicTypeDefine.BasicTypeDefineBuilder typeDefine = + BasicTypeDefine.builder() + .name(fieldName) + .columnType(type) + .dataType(type); + if (type.equalsIgnoreCase(AGGREGATE_METRIC_DOUBLE)) { + ArrayNode metrics = ((ArrayNode) fieldProperty.get("metrics")); + List metricsList = new ArrayList<>(); + for (JsonNode node : metrics) { + 
metricsList.add(node.asText()); + } + Map options = new HashMap<>(); + options.put("metrics", metricsList); + typeDefine.nativeType(new EsType(type, options)); + } else if (type.equalsIgnoreCase(ALIAS)) { + String path = fieldProperty.get("path").asText(); + Map options = new HashMap<>(); + options.put("path", path); + typeDefine.nativeType(new EsType(type, options)); + } else if (type.equalsIgnoreCase(DENSE_VECTOR)) { + String elementType = + fieldProperty.get("element_type") == null + ? "float" + : fieldProperty.get("element_type").asText(); + Map options = new HashMap<>(); + options.put("element_type", elementType); + typeDefine.nativeType(new EsType(type, options)); + } else if (type.equalsIgnoreCase(DATE) + || type.equalsIgnoreCase(DATE_NANOS)) { + String format = + fieldProperty.get("format") != null + ? fieldProperty.get("format").asText() + : "strict_date_optional_time_nanos||epoch_millis"; + Map options = new HashMap<>(); + options.put("format", format); + typeDefine.nativeType(new EsType(type, options)); + } else { + typeDefine.nativeType(new EsType(type, new HashMap<>())); + } + allElasticSearchFieldTypeInfoMap.put(fieldName, typeDefine.build()); + } else if (fieldProperty.has("properties")) { + // it should be object type + JsonNode propertiesNode = fieldProperty.get("properties"); + List fields = new ArrayList<>(); + propertiesNode.fieldNames().forEachRemaining(fields::add); + Map> subFieldTypeInfoMap = + getFieldTypeMappingFromProperties(propertiesNode, fields); + BasicTypeDefine.BasicTypeDefineBuilder typeDefine = + BasicTypeDefine.builder() + .name(fieldName) + .columnType(OBJECT) + .dataType(OBJECT); + typeDefine.nativeType( + new EsType(OBJECT, (Map) subFieldTypeInfoMap)); + allElasticSearchFieldTypeInfoMap.put(fieldName, typeDefine.build()); } }); if (CollectionUtils.isEmpty(source)) { return allElasticSearchFieldTypeInfoMap; } + allElasticSearchFieldTypeInfoMap.forEach( + (fieldName, fieldType) -> { + if 
(fieldType.getDataType().equalsIgnoreCase(ALIAS)) { + BasicTypeDefine type = + allElasticSearchFieldTypeInfoMap.get( + fieldType.getNativeType().getOptions().get("path")); + if (type != null) { + allElasticSearchFieldTypeInfoMap.put(fieldName, type); + } + } + }); + return source.stream() .collect( Collectors.toMap( Function.identity(), fieldName -> { - String fieldType = + BasicTypeDefine fieldType = allElasticSearchFieldTypeInfoMap.get(fieldName); if (fieldType == null) { log.warn( "fail to get elasticsearch field {} mapping type,so give a default type text", fieldName); - return "text"; + return BasicTypeDefine.builder() + .name(fieldName) + .columnType("text") + .dataType("text") + .build(); } return fieldType; })); diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/client/EsType.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/client/EsType.java new file mode 100644 index 00000000000..921ed44e98e --- /dev/null +++ b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/client/EsType.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.elasticsearch.client; + +import lombok.AllArgsConstructor; +import lombok.Getter; + +import java.util.Map; + +@Getter +@AllArgsConstructor +public class EsType { + + public static final String AGGREGATE_METRIC_DOUBLE = "aggregate_metric_double"; + public static final String ALIAS = "alias"; + public static final String BINARY = "binary"; + public static final String BYTE = "byte"; + public static final String BOOLEAN = "boolean"; + public static final String COMPLETION = "completion"; + public static final String DATE = "date"; + public static final String DATE_NANOS = "date_nanos"; + public static final String DENSE_VECTOR = "dense_vector"; + public static final String DOUBLE = "double"; + public static final String FLATTENED = "flattened"; + public static final String FLOAT = "float"; + public static final String GEO_POINT = "geo_point"; + public static final String GEO_SHAPE = "geo_shape"; + public static final String POINT = "point"; + public static final String INTEGER_RANGE = "integer_range"; + public static final String FLOAT_RANGE = "float_range"; + public static final String LONG_RANGE = "long_range"; + public static final String DOUBLE_RANGE = "double_range"; + public static final String DATE_RANGE = "date_range"; + public static final String IP_RANGE = "ip_range"; + public static final String HALF_FLOAT = "half_float"; + public static final String SCALED_FLOAT = "scaled_float"; + public static final String HISTOGRAM = "histogram"; + public static final String INTEGER = "integer"; + public static final String IP = "ip"; + public static final String JOIN = "join"; + public static final String KEYWORD = "keyword"; + public static final String LONG = "long"; + public static final String NESTED = "nested"; + public static final String OBJECT = "object"; + public static final String PERCOLATOR = 
"percolator"; + public static final String RANK_FEATURE = "rank_feature"; + public static final String RANK_FEATURES = "rank_features"; + public static final String SEARCH_AS_YOU_TYPE = "search_as_you_type"; + public static final String SHORT = "short"; + public static final String SHAPE = "shape"; + public static final String STRING = "string"; + public static final String SPARSE_VECTOR = "sparse_vector"; + public static final String TEXT = "text"; + public static final String MATCH_ONLY_TEXT = "match_only_text"; + public static final String TOKEN_COUNT = "token_count"; + public static final String UNSIGNED_LONG = "unsigned_long"; + public static final String VERSION = "version"; + + private String type; + private Map options; +} diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/dto/IndexInfo.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/dto/IndexInfo.java index cb10ed58c0e..67226341b5d 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/dto/IndexInfo.java +++ b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/dto/IndexInfo.java @@ -31,8 +31,8 @@ public class IndexInfo { private String[] primaryKeys; private String keyDelimiter; - public IndexInfo(ReadonlyConfig config) { - index = config.get(SinkConfig.INDEX); + public IndexInfo(String index, ReadonlyConfig config) { + this.index = index; type = config.get(SinkConfig.INDEX_TYPE); if (config.getOptional(SinkConfig.PRIMARY_KEYS).isPresent()) { primaryKeys = config.get(SinkConfig.PRIMARY_KEYS).toArray(new String[0]); diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/serialize/ElasticsearchRowSerializer.java 
b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/serialize/ElasticsearchRowSerializer.java index 584c373ae74..2f7eb86b91e 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/serialize/ElasticsearchRowSerializer.java +++ b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/serialize/ElasticsearchRowSerializer.java @@ -36,6 +36,7 @@ import java.time.temporal.Temporal; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.function.Function; @@ -82,7 +83,7 @@ public String serializeRow(SeaTunnelRow row) { private String serializeUpsert(SeaTunnelRow row) { String key = keyExtractor.apply(row); - Map document = toDocumentMap(row); + Map document = toDocumentMap(row, seaTunnelRowType); String documentStr; try { @@ -161,22 +162,44 @@ private String serializeDelete(SeaTunnelRow row) { .toString(); } - private Map toDocumentMap(SeaTunnelRow row) { - String[] fieldNames = seaTunnelRowType.getFieldNames(); + private Map toDocumentMap(SeaTunnelRow row, SeaTunnelRowType rowType) { + String[] fieldNames = rowType.getFieldNames(); Map doc = new HashMap<>(fieldNames.length); Object[] fields = row.getFields(); for (int i = 0; i < fieldNames.length; i++) { Object value = fields[i]; - if (value instanceof Temporal) { - // jackson not support jdk8 new time api - doc.put(fieldNames[i], value.toString()); + if (value == null) { + } else if (value instanceof SeaTunnelRow) { + doc.put( + fieldNames[i], + toDocumentMap( + (SeaTunnelRow) value, (SeaTunnelRowType) rowType.getFieldType(i))); } else { - doc.put(fieldNames[i], value); + doc.put(fieldNames[i], convertValue(value)); } } return doc; } + private Object convertValue(Object value) { + if (value instanceof Temporal) { + // jackson not support jdk8 new time api + return value.toString(); + 
} else if (value instanceof Map) { + for (Map.Entry entry : ((Map) value).entrySet()) { + ((Map) value).put(entry.getKey(), convertValue(entry.getValue())); + } + return value; + } else if (value instanceof List) { + for (int i = 0; i < ((List) value).size(); i++) { + ((List) value).set(i, convertValue(((List) value).get(i))); + } + return value; + } else { + return value; + } + } + private Map createMetadata(@NonNull SeaTunnelRow row, @NonNull String key) { Map actionMetadata = createMetadata(row); actionMetadata.put("_id", key); diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/serialize/source/DefaultSeaTunnelRowDeserializer.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/serialize/source/DefaultSeaTunnelRowDeserializer.java index 2d23b7dc51e..f3acd191efa 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/serialize/source/DefaultSeaTunnelRowDeserializer.java +++ b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/serialize/source/DefaultSeaTunnelRowDeserializer.java @@ -91,6 +91,9 @@ public class DefaultSeaTunnelRowDeserializer implements SeaTunnelRowDeserializer put( "yyyy-MM-dd HH:mm:ss.SSSSSS".length(), DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSSSSS")); + put( + "yyyy-MM-dd HH:mm:ss.SSSSSSSSS".length(), + DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSSSSSSSS")); } }; @@ -185,6 +188,25 @@ Object convertValue(SeaTunnelDataType fieldType, String fieldValue) convertMap.put(convertKey, convertValue); } return convertMap; + } else if (fieldType instanceof SeaTunnelRowType) { + SeaTunnelRowType rowType = (SeaTunnelRowType) fieldType; + Map collect = + mapper.readValue(fieldValue, new TypeReference>() {}); + Object[] seaTunnelFields = new 
Object[rowType.getTotalFields()]; + for (int i = 0; i < rowType.getTotalFields(); i++) { + String fieldName = rowType.getFieldName(i); + SeaTunnelDataType fieldDataType = rowType.getFieldType(i); + Object value = collect.get(fieldName); + if (value != null) { + seaTunnelFields[i] = + convertValue( + fieldDataType, + (value instanceof List || value instanceof Map) + ? mapper.writeValueAsString(value) + : value.toString()); + } + } + return new SeaTunnelRow(seaTunnelFields); } else if (fieldType instanceof PrimitiveByteArrayType) { return Base64.getDecoder().decode(fieldValue); } else if (VOID_TYPE.equals(fieldType) || fieldType == null) { @@ -204,7 +226,7 @@ private LocalDateTime parseDate(String fieldValue) { } catch (NumberFormatException e) { // no op } - String formatDate = fieldValue.replace("T", " "); + String formatDate = fieldValue.replace("T", " ").replace("Z", ""); if (fieldValue.length() == "yyyyMMdd".length() || fieldValue.length() == "yyyy-MM-dd".length()) { formatDate = fieldValue + " 00:00:00"; diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSink.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSink.java index 3d160adc070..d2ca6045eb5 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSink.java +++ b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSink.java @@ -72,7 +72,7 @@ public String getPluginName() { public SinkWriter createWriter( SinkWriter.Context context) { return new ElasticsearchSinkWriter( - context, catalogTable.getSeaTunnelRowType(), config, maxBatchSize, maxRetryCount); + context, catalogTable, config, maxBatchSize, maxRetryCount); } @Override @@ -89,7 +89,7 @@ 
public Optional getSaveModeHandler() { SchemaSaveMode schemaSaveMode = config.get(SinkConfig.SCHEMA_SAVE_MODE); DataSaveMode dataSaveMode = config.get(SinkConfig.DATA_SAVE_MODE); - TablePath tablePath = TablePath.of("", config.get(SinkConfig.INDEX)); + TablePath tablePath = TablePath.of("", catalogTable.getTableId().getTableName()); catalog.open(); return Optional.of( new DefaultSaveModeHandler( diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSinkFactory.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSinkFactory.java index 97548e3fdbd..ad2c01e47e7 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSinkFactory.java +++ b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSinkFactory.java @@ -18,6 +18,8 @@ package org.apache.seatunnel.connectors.seatunnel.elasticsearch.sink; import org.apache.seatunnel.api.configuration.util.OptionRule; +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.TableIdentifier; import org.apache.seatunnel.api.table.connector.TableSink; import org.apache.seatunnel.api.table.factory.Factory; import org.apache.seatunnel.api.table.factory.TableSinkFactory; @@ -26,6 +28,7 @@ import com.google.auto.service.AutoService; +import static org.apache.seatunnel.api.sink.SinkReplaceNameConstant.REPLACE_TABLE_NAME_KEY; import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.config.EsClusterConnectionConfig.HOSTS; import static org.apache.seatunnel.connectors.seatunnel.elasticsearch.config.EsClusterConnectionConfig.PASSWORD; import static 
org.apache.seatunnel.connectors.seatunnel.elasticsearch.config.EsClusterConnectionConfig.TLS_KEY_STORE_PASSWORD; @@ -72,6 +75,18 @@ public OptionRule optionRule() { @Override public TableSink createSink(TableSinkFactoryContext context) { - return () -> new ElasticsearchSink(context.getOptions(), context.getCatalogTable()); + String original = context.getOptions().get(INDEX); + original = + original.replace( + REPLACE_TABLE_NAME_KEY, + context.getCatalogTable().getTableId().getTableName()); + CatalogTable newTable = + CatalogTable.of( + TableIdentifier.of( + context.getCatalogTable().getCatalogName(), + context.getCatalogTable().getTablePath().getDatabaseName(), + original), + context.getCatalogTable()); + return () -> new ElasticsearchSink(context.getOptions(), newTable); } } diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSinkWriter.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSinkWriter.java index 35ed49d498b..6edac760c1e 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSinkWriter.java +++ b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/sink/ElasticsearchSinkWriter.java @@ -19,9 +19,9 @@ import org.apache.seatunnel.api.configuration.ReadonlyConfig; import org.apache.seatunnel.api.sink.SinkWriter; +import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.RowKind; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.common.exception.CommonErrorCodeDeprecated; import org.apache.seatunnel.common.utils.RetryUtils; import 
org.apache.seatunnel.common.utils.RetryUtils.RetryMaterial; @@ -61,18 +61,20 @@ public class ElasticsearchSinkWriter public ElasticsearchSinkWriter( SinkWriter.Context context, - SeaTunnelRowType seaTunnelRowType, + CatalogTable catalogTable, ReadonlyConfig config, int maxBatchSize, int maxRetryCount) { this.context = context; this.maxBatchSize = maxBatchSize; - IndexInfo indexInfo = new IndexInfo(config); + IndexInfo indexInfo = new IndexInfo(catalogTable.getTableId().getTableName(), config); esRestClient = EsRestClient.createInstance(config); this.seaTunnelRowSerializer = new ElasticsearchRowSerializer( - esRestClient.getClusterInfo(), indexInfo, seaTunnelRowType); + esRestClient.getClusterInfo(), + indexInfo, + catalogTable.getSeaTunnelRowType()); this.requestEsList = new ArrayList<>(maxBatchSize); this.retryMaterial = diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/source/ElasticsearchSource.java b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/source/ElasticsearchSource.java index e99e66e420f..9909c9bba97 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/source/ElasticsearchSource.java +++ b/seatunnel-connectors-v2/connector-elasticsearch/src/main/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/source/ElasticsearchSource.java @@ -30,10 +30,12 @@ import org.apache.seatunnel.api.table.catalog.TableIdentifier; import org.apache.seatunnel.api.table.catalog.TableSchema; import org.apache.seatunnel.api.table.catalog.schema.TableSchemaOptions; +import org.apache.seatunnel.api.table.converter.BasicTypeDefine; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRow; -import 
org.apache.seatunnel.connectors.seatunnel.elasticsearch.catalog.ElasticSearchDataTypeConvertor; +import org.apache.seatunnel.connectors.seatunnel.elasticsearch.catalog.ElasticSearchTypeConverter; import org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsRestClient; +import org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsType; import org.apache.seatunnel.connectors.seatunnel.elasticsearch.config.SourceConfig; import java.util.Arrays; @@ -62,16 +64,14 @@ public ElasticsearchSource(ReadonlyConfig config) { } else { source = config.get(SourceConfig.SOURCE); EsRestClient esRestClient = EsRestClient.createInstance(config); - Map esFieldType = + Map> esFieldType = esRestClient.getFieldTypeMapping(config.get(SourceConfig.INDEX), source); esRestClient.close(); SeaTunnelDataType[] fieldTypes = new SeaTunnelDataType[source.size()]; - ElasticSearchDataTypeConvertor elasticSearchDataTypeConvertor = - new ElasticSearchDataTypeConvertor(); for (int i = 0; i < source.size(); i++) { - String esType = esFieldType.get(source.get(i)); + BasicTypeDefine esType = esFieldType.get(source.get(i)); SeaTunnelDataType seaTunnelDataType = - elasticSearchDataTypeConvertor.toSeaTunnelType(source.get(i), esType); + ElasticSearchTypeConverter.INSTANCE.convert(esType).getDataType(); fieldTypes[i] = seaTunnelDataType; } TableSchema.Builder builder = TableSchema.builder(); diff --git a/seatunnel-connectors-v2/connector-elasticsearch/src/test/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/serialize/ElasticsearchRowSerializerTest.java b/seatunnel-connectors-v2/connector-elasticsearch/src/test/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/serialize/ElasticsearchRowSerializerTest.java index 6efa5bba4c9..5a269e07378 100644 --- a/seatunnel-connectors-v2/connector-elasticsearch/src/test/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/serialize/ElasticsearchRowSerializerTest.java +++ 
b/seatunnel-connectors-v2/connector-elasticsearch/src/test/java/org/apache/seatunnel/connectors/seatunnel/elasticsearch/serialize/ElasticsearchRowSerializerTest.java @@ -49,7 +49,7 @@ public void testSerializeUpsert() { ReadonlyConfig pluginConf = ReadonlyConfig.fromMap(confMap); ElasticsearchClusterInfo clusterInfo = ElasticsearchClusterInfo.builder().clusterVersion("8.0.0").build(); - IndexInfo indexInfo = new IndexInfo(pluginConf); + IndexInfo indexInfo = new IndexInfo(index, pluginConf); SeaTunnelRowType schema = new SeaTunnelRowType( new String[] {primaryKey, "name"}, @@ -88,7 +88,7 @@ public void testSerializeUpsertWithoutKey() { ReadonlyConfig pluginConf = ReadonlyConfig.fromMap(confMap); ElasticsearchClusterInfo clusterInfo = ElasticsearchClusterInfo.builder().clusterVersion("8.0.0").build(); - IndexInfo indexInfo = new IndexInfo(pluginConf); + IndexInfo indexInfo = new IndexInfo(index, pluginConf); SeaTunnelRowType schema = new SeaTunnelRowType( new String[] {"id", "name"}, @@ -127,7 +127,7 @@ public void testSerializeUpsertDocumentError() { ReadonlyConfig pluginConf = ReadonlyConfig.fromMap(confMap); ElasticsearchClusterInfo clusterInfo = ElasticsearchClusterInfo.builder().clusterVersion("8.0.0").build(); - IndexInfo indexInfo = new IndexInfo(pluginConf); + IndexInfo indexInfo = new IndexInfo(index, pluginConf); SeaTunnelRowType schema = new SeaTunnelRowType( new String[] {primaryKey, "name"}, @@ -165,7 +165,7 @@ public void testSerializeDelete() { ReadonlyConfig pluginConf = ReadonlyConfig.fromMap(confMap); ElasticsearchClusterInfo clusterInfo = ElasticsearchClusterInfo.builder().clusterVersion("8.0.0").build(); - IndexInfo indexInfo = new IndexInfo(pluginConf); + IndexInfo indexInfo = new IndexInfo(index, pluginConf); SeaTunnelRowType schema = new SeaTunnelRowType( new String[] {primaryKey, "name"}, diff --git 
a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/java/org/apache/seatunnel/e2e/connector/elasticsearch/ElasticsearchIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/java/org/apache/seatunnel/e2e/connector/elasticsearch/ElasticsearchIT.java index ddd106451f2..0a8d51c7ab3 100644 --- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/java/org/apache/seatunnel/e2e/connector/elasticsearch/ElasticsearchIT.java +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/java/org/apache/seatunnel/e2e/connector/elasticsearch/ElasticsearchIT.java @@ -26,10 +26,14 @@ import org.apache.seatunnel.common.utils.JsonUtils; import org.apache.seatunnel.connectors.seatunnel.elasticsearch.catalog.ElasticSearchCatalog; import org.apache.seatunnel.connectors.seatunnel.elasticsearch.client.EsRestClient; +import org.apache.seatunnel.connectors.seatunnel.elasticsearch.dto.BulkResponse; import org.apache.seatunnel.connectors.seatunnel.elasticsearch.dto.source.ScrollResult; import org.apache.seatunnel.e2e.common.TestResource; import org.apache.seatunnel.e2e.common.TestSuiteBase; import org.apache.seatunnel.e2e.common.container.TestContainer; +import org.apache.seatunnel.e2e.common.util.ContainerUtil; + +import org.apache.commons.io.IOUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; @@ -48,6 +52,7 @@ import java.io.IOException; import java.math.BigDecimal; +import java.nio.charset.StandardCharsets; import java.time.Duration; import java.time.LocalDate; import java.time.LocalDateTime; @@ -78,7 +83,7 @@ public class ElasticsearchIT extends TestSuiteBase implements TestResource { public void startUp() throws Exception { container = new ElasticsearchContainer( - DockerImageName.parse("elasticsearch:8.0.0") + DockerImageName.parse("elasticsearch:8.9.0") .asCompatibleSubstituteFor( "docker.elastic.co/elasticsearch/elasticsearch")) 
.withNetwork(NETWORK) @@ -89,7 +94,7 @@ public void startUp() throws Exception { .withStartupTimeout(Duration.ofMinutes(5)) .withLogConsumer( new Slf4jLogConsumer( - DockerLoggerFactory.getLogger("elasticsearch:8.0.0"))); + DockerLoggerFactory.getLogger("elasticsearch:8.9.0"))); Startables.deepStart(Stream.of(container)).join(); log.info("Elasticsearch container started"); esRestClient = @@ -105,6 +110,7 @@ public void startUp() throws Exception { Optional.empty()); testDataset = generateTestDataSet(); createIndexDocs(); + createIndexWithFullType(); } /** create a index,and bulk some documents */ @@ -125,6 +131,31 @@ private void createIndexDocs() { esRestClient.bulk(requestBody.toString()); } + private void createIndexWithFullType() throws IOException, InterruptedException { + String mapping = + IOUtils.toString( + ContainerUtil.getResourcesFile( + "/elasticsearch/st_index_full_type_mapping.json") + .toURI(), + StandardCharsets.UTF_8); + esRestClient.createIndex("st_index_full_type", mapping); + BulkResponse response = + esRestClient.bulk( + "{ \"index\" : { \"_index\" : \"st_index_full_type\", \"_id\" : \"1\" } }\n" + + IOUtils.toString( + ContainerUtil.getResourcesFile( + "/elasticsearch/st_index_full_type_data.json") + .toURI(), + StandardCharsets.UTF_8) + .replace("\n", "") + + "\n"); + Assertions.assertFalse(response.isErrors(), response.getResponse()); + // waiting index refresh + Thread.sleep(2000L); + Assertions.assertEquals( + 2, esRestClient.getIndexDocsCount("st_index_full_type").get(0).getDocsCount()); + } + @TestTemplate public void testElasticsearch(TestContainer container) throws IOException, InterruptedException { @@ -136,6 +167,18 @@ public void testElasticsearch(TestContainer container) Assertions.assertIterableEquals(mapTestDatasetForDSL(), sinkData); } + @TestTemplate + public void testElasticsearchWithFullType(TestContainer container) + throws IOException, InterruptedException { + Container.ExecResult execResult = + 
container.executeJob("/elasticsearch/elasticsearch_source_and_sink_full_type.conf"); + Assertions.assertEquals(0, execResult.getExitCode()); + Thread.sleep(2000L); + Assertions.assertEquals( + 1, + esRestClient.getIndexDocsCount("st_index_full_type_target").get(0).getDocsCount()); + } + private List generateTestDataSet() throws JsonProcessingException { String[] fields = new String[] { diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/resources/elasticsearch/elasticsearch_source_and_sink_full_type.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/resources/elasticsearch/elasticsearch_source_and_sink_full_type.conf new file mode 100644 index 00000000000..4c2ca0fae04 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/resources/elasticsearch/elasticsearch_source_and_sink_full_type.conf @@ -0,0 +1,97 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +###### +###### This config file is a demonstration of streaming processing in seatunnel config +###### + +env { + parallelism = 1 + job.mode = "BATCH" + #checkpoint.interval = 10000 +} + +source { + Elasticsearch { + hosts = ["https://elasticsearch:9200"] + username = "elastic" + password = "elasticsearch" + tls_verify_certificate = false + tls_verify_hostname = false + index = "st_index_full_type" + source = [ + "aggregate_metric_double", + "alias", + "binary", + "byte", + "boolean", + "completion", + "date", + "date_nanos", + "dense_vector", + "double", + "flattened", + "float", + "geo_point", + "geo_shape", + "point", + "integer_range", + "float_range", + "long_range", + "double_range", + "date_range", + "ip_range", + "half_float", + "scaled_float", + "histogram", + "integer", + "ip", + "join", + "keyword", + "long", + "nested", + "object", + "percolator", + "rank_feature", + "rank_features", + "shape", + "search_as_you_type", + "short", + "text", + "match_only_text", + "name", + "unsigned_long", + "version" + ] + } +} + +transform { +} + +sink { + Elasticsearch { + hosts = ["https://elasticsearch:9200"] + username = "elastic" + password = "elasticsearch" + tls_verify_certificate = false + tls_verify_hostname = false + index = "st_index_full_type_target" + "schema_save_mode"="CREATE_SCHEMA_WHEN_NOT_EXIST" + "data_save_mode"="APPEND_DATA" + } +} \ No newline at end of file diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/resources/elasticsearch/st_index_full_type_data.json b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/resources/elasticsearch/st_index_full_type_data.json new file mode 100644 index 00000000000..ace28fbacbd --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/resources/elasticsearch/st_index_full_type_data.json @@ -0,0 +1,137 @@ +{ + "aggregate_metric_double": { + "min": 10, + "max": 100, + "sum": 1000, + "value_count": 5 + 
}, + "binary": "binary_data", + "byte": 127, + "boolean": true, + "completion": { + "input": [ + "search term", + "another term" + ] + }, + "date": "2024-03-19", + "date_nanos": "2024-03-19T12:30:45.123456789Z", + "dense_vector": [ + 1.0, + 2.0, + 3.0 + ], + "double": 3.14159, + "flattened": { + "nested_field1": "value1", + "nested_field2": "value2" + }, + "float": 3.14, + "geo_point": { + "lat": 40.7128, + "lon": -74.0060 + }, + "geo_shape": { + "type": "point", + "coordinates": [ + 100.0, + 0.0 + ] + }, + "point": { + "type": "Point", + "coordinates": [ + 100.0, + 0.0 + ] + }, + "integer_range": { + "gte": 10, + "lte": 20 + }, + "float_range": { + "gte": 1.0, + "lte": 5.0 + }, + "long_range": { + "gte": 100, + "lte": 200 + }, + "double_range": { + "gte": 1.0, + "lte": 10.0 + }, + "date_range": { + "gte": "2024-01-01", + "lte": "2024-03-31" + }, + "ip_range": { + "gte": "192.0.2.0", + "lte": "192.0.2.255" + }, + "half_float": 3.14, + "scaled_float": 1.23, + "histogram": { + "values": [ + 0.1, + 0.2, + 0.3, + 0.4, + 0.5 + ], + "counts": [ + 3, + 7, + 23, + 12, + 6 + ] + }, + "integer": 42, + "ip": "192.0.2.1", + "join": { + "name": "question" + }, + "keyword": "keyword_value", + "long": 1234567890, + "nested": { + "nested_field1": "value1", + "nested_field2": "value2" + }, + "object": { + "age": 30, + "name": { + "first": "John", + "last": "Doe" + } + }, + "percolator": { + "match": { + "keyword": "keyword_value" + } + }, + "rank_feature": 5.0, + "rank_features": { + "feature1": 10.0, + "feature2": 20.0 + }, + "shape": "POINT (-377.03653 389.897676)", + "search_as_you_type": "searchable text", + "short": 32767, + "sparse_vector": { + "index": [ + 0, + 2, + 4 + ], + "values": [ + 1.0, + 2.0, + 3.0 + ] + }, + "text": "full text", + "match_only_text": "match only text", + "name": "John Doe", + "version": "1.0" +} \ No newline at end of file diff --git 
a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/resources/elasticsearch/st_index_full_type_mapping.json b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/resources/elasticsearch/st_index_full_type_mapping.json new file mode 100644 index 00000000000..bf7df0d279d --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-elasticsearch-e2e/src/test/resources/elasticsearch/st_index_full_type_mapping.json @@ -0,0 +1,162 @@ +{ + "mappings": { + "properties": { + "aggregate_metric_double": { + "type": "aggregate_metric_double", + "metrics": [ + "min", + "max", + "sum", + "value_count" + ], + "default_metric": "max" + }, + "alias": { + "type": "alias", + "path": "aggregate_metric_double" + }, + "binary": { + "type": "binary" + }, + "byte": { + "type": "byte" + }, + "boolean": { + "type": "boolean" + }, + "completion": { + "type": "completion" + }, + "date": { + "type": "date" + }, + "date_nanos": { + "type": "date_nanos" + }, + "dense_vector": { + "type": "dense_vector", + "dims": 3 + }, + "double": { + "type": "double" + }, + "flattened": { + "type": "flattened" + }, + "float": { + "type": "float" + }, + "geo_point": { + "type": "geo_point" + }, + "geo_shape": { + "type": "geo_shape" + }, + "point": { + "type": "point" + }, + "integer_range": { + "type": "integer_range" + }, + "float_range": { + "type": "float_range" + }, + "long_range": { + "type": "long_range" + }, + "double_range": { + "type": "double_range" + }, + "date_range": { + "type": "date_range" + }, + "ip_range": { + "type": "ip_range" + }, + "half_float": { + "type": "half_float" + }, + "scaled_float": { + "type": "scaled_float", + "scaling_factor": 100 + }, + "histogram": { + "type": "histogram" + }, + "integer": { + "type": "integer" + }, + "ip": { + "type": "ip" + }, + "join": { + "type": "join", + "relations": { + "question": "answer" + } + }, + "keyword": { + "type": "keyword" + }, + "long": { + "type": "long" + }, + "nested": { + 
"type": "nested" + }, + "object": { + "properties": { + "age": { + "type": "integer" + }, + "name": { + "properties": { + "first": { + "type": "text" + }, + "last": { + "type": "text" + } + } + } + } + }, + "percolator": { + "type": "percolator" + }, + "rank_feature": { + "type": "rank_feature" + }, + "rank_features": { + "type": "rank_features" + }, + "shape": { + "type": "shape" + }, + "search_as_you_type": { + "type": "search_as_you_type" + }, + "short": { + "type": "short" + }, + "text": { + "type": "text" + }, + "match_only_text": { + "type": "text" + }, + "name": { + "type": "text", + "fields": { + "length": { + "type": "token_count", + "analyzer": "standard" + } + } + }, + "version": { + "type": "version" + } + } + } +} \ No newline at end of file From 5795b265ccbb66466220da3e8fd251cf551a6ebb Mon Sep 17 00:00:00 2001 From: Jarvis Date: Fri, 29 Mar 2024 11:28:30 +0800 Subject: [PATCH 43/59] [Fix][StarRocks] Fix NPE when upstream catalogtable table path only have table name part (#6540) --- .../starrocks/sink/StarRocksSinkFactory.java | 10 +- .../e2e/connector/starrocks/StarRocksIT.java | 34 +++++- .../src/test/resources/fake-to-starrocks.conf | 106 ++++++++++++++++++ 3 files changed, 145 insertions(+), 5 deletions(-) create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/resources/fake-to-starrocks.conf diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSinkFactory.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSinkFactory.java index 08fc6906981..f05f912b6f6 100644 --- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSinkFactory.java +++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSinkFactory.java 
@@ -89,13 +89,15 @@ public TableSink createSink(TableSinkFactoryContext context) { String sinkDatabaseName = sinkConfig.getDatabase(); String sinkTableName = sinkConfig.getTable(); // to replace - String finalDatabaseName = - sinkDatabaseName.replace(REPLACE_DATABASE_NAME_KEY, sourceDatabaseName); + sinkDatabaseName = + sinkDatabaseName.replace( + REPLACE_DATABASE_NAME_KEY, + sourceDatabaseName != null ? sourceDatabaseName : ""); String finalTableName = this.replaceFullTableName(sinkTableName, tableId); // rebuild TableIdentifier and catalogTable TableIdentifier newTableId = TableIdentifier.of( - tableId.getCatalogName(), finalDatabaseName, null, finalTableName); + tableId.getCatalogName(), sinkDatabaseName, null, finalTableName); catalogTable = CatalogTable.of( newTableId, @@ -107,7 +109,7 @@ public TableSink createSink(TableSinkFactoryContext context) { CatalogTable finalCatalogTable = catalogTable; // reset sinkConfig.setTable(finalTableName); - sinkConfig.setDatabase(finalDatabaseName); + sinkConfig.setDatabase(sinkDatabaseName); return () -> new StarRocksSink(sinkConfig, finalCatalogTable, context.getOptions()); } diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/java/org/apache/seatunnel/e2e/connector/starrocks/StarRocksIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/java/org/apache/seatunnel/e2e/connector/starrocks/StarRocksIT.java index ff8a934cb4a..783b0416ba7 100644 --- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/java/org/apache/seatunnel/e2e/connector/starrocks/StarRocksIT.java +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/java/org/apache/seatunnel/e2e/connector/starrocks/StarRocksIT.java @@ -110,6 +110,31 @@ public class StarRocksIT extends TestSuiteBase implements TestResource { + "\"storage_format\" = \"DEFAULT\"" + ")"; + private static final String DDL_FAKE_SINK_TABLE = + "create table " + + DATABASE + + "." 
+ + "fake_table_sink" + + " (\n" + + " id BIGINT,\n" + + " c_string STRING,\n" + + " c_boolean BOOLEAN,\n" + + " c_tinyint TINYINT,\n" + + " c_int INT,\n" + + " c_bigint BIGINT,\n" + + " c_float FLOAT,\n" + + " c_double DOUBLE,\n" + + " c_decimal Decimal(2, 1),\n" + + " c_date DATE\n" + + ")ENGINE=OLAP\n" + + "DUPLICATE KEY(`id`)\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 1\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\"," + + "\"storage_format\" = \"DEFAULT\"" + + ")"; + private static final String INIT_DATA_SQL = "insert into " + DATABASE @@ -253,6 +278,13 @@ public void testStarRocksSink(TestContainer container) } } + @TestTemplate + public void testSinkWithCatalogTableNameOnly(TestContainer container) + throws IOException, InterruptedException { + Container.ExecResult execResult = container.executeJob("/fake-to-starrocks.conf"); + Assertions.assertEquals(0, execResult.getExitCode(), execResult.getStderr()); + } + private void initializeJdbcConnection() throws SQLException, ClassNotFoundException, MalformedURLException, InstantiationException, IllegalAccessException { @@ -274,7 +306,7 @@ private void initializeJdbcTable() { // create source table statement.execute(DDL_SOURCE); // create sink table - // statement.execute(DDL_SINK); + statement.execute(DDL_FAKE_SINK_TABLE); } catch (SQLException e) { throw new RuntimeException("Initializing table failed!", e); } diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/resources/fake-to-starrocks.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/resources/fake-to-starrocks.conf new file mode 100644 index 00000000000..3dca9e725cd --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/resources/fake-to-starrocks.conf @@ -0,0 +1,106 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +env { + parallelism = 1 + job.mode = "BATCH" +} + +source { + FakeSource { + parallelism = 1 + result_table_name = "fake" + row.num = 100 + schema { + table = "FakeTable" + columns = [ + { + name = id + type = bigint + nullable = false + defaultValue = 0 + }, + { + name = c_string + type = string + nullable = true + }, + { + name = c_boolean + type = boolean + nullable = true + }, + { + name = c_tinyint + type = tinyint + nullable = true + }, + { + name = c_int + type = int + nullable = true + }, + { + name = c_bigint + type = bigint + nullable = true + }, + { + name = c_float + type = float + nullable = true + }, + { + name = c_double + type = double + nullable = true + }, + { + name = c_decimal + type = "decimal(2, 1)" + nullable = true + }, + { + name = c_date + type = date + nullable = true + } + ] + } + } +} + +transform { +} + +sink { + StarRocks { + source_table_name = "fake" + nodeUrls = ["starrocks_e2e:8030"] + username = root + password = "" + database = "test" + table = "fake_table_sink" + batch_max_rows = 100 + max_retries = 3 + base-url="jdbc:mysql://starrocks_e2e:9030/test" + starrocks.config = { + format = "JSON" + strip_outer_array = true + } + } +} \ No newline at end of file From 93c6cac62cda32fde94fe629f819a1b904b067db Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Fri, 
29 Mar 2024 14:55:27 +0800 Subject: [PATCH 44/59] [Improve][Test] Run all test when code merged into dev branch (#6609) * [Improve][Test] Run all test when code merged into dev branch * [Improve][Test] Run all test when code merged into dev branch --- .github/workflows/backend.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml index 5c8e75897d7..e63a5ac51ea 100644 --- a/.github/workflows/backend.yml +++ b/.github/workflows/backend.yml @@ -100,6 +100,7 @@ jobs: current_branch='${{ steps.git_init.outputs.branch }}' pip install GitPython workspace="${GITHUB_WORKSPACE}" + repository_owner="${GITHUB_REPOSITORY_OWNER}" cv2_files=`python tools/update_modules_check/check_file_updates.py ua $workspace apache/dev origin/$current_branch "seatunnel-connectors-v2/**"` true_or_false=${cv2_files%%$'\n'*} file_list=${cv2_files#*$'\n'} @@ -133,6 +134,9 @@ jobs: api_files=`python tools/update_modules_check/check_file_updates.py ua $workspace apache/dev origin/$current_branch "seatunnel-api/**" "seatunnel-common/**" "seatunnel-config/**" "seatunnel-connectors/**" "seatunnel-core/**" "seatunnel-e2e/seatunnel-e2e-common/**" "seatunnel-formats/**" "seatunnel-plugin-discovery/**" "seatunnel-transforms-v2/**" "seatunnel-translation/**" "seatunnel-e2e/seatunnel-transforms-v2-e2e/**" "seatunnel-connectors/**" "pom.xml" "**/workflows/**" "tools/**" "seatunnel-dist/**"` true_or_false=${api_files%%$'\n'*} file_list=${api_files#*$'\n'} + if [[ $repository_owner == 'apache' ]];then + true_or_false='true' + fi echo "api=$true_or_false" >> $GITHUB_OUTPUT echo "api_files=$file_list" >> $GITHUB_OUTPUT From fd2a57d82ab9be835ff589d3ab3cbca9aecaf7af Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Mon, 1 Apr 2024 10:38:43 +0800 Subject: [PATCH 45/59] [Improve][Test] Fix test unstable on `ResourceManger` and `EventReport` module (#6620) --- .github/workflows/backend.yml | 4 +- .../seatunnel/SeaTunnelContainer.java | 2 + 
.../ResourceManagerFunctionTest.java | 56 ------------------- .../resourcemanager/ResourceManagerTest.java | 29 +++++++++- 4 files changed, 33 insertions(+), 58 deletions(-) delete mode 100644 seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/resourcemanager/ResourceManagerFunctionTest.java diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml index e63a5ac51ea..519cf8533d2 100644 --- a/.github/workflows/backend.yml +++ b/.github/workflows/backend.yml @@ -556,7 +556,7 @@ jobs: - name: run seatunnel zeta integration test if: needs.changes.outputs.api == 'true' run: | - ./mvnw -T 1 -B verify -DskipUT=true -DskipIT=false -D"license.skipAddThirdParty"=true --no-snapshot-updates -pl :connector-seatunnel-e2e-base -am -Pci + ./mvnw -T 1 -B verify -DskipUT=true -DskipIT=false -D"license.skipAddThirdParty"=true --no-snapshot-updates -pl :connector-seatunnel-e2e-base,:connector-console-seatunnel-e2e -am -Pci env: MAVEN_OPTS: -Xmx4096m engine-k8s-it: @@ -578,6 +578,8 @@ jobs: env: KUBECONFIG: /etc/rancher/k3s/k3s.yaml - uses: actions/checkout@v2 + - name: free disk space + run: tools/github/free_disk_space.sh - name: Set up JDK ${{ matrix.java }} uses: actions/setup-java@v3 with: diff --git a/seatunnel-e2e/seatunnel-e2e-common/src/test/java/org/apache/seatunnel/e2e/common/container/seatunnel/SeaTunnelContainer.java b/seatunnel-e2e/seatunnel-e2e-common/src/test/java/org/apache/seatunnel/e2e/common/container/seatunnel/SeaTunnelContainer.java index aa4d62024f5..ef83f83257f 100644 --- a/seatunnel-e2e/seatunnel-e2e-common/src/test/java/org/apache/seatunnel/e2e/common/container/seatunnel/SeaTunnelContainer.java +++ b/seatunnel-e2e/seatunnel-e2e-common/src/test/java/org/apache/seatunnel/e2e/common/container/seatunnel/SeaTunnelContainer.java @@ -248,6 +248,8 @@ private static boolean isSystemThread(String s) { || s.startsWith("Timer-") || s.contains("InterruptTimer") || s.contains("Java2D Disposer") + || s.contains("OkHttp 
ConnectionPool") + || s.startsWith("http-report-event-scheduler") || s.contains( "org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner") || s.startsWith("Log4j2-TF-") diff --git a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/resourcemanager/ResourceManagerFunctionTest.java b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/resourcemanager/ResourceManagerFunctionTest.java deleted file mode 100644 index acb4237f070..00000000000 --- a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/resourcemanager/ResourceManagerFunctionTest.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.seatunnel.engine.server.resourcemanager; - -import org.apache.seatunnel.engine.server.AbstractSeaTunnelServerTest; -import org.apache.seatunnel.engine.server.resourcemanager.resource.ResourceProfile; -import org.apache.seatunnel.engine.server.resourcemanager.resource.SlotProfile; - -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -import com.hazelcast.cluster.Address; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; - -public class ResourceManagerFunctionTest - extends AbstractSeaTunnelServerTest { - - @Test - public void testApplyResourceWithRandomResult() - throws ExecutionException, InterruptedException { - FakeResourceManager resourceManager = new FakeResourceManager(nodeEngine); - - List resourceProfiles = new ArrayList<>(); - resourceProfiles.add(new ResourceProfile()); - resourceProfiles.add(new ResourceProfile()); - resourceProfiles.add(new ResourceProfile()); - resourceProfiles.add(new ResourceProfile()); - resourceProfiles.add(new ResourceProfile()); - List slotProfiles = resourceManager.applyResources(1L, resourceProfiles).get(); - Assertions.assertEquals(slotProfiles.size(), 5); - - Set
addresses = - slotProfiles.stream().map(SlotProfile::getWorker).collect(Collectors.toSet()); - Assertions.assertTrue(addresses.size() > 1); - } -} diff --git a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/resourcemanager/ResourceManagerTest.java b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/resourcemanager/ResourceManagerTest.java index 6dc246d8f46..92442838895 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/resourcemanager/ResourceManagerTest.java +++ b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/resourcemanager/ResourceManagerTest.java @@ -27,11 +27,15 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import com.hazelcast.cluster.Address; + import java.util.ArrayList; import java.util.List; +import java.util.Set; import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; -public class ResourceManagerTest extends AbstractSeaTunnelServerTest { +public class ResourceManagerTest extends AbstractSeaTunnelServerTest { private ResourceManager resourceManager; @@ -77,4 +81,27 @@ public void testApplyRequest() throws ExecutionException, InterruptedException { new ResourceProfile(CPU.of(0), Memory.of(Long.MAX_VALUE))) .get()); } + + @Test + public void testApplyResourceWithRandomResult() + throws ExecutionException, InterruptedException { + FakeResourceManager resourceManager = new FakeResourceManager(nodeEngine); + + List resourceProfiles = new ArrayList<>(); + resourceProfiles.add(new ResourceProfile()); + resourceProfiles.add(new ResourceProfile()); + resourceProfiles.add(new ResourceProfile()); + resourceProfiles.add(new ResourceProfile()); + resourceProfiles.add(new ResourceProfile()); + List slotProfiles = resourceManager.applyResources(1L, resourceProfiles).get(); + Assertions.assertEquals(slotProfiles.size(), 5); + + 
boolean hasDifferentWorker = false; + for (int i = 0; i < 5; i++) { + Set
addresses = + slotProfiles.stream().map(SlotProfile::getWorker).collect(Collectors.toSet()); + hasDifferentWorker = addresses.size() > 1; + } + Assertions.assertTrue(hasDifferentWorker, "should have different worker for each slot"); + } } From cdb1856e8444e7c00002d4ae1cad90b2a38f0ddd Mon Sep 17 00:00:00 2001 From: Eric Date: Mon, 1 Apr 2024 14:52:27 +0800 Subject: [PATCH 46/59] [Hotfix] Fix DEFAULT TABLE problem (#6352) --- pom.xml | 1 + .../api/table/catalog/CatalogTableUtil.java | 4 ++-- .../jdbc/internal/dialect/mysql/MysqlDialect.java | 8 ++++++-- .../jdbc/internal/dialect/oracle/OracleDialect.java | 8 ++++++-- .../jdbc/internal/dialect/psql/PostgresDialect.java | 7 +++++-- .../internal/dialect/sqlserver/SqlServerDialect.java | 8 ++++++-- .../connector/amazondynamodb/AmazondynamodbIT.java | 12 +++++++----- 7 files changed, 33 insertions(+), 15 deletions(-) diff --git a/pom.xml b/pom.xml index 6c0bb7e719b..4d4a3731ae6 100644 --- a/pom.xml +++ b/pom.xml @@ -21,6 +21,7 @@ org.apache apache 31 + org.apache.seatunnel diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/CatalogTableUtil.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/CatalogTableUtil.java index 6f2b6adeb25..b268fe612e8 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/CatalogTableUtil.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/CatalogTableUtil.java @@ -115,14 +115,14 @@ public static List getCatalogTables( return optionalCatalog .map( c -> { - long startTime = System.currentTimeMillis(); try (Catalog catalog = c) { + long startTime = System.currentTimeMillis(); catalog.open(); List catalogTables = catalog.getTables(readonlyConfig); log.info( String.format( - "Get catalog tables, cost time: %d", + "Get catalog tables, cost time: %d ms", System.currentTimeMillis() - startTime)); if (catalogTables.isEmpty()) { throw new SeaTunnelException( diff --git 
a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MysqlDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MysqlDialect.java index a6632a58732..5527417e916 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MysqlDialect.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MysqlDialect.java @@ -174,12 +174,16 @@ public Long approximateRowCntStatement(Connection connection, JdbcSourceTable ta // 2. If a query is configured but does not contain a WHERE clause and tablePath is // configured , use TABLE STATUS. // 3. If a query is configured with a WHERE clause, or a query statement is configured but - // tablePath is not, use COUNT(*). + // tablePath is TablePath.DEFAULT, use COUNT(*). boolean useTableStats = StringUtils.isBlank(table.getQuery()) || (!table.getQuery().toLowerCase().contains("where") - && table.getTablePath() != null); + && table.getTablePath() != null + && !TablePath.DEFAULT + .getFullName() + .equals(table.getTablePath().getFullName())); + if (useTableStats) { // The statement used to get approximate row count which is less // accurate than COUNT(*), but is more efficient for large table. 
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialect.java index 1cfeb8d7056..8dedc6dfc19 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialect.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialect.java @@ -184,12 +184,16 @@ public Long approximateRowCntStatement(Connection connection, JdbcSourceTable ta // 2. If a query is configured but does not contain a WHERE clause and tablePath is // configured, use TABLE STATUS. // 3. If a query is configured with a WHERE clause, or a query statement is configured but - // tablePath is not, use COUNT(*). + // tablePath is TablePath.DEFAULT, use COUNT(*). 
boolean useTableStats = StringUtils.isBlank(table.getQuery()) || (!table.getQuery().toLowerCase().contains("where") - && table.getTablePath() != null); + && table.getTablePath() != null + && !TablePath.DEFAULT + .getFullName() + .equals(table.getTablePath().getFullName())); + if (useTableStats) { TablePath tablePath = table.getTablePath(); String analyzeTable = diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresDialect.java index d1bf6257ec5..51c5eb67d21 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresDialect.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresDialect.java @@ -155,12 +155,15 @@ public Long approximateRowCntStatement(Connection connection, JdbcSourceTable ta // 2. If a query is configured but does not contain a WHERE clause and tablePath is // configured, use TABLE STATUS. // 3. If a query is configured with a WHERE clause, or a query statement is configured but - // tablePath is not, use COUNT(*). + // tablePath is TablePath.DEFAULT, use COUNT(*). 
boolean useTableStats = StringUtils.isBlank(table.getQuery()) || (!table.getQuery().toLowerCase().contains("where") - && table.getTablePath() != null); + && table.getTablePath() != null + && !TablePath.DEFAULT + .getFullName() + .equals(table.getTablePath().getFullName())); if (useTableStats) { String rowCountQuery = String.format( diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlServerDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlServerDialect.java index 8826e1fdc9e..87e7418966d 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlServerDialect.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlServerDialect.java @@ -165,12 +165,16 @@ public Long approximateRowCntStatement(Connection connection, JdbcSourceTable ta // 2. If a query is configured but does not contain a WHERE clause and tablePath is // configured, use TABLE STATUS. // 3. If a query is configured with a WHERE clause, or a query statement is configured but - // tablePath is not, use COUNT(*). + // tablePath is TablePath.DEFAULT, use COUNT(*). 
boolean useTableStats = StringUtils.isBlank(table.getQuery()) || (!table.getQuery().toLowerCase().contains("where") - && table.getTablePath() != null); + && table.getTablePath() != null + && !TablePath.DEFAULT + .getFullName() + .equals(table.getTablePath().getFullName())); + if (useTableStats) { TablePath tablePath = table.getTablePath(); try (Statement stmt = connection.createStatement()) { diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-amazondynamodb-e2e/src/test/java/org/apache/seatunnel/e2e/connector/amazondynamodb/AmazondynamodbIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-amazondynamodb-e2e/src/test/java/org/apache/seatunnel/e2e/connector/amazondynamodb/AmazondynamodbIT.java index a7a9e3f7024..5ae72f6d956 100644 --- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-amazondynamodb-e2e/src/test/java/org/apache/seatunnel/e2e/connector/amazondynamodb/AmazondynamodbIT.java +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-amazondynamodb-e2e/src/test/java/org/apache/seatunnel/e2e/connector/amazondynamodb/AmazondynamodbIT.java @@ -94,9 +94,11 @@ public class AmazondynamodbIT extends TestSuiteBase implements TestResource { @TestTemplate public void testAmazondynamodb(TestContainer container) throws Exception { + assertHasData(SOURCE_TABLE); Container.ExecResult execResult = container.executeJob(AMAZONDYNAMODB_JOB_CONFIG); Assertions.assertEquals(0, execResult.getExitCode()); - assertHasData(); + assertHasData(SOURCE_TABLE); + assertHasData(SINK_TABLE); compareResult(); clearSinkTable(); } @@ -168,10 +170,10 @@ private void clearSinkTable() { createTable(dynamoDbClient, SINK_TABLE); } - private void assertHasData() { - ScanResponse scan = - dynamoDbClient.scan(ScanRequest.builder().tableName(SINK_TABLE).build()); - Assertions.assertTrue(scan.hasItems(), "sink table is empty."); + private void assertHasData(String tableName) { + ScanResponse scan = dynamoDbClient.scan(ScanRequest.builder().tableName(tableName).build()); + 
Assertions.assertTrue( + !scan.items().isEmpty(), String.format("table %s is empty.", tableName)); } private void compareResult() { From 2346d0ea7c5e840ddfe532b1a1cceb9093f5c4b3 Mon Sep 17 00:00:00 2001 From: Jarvis Date: Tue, 2 Apr 2024 10:11:26 +0800 Subject: [PATCH 47/59] [E2E] Enable StarRocksCDCSinkIT (#6626) --- .../starrocks/StarRocksCDCSinkIT.java | 22 +++++++++++++++++-- .../write-cdc-changelog-to-starrocks.conf | 2 +- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/java/org/apache/seatunnel/e2e/connector/starrocks/StarRocksCDCSinkIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/java/org/apache/seatunnel/e2e/connector/starrocks/StarRocksCDCSinkIT.java index ee7e24ce1c8..1a16662f99f 100644 --- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/java/org/apache/seatunnel/e2e/connector/starrocks/StarRocksCDCSinkIT.java +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/java/org/apache/seatunnel/e2e/connector/starrocks/StarRocksCDCSinkIT.java @@ -19,12 +19,15 @@ import org.apache.seatunnel.e2e.common.TestResource; import org.apache.seatunnel.e2e.common.TestSuiteBase; +import org.apache.seatunnel.e2e.common.container.ContainerExtendedFactory; +import org.apache.seatunnel.e2e.common.container.EngineType; import org.apache.seatunnel.e2e.common.container.TestContainer; +import org.apache.seatunnel.e2e.common.junit.DisabledOnContainer; +import org.apache.seatunnel.e2e.common.junit.TestContainerExtension; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.TestTemplate; import org.testcontainers.containers.Container; import org.testcontainers.containers.GenericContainer; @@ -53,7 +56,6 @@ import static org.awaitility.Awaitility.given; @Slf4j 
-@Disabled("There are still errors unfixed @Hisoka-X") public class StarRocksCDCSinkIT extends TestSuiteBase implements TestResource { private static final String DOCKER_IMAGE = "d87904488/starrocks-starter:2.2.1"; private static final String DRIVER_CLASS = "com.mysql.cj.jdbc.Driver"; @@ -87,6 +89,18 @@ public class StarRocksCDCSinkIT extends TestSuiteBase implements TestResource { private Connection jdbcConnection; private GenericContainer starRocksServer; + @TestContainerExtension + private final ContainerExtendedFactory extendedFactory = + container -> { + Container.ExecResult extraCommands = + container.execInContainer( + "bash", + "-c", + "mkdir -p /tmp/seatunnel/plugins/Jdbc/lib && cd /tmp/seatunnel/plugins/Jdbc/lib && curl -O " + + SR_DRIVER_JAR); + Assertions.assertEquals(0, extraCommands.getExitCode()); + }; + @BeforeAll @Override public void startUp() { @@ -119,6 +133,10 @@ public void tearDown() throws Exception { } @TestTemplate + @DisabledOnContainer( + value = {}, + type = {EngineType.SPARK}, + disabledReason = "Currently Spark engine unsupported DELETE operation") public void testStarRocksSink(TestContainer container) throws Exception { Container.ExecResult execResult = container.executeJob("/write-cdc-changelog-to-starrocks.conf"); diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/resources/write-cdc-changelog-to-starrocks.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/resources/write-cdc-changelog-to-starrocks.conf index 83488e32556..e358e97d624 100644 --- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/resources/write-cdc-changelog-to-starrocks.conf +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-starrocks-e2e/src/test/resources/write-cdc-changelog-to-starrocks.conf @@ -65,7 +65,7 @@ sink { password = "" database = "test" table = "e2e_table_sink" - + base-url = "jdbc:mysql://starrocks_cdc_e2e:9030/test" batch_max_rows = 100 max_retries = 3 
From 6e0c81d492997cb962f7b23ef192f13071fbc835 Mon Sep 17 00:00:00 2001 From: YalikWang <34478654+YalikWang@users.noreply.github.com> Date: Tue, 2 Apr 2024 10:42:02 +0800 Subject: [PATCH 48/59] [fix][connector-rocketmq]Fix a NPE problem when checkpoint.interval is set too small(#6624) (#6625) --- .../rocketmq/source/RocketMqSourceReader.java | 26 +++++++++++-------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/seatunnel-connectors-v2/connector-rocketmq/src/main/java/org/apache/seatunnel/connectors/seatunnel/rocketmq/source/RocketMqSourceReader.java b/seatunnel-connectors-v2/connector-rocketmq/src/main/java/org/apache/seatunnel/connectors/seatunnel/rocketmq/source/RocketMqSourceReader.java index fd4f9860724..2beef96f1bc 100644 --- a/seatunnel-connectors-v2/connector-rocketmq/src/main/java/org/apache/seatunnel/connectors/seatunnel/rocketmq/source/RocketMqSourceReader.java +++ b/seatunnel-connectors-v2/connector-rocketmq/src/main/java/org/apache/seatunnel/connectors/seatunnel/rocketmq/source/RocketMqSourceReader.java @@ -242,17 +242,21 @@ public void notifyCheckpointComplete(long checkpointId) throws Exception { Long offset = entry.getValue(); try { if (messageQueue != null && offset != null) { - consumerThreads - .get(messageQueue) - .getTasks() - .put( - consumer -> { - if (this.metadata.isEnabledCommitCheckpoint()) { - consumer.getOffsetStore() - .updateOffset(messageQueue, offset, false); - consumer.getOffsetStore().persist(messageQueue); - } - }); + RocketMqConsumerThread rocketMqConsumerThread = + consumerThreads.get(messageQueue); + if (rocketMqConsumerThread != null) { + rocketMqConsumerThread + .getTasks() + .put( + consumer -> { + if (this.metadata.isEnabledCommitCheckpoint()) { + consumer.getOffsetStore() + .updateOffset( + messageQueue, offset, false); + consumer.getOffsetStore().persist(messageQueue); + } + }); + } } } catch (InterruptedException e) { log.error("commit offset failed", e); From 72ebc73e87487125e1aad7a01cd622e8b8c1f4b6 Mon 
Sep 17 00:00:00 2001 From: xiaochen <598457447@qq.com> Date: Tue, 2 Apr 2024 10:49:45 +0800 Subject: [PATCH 49/59] [Fix][Doc] Fix some spell errors (#6628) --- docs/en/seatunnel-engine/engine-jar-storage-mode.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/seatunnel-engine/engine-jar-storage-mode.md b/docs/en/seatunnel-engine/engine-jar-storage-mode.md index cdb30e9dc18..eff40eac9b4 100644 --- a/docs/en/seatunnel-engine/engine-jar-storage-mode.md +++ b/docs/en/seatunnel-engine/engine-jar-storage-mode.md @@ -15,7 +15,7 @@ We are committed to ongoing efforts to enhance and stabilize this functionality, We can enable the optimization job submission process, which is configured in the `seatunel.yaml`. After enabling the optimization of the Seatunnel job submission process configuration item, users can use the Seatunnel Zeta engine as the execution engine without placing the connector Jar packages required for task execution or the third-party Jar packages that the connector relies on in each engine `connector` directory. Users only need to place all the Jar packages for task execution on the client that submits the job, and the client will automatically upload the Jars required for task execution to the Zeta engine. It is necessary to enable this configuration item when submitting jobs in Docker or k8s mode, -which can fundamentally solve the problem of large container images caused by the heavy weight of the Seatunnrl Zeta engine. In the image, only the core framework package of the Zeta engine needs to be provided, +which can fundamentally solve the problem of large container images caused by the heavy weight of the Seatunnel Zeta engine. In the image, only the core framework package of the Zeta engine needs to be provided, and then the jar package of the connector and the third-party jar package that the connector relies on can be separately uploaded to the pod for distribution. 
After enabling the optimization job submission process configuration item, you do not need to place the following two types of Jar packages in the Zeta engine: @@ -26,7 +26,7 @@ COMMON_ PLUGIN_ JARS refers to the third-party Jar package that the connector re When common jars do not exist in Zeta's `lib`, it can upload the local common jars of the client to the `lib` directory of all engine nodes. This way, even if the user does not place a jar on all nodes in Zeta's `lib`, the task can still be executed normally. However, we do not recommend relying on the configuration item of opening the optimization job submission process to upload the third-party Jar package that the connector relies on. -If you use Zeta Engine, please add the the third-party jar package files that the connector relies on to `$SEATUNNEL_HOME/lib/` directory on each node, such as jdbc drivers. +If you use Zeta Engine, please add the third-party jar package files that the connector relies on to `$SEATUNNEL_HOME/lib/` directory on each node, such as jdbc drivers. # ConnectorJar storage strategy @@ -36,7 +36,7 @@ Two different storage strategies provide a more flexible storage mode for Jar fi ## Related configuration -| paramemter | default value | describe | +| parameter | default value | describe | |-------------------------------------|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------| | connector-jar-storage-enable | false | Whether to enable uploading the connector Jar package to the engine. The default enabled state is false. | | connector-jar-storage-mode | SHARED | Engine-side Jar package storage mode selection. There are two optional modes, SHARED and ISOLATED. The default Jar package storage mode is SHARED. 
| @@ -79,7 +79,7 @@ Example: ```yaml jar-storage: - connector-jar-storage-enable:true + connector-jar-storage-enable: true connector-jar-storage-mode: SHARED connector-jar-storage-path: "" connector-jar-cleanup-task-interval: 3600 From 2a0a0b9d19cca478f01fde32958b6dda33a45737 Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Tue, 2 Apr 2024 10:50:20 +0800 Subject: [PATCH 50/59] [Fix][Connector-V2] Fix add hive partition error when partition already existed (#6577) --- .../seatunnel/hive/commit/HiveSinkAggregatedCommitter.java | 3 --- .../seatunnel/hive/utils/HiveMetaStoreProxy.java | 7 ++++++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/commit/HiveSinkAggregatedCommitter.java b/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/commit/HiveSinkAggregatedCommitter.java index 0e423f3e875..0f57f864580 100644 --- a/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/commit/HiveSinkAggregatedCommitter.java +++ b/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/commit/HiveSinkAggregatedCommitter.java @@ -24,7 +24,6 @@ import org.apache.seatunnel.connectors.seatunnel.file.sink.commit.FileSinkAggregatedCommitter; import org.apache.seatunnel.connectors.seatunnel.hive.utils.HiveMetaStoreProxy; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.thrift.TException; import lombok.extern.slf4j.Slf4j; @@ -71,8 +70,6 @@ public List commit( try { hiveMetaStore.addPartitions(dbName, tableName, partitions); log.info("Add these partitions {}", partitions); - } catch (AlreadyExistsException e) { - log.warn("These partitions {} are already exists", partitions); } catch (TException e) { log.error("Failed to add these partitions {}", partitions, e); errorCommitInfos.add(aggregatedCommitInfo); diff 
--git a/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/utils/HiveMetaStoreProxy.java b/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/utils/HiveMetaStoreProxy.java index e2941340f88..6a1288b661e 100644 --- a/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/utils/HiveMetaStoreProxy.java +++ b/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/utils/HiveMetaStoreProxy.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.thrift.TException; @@ -131,7 +132,11 @@ public void addPartitions( @NonNull String dbName, @NonNull String tableName, List partitions) throws TException { for (String partition : partitions) { - hiveMetaStoreClient.appendPartition(dbName, tableName, partition); + try { + hiveMetaStoreClient.appendPartition(dbName, tableName, partition); + } catch (AlreadyExistsException e) { + log.warn("The partition {} are already exists", partition); + } } } From e60beb28ecdea41cc3730630f487cd6fa398568c Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Tue, 2 Apr 2024 13:35:58 +0800 Subject: [PATCH 51/59] [Improve][CDC] Improve read performance when record not contains schema field (#6571) --- .../connectors/cdc/base/utils/SourceRecordUtils.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/SourceRecordUtils.java 
b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/SourceRecordUtils.java index 872669eacd3..abbdb5b76a8 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/SourceRecordUtils.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/SourceRecordUtils.java @@ -202,10 +202,8 @@ public static TablePath getTablePath(SourceRecord record) { String databaseName = sourceStruct.getString(AbstractSourceInfo.DATABASE_NAME_KEY); String tableName = sourceStruct.getString(AbstractSourceInfo.TABLE_NAME_KEY); String schemaName = null; - try { + if (sourceStruct.schema().field(AbstractSourceInfo.SCHEMA_NAME_KEY) != null) { schemaName = sourceStruct.getString(AbstractSourceInfo.SCHEMA_NAME_KEY); - } catch (Throwable e) { - // ignore } return TablePath.of(databaseName, schemaName, tableName); } From fe33422161b4ad7f210b1f3d9eb3de49c4657cc3 Mon Sep 17 00:00:00 2001 From: hailin0 Date: Tue, 2 Apr 2024 16:09:18 +0800 Subject: [PATCH 52/59] [Improve][CDC] Optimize split state memory allocation in increment phase (#6554) --- pom.xml | 14 + .../relational/JdbcSourceEventDispatcher.java | 18 + .../enumerator/HybridSplitAssigner.java | 22 ++ .../IncrementalSourceEnumerator.java | 12 + .../enumerator/IncrementalSplitAssigner.java | 23 ++ .../enumerator/SnapshotSplitAssigner.java | 28 ++ .../event/CompletedSnapshotPhaseEvent.java | 34 ++ .../reader/IncrementalSourceReader.java | 15 +- .../IncrementalSourceRecordEmitter.java | 28 ++ .../IncrementalSourceStreamFetcher.java | 15 +- .../split/state/IncrementalSplitState.java | 41 ++ .../cdc/base/utils/SourceRecordUtils.java | 7 + .../enumerator/HybridSplitAssignerTest.java | 132 +++++++ .../IncrementalSourceStreamFetcherTest.java | 367 ++++++++++++++++++ .../state/IncrementalSplitStateTest.java | 169 ++++++++ 
.../LogMinerStreamingChangeEventSource.java | 1 + .../SqlServerStreamingChangeEventSource.java | 1 + .../source/reader/SourceReaderBase.java | 7 +- 18 files changed, 921 insertions(+), 13 deletions(-) create mode 100644 seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/event/CompletedSnapshotPhaseEvent.java create mode 100644 seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/HybridSplitAssignerTest.java create mode 100644 seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcherTest.java create mode 100644 seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/split/state/IncrementalSplitStateTest.java diff --git a/pom.xml b/pom.xml index 4d4a3731ae6..0f59747f72f 100644 --- a/pom.xml +++ b/pom.xml @@ -117,6 +117,7 @@ 1.81 4.13.2 5.9.0 + 4.11.0 1.3.3 3.3.0 3.2.0 @@ -357,6 +358,13 @@ ${junit4.version} + + org.mockito + mockito-junit-jupiter + ${mockito.version} + test + + com.fasterxml.jackson.core jackson-annotations @@ -521,6 +529,12 @@ test + + org.mockito + mockito-junit-jupiter + test + + diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/relational/JdbcSourceEventDispatcher.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/relational/JdbcSourceEventDispatcher.java index 23dfabd9fa4..90cc8126f45 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/relational/JdbcSourceEventDispatcher.java +++ 
b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/relational/JdbcSourceEventDispatcher.java @@ -25,7 +25,9 @@ import org.apache.kafka.connect.source.SourceRecord; import io.debezium.config.CommonConnectorConfig; +import io.debezium.config.Configuration; import io.debezium.connector.base.ChangeEventQueue; +import io.debezium.heartbeat.Heartbeat; import io.debezium.pipeline.DataChangeEvent; import io.debezium.pipeline.EventDispatcher; import io.debezium.pipeline.source.spi.EventMetadataProvider; @@ -37,6 +39,8 @@ import io.debezium.schema.TopicSelector; import io.debezium.util.SchemaNameAdjuster; +import java.time.Duration; +import java.time.temporal.ChronoUnit; import java.util.Map; /** @@ -71,6 +75,10 @@ public JdbcSourceEventDispatcher( filter, changeEventCreator, metadataProvider, + Heartbeat.create( + getHeartbeatInterval(connectorConfig), + topicSelector.getHeartbeatTopic(), + connectorConfig.getLogicalName()), schemaNameAdjuster); this.queue = queue; this.topic = topicSelector.getPrimaryTopic(); @@ -92,4 +100,14 @@ public void dispatchWatermarkEvent( sourcePartition, topic, sourceSplit.splitId(), watermarkKind, watermark); queue.enqueue(new DataChangeEvent(sourceRecord)); } + + private static Duration getHeartbeatInterval(CommonConnectorConfig connectorConfig) { + Configuration configuration = connectorConfig.getConfig(); + Duration heartbeatInterval = + configuration.getDuration(Heartbeat.HEARTBEAT_INTERVAL, ChronoUnit.MILLIS); + if (heartbeatInterval.isZero()) { + return Duration.ofMillis(5000); + } + return heartbeatInterval; + } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/HybridSplitAssigner.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/HybridSplitAssigner.java index 9070e2fb88f..d6b0bdb96cb 100644 --- 
a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/HybridSplitAssigner.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/HybridSplitAssigner.java @@ -17,6 +17,8 @@ package org.apache.seatunnel.connectors.cdc.base.source.enumerator; +import org.apache.seatunnel.shade.com.google.common.annotations.VisibleForTesting; + import org.apache.seatunnel.connectors.cdc.base.config.SourceConfig; import org.apache.seatunnel.connectors.cdc.base.dialect.DataSourceDialect; import org.apache.seatunnel.connectors.cdc.base.source.enumerator.state.HybridPendingSplitsState; @@ -31,9 +33,11 @@ import io.debezium.relational.TableId; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Optional; +import java.util.function.Predicate; /** Assigner for Hybrid split which contains snapshot splits and incremental splits. 
*/ public class HybridSplitAssigner implements SplitAssigner { @@ -146,4 +150,22 @@ public void notifyCheckpointComplete(long checkpointId) { snapshotSplitAssigner.notifyCheckpointComplete(checkpointId); incrementalSplitAssigner.notifyCheckpointComplete(checkpointId); } + + @VisibleForTesting + IncrementalSplitAssigner getIncrementalSplitAssigner() { + return incrementalSplitAssigner; + } + + @VisibleForTesting + SnapshotSplitAssigner getSnapshotSplitAssigner() { + return snapshotSplitAssigner; + } + + public boolean completedSnapshotPhase(List tableIds) { + return Arrays.asList( + snapshotSplitAssigner.completedSnapshotPhase(tableIds), + incrementalSplitAssigner.completedSnapshotPhase(tableIds)) + .stream() + .allMatch(Predicate.isEqual(true)); + } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/IncrementalSourceEnumerator.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/IncrementalSourceEnumerator.java index 86f7ac42def..b17b910e5d8 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/IncrementalSourceEnumerator.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/IncrementalSourceEnumerator.java @@ -20,6 +20,7 @@ import org.apache.seatunnel.api.source.SourceEvent; import org.apache.seatunnel.api.source.SourceSplitEnumerator; import org.apache.seatunnel.connectors.cdc.base.source.enumerator.state.PendingSplitsState; +import org.apache.seatunnel.connectors.cdc.base.source.event.CompletedSnapshotPhaseEvent; import org.apache.seatunnel.connectors.cdc.base.source.event.CompletedSnapshotSplitsAckEvent; import org.apache.seatunnel.connectors.cdc.base.source.event.CompletedSnapshotSplitsReportEvent; import 
org.apache.seatunnel.connectors.cdc.base.source.event.SnapshotSplitWatermark; @@ -120,6 +121,17 @@ public void handleSourceEvent(int subtaskId, SourceEvent sourceEvent) { .map(SnapshotSplitWatermark::getSplitId) .collect(Collectors.toList())); context.sendEventToSourceReader(subtaskId, ackEvent); + } else if (sourceEvent instanceof CompletedSnapshotPhaseEvent) { + LOG.debug( + "The enumerator receives completed snapshot phase event {} from subtask {}.", + sourceEvent, + subtaskId); + CompletedSnapshotPhaseEvent event = (CompletedSnapshotPhaseEvent) sourceEvent; + if (splitAssigner instanceof HybridSplitAssigner) { + ((HybridSplitAssigner) splitAssigner).completedSnapshotPhase(event.getTableIds()); + LOG.info( + "Clean the SnapshotSplitAssigner#assignedSplits/splitCompletedOffsets to empty."); + } } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/IncrementalSplitAssigner.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/IncrementalSplitAssigner.java index fe8204f6cd2..7b45ee1ef61 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/IncrementalSplitAssigner.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/IncrementalSplitAssigner.java @@ -17,6 +17,8 @@ package org.apache.seatunnel.connectors.cdc.base.source.enumerator; +import org.apache.seatunnel.shade.com.google.common.annotations.VisibleForTesting; + import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.connectors.cdc.base.config.SourceConfig; import org.apache.seatunnel.connectors.cdc.base.source.enumerator.state.IncrementalPhaseState; @@ -45,6 +47,8 @@ import java.util.Set; import java.util.stream.Collectors; +import 
static org.apache.seatunnel.shade.com.google.common.base.Preconditions.checkArgument; + /** Assigner for incremental split. */ public class IncrementalSplitAssigner implements SplitAssigner { @@ -255,4 +259,23 @@ private IncrementalSplit createIncrementalSplit( completedSnapshotSplitInfos, checkpointDataType); } + + @VisibleForTesting + void setSplitAssigned(boolean assigned) { + this.splitAssigned = assigned; + } + + public boolean completedSnapshotPhase(List tableIds) { + checkArgument(splitAssigned && noMoreSplits()); + + for (String splitKey : new ArrayList<>(context.getAssignedSnapshotSplit().keySet())) { + SnapshotSplit assignedSplit = context.getAssignedSnapshotSplit().get(splitKey); + if (tableIds.contains(assignedSplit.getTableId())) { + context.getAssignedSnapshotSplit().remove(splitKey); + context.getSplitCompletedOffsets().remove(assignedSplit.splitId()); + } + } + return context.getAssignedSnapshotSplit().isEmpty() + && context.getSplitCompletedOffsets().isEmpty(); + } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/SnapshotSplitAssigner.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/SnapshotSplitAssigner.java index 443343947cd..c16dd811028 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/SnapshotSplitAssigner.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/SnapshotSplitAssigner.java @@ -17,6 +17,8 @@ package org.apache.seatunnel.connectors.cdc.base.source.enumerator; +import org.apache.seatunnel.shade.com.google.common.annotations.VisibleForTesting; + import org.apache.seatunnel.connectors.cdc.base.config.SourceConfig; import 
org.apache.seatunnel.connectors.cdc.base.dialect.DataSourceDialect; import org.apache.seatunnel.connectors.cdc.base.source.enumerator.splitter.ChunkSplitter; @@ -45,6 +47,8 @@ import java.util.concurrent.ConcurrentLinkedQueue; import java.util.stream.Collectors; +import static org.apache.seatunnel.shade.com.google.common.base.Preconditions.checkArgument; + /** Assigner for snapshot split. */ public class SnapshotSplitAssigner implements SplitAssigner { private static final Logger LOG = LoggerFactory.getLogger(SnapshotSplitAssigner.class); @@ -278,4 +282,28 @@ public boolean isCompleted() { private boolean allSplitsCompleted() { return noMoreSplits() && assignedSplits.size() == splitCompletedOffsets.size(); } + + @VisibleForTesting + Map getAssignedSplits() { + return assignedSplits; + } + + @VisibleForTesting + Map getSplitCompletedOffsets() { + return splitCompletedOffsets; + } + + public boolean completedSnapshotPhase(List tableIds) { + checkArgument(isCompleted() && allSplitsCompleted()); + + for (String splitKey : new ArrayList<>(assignedSplits.keySet())) { + SnapshotSplit assignedSplit = assignedSplits.get(splitKey); + if (tableIds.contains(assignedSplit.getTableId())) { + assignedSplits.remove(splitKey); + splitCompletedOffsets.remove(assignedSplit.splitId()); + } + } + + return assignedSplits.isEmpty() && splitCompletedOffsets.isEmpty(); + } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/event/CompletedSnapshotPhaseEvent.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/event/CompletedSnapshotPhaseEvent.java new file mode 100644 index 00000000000..623bf1c1a98 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/event/CompletedSnapshotPhaseEvent.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.cdc.base.source.event; + +import org.apache.seatunnel.api.source.SourceEvent; + +import io.debezium.relational.TableId; +import lombok.AllArgsConstructor; +import lombok.Data; + +import java.util.List; + +@Data +@AllArgsConstructor +public class CompletedSnapshotPhaseEvent implements SourceEvent { + private static final long serialVersionUID = 1L; + + private List tableIds; +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceReader.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceReader.java index 7f9d890197d..829f68763da 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceReader.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceReader.java @@ -22,6 +22,7 @@ import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import 
org.apache.seatunnel.connectors.cdc.base.config.SourceConfig; import org.apache.seatunnel.connectors.cdc.base.dialect.DataSourceDialect; +import org.apache.seatunnel.connectors.cdc.base.source.event.CompletedSnapshotPhaseEvent; import org.apache.seatunnel.connectors.cdc.base.source.event.CompletedSnapshotSplitsReportEvent; import org.apache.seatunnel.connectors.cdc.base.source.event.SnapshotSplitWatermark; import org.apache.seatunnel.connectors.cdc.base.source.split.IncrementalSplit; @@ -207,7 +208,19 @@ protected SourceSplitStateBase initializedState(SourceSplitBase split) { debeziumDeserializationSchema.restoreCheckpointProducedType( incrementalSplit.getCheckpointDataType()); } - return new IncrementalSplitState(split.asIncrementalSplit()); + IncrementalSplitState splitState = new IncrementalSplitState(incrementalSplit); + if (splitState.autoEnterPureIncrementPhaseIfAllowed()) { + log.info( + "The incremental split[{}] startup position {} is equal the maxSnapshotSplitsHighWatermark {}, auto enter pure increment phase.", + incrementalSplit.splitId(), + splitState.getStartupOffset(), + splitState.getMaxSnapshotSplitsHighWatermark()); + log.info("Clean the IncrementalSplit#completedSnapshotSplitInfos to empty."); + CompletedSnapshotPhaseEvent event = + new CompletedSnapshotPhaseEvent(splitState.getTableIds()); + context.sendSourceEventToEnumerator(event); + } + return splitState; } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceRecordEmitter.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceRecordEmitter.java index 65474a0d9fd..a98a9d09591 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceRecordEmitter.java +++ 
b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceRecordEmitter.java @@ -22,9 +22,11 @@ import org.apache.seatunnel.api.source.Collector; import org.apache.seatunnel.api.source.SourceReader; import org.apache.seatunnel.api.table.event.SchemaChangeEvent; +import org.apache.seatunnel.connectors.cdc.base.source.event.CompletedSnapshotPhaseEvent; import org.apache.seatunnel.connectors.cdc.base.source.offset.Offset; import org.apache.seatunnel.connectors.cdc.base.source.offset.OffsetFactory; import org.apache.seatunnel.connectors.cdc.base.source.split.SourceRecords; +import org.apache.seatunnel.connectors.cdc.base.source.split.state.IncrementalSplitState; import org.apache.seatunnel.connectors.cdc.base.source.split.state.SourceSplitStateBase; import org.apache.seatunnel.connectors.cdc.debezium.DebeziumDeserializationSchema; import org.apache.seatunnel.connectors.seatunnel.common.source.reader.RecordEmitter; @@ -65,6 +67,7 @@ public class IncrementalSourceRecordEmitter protected final OffsetFactory offsetFactory; + protected final SourceReader.Context context; protected final Counter recordFetchDelay; protected final Counter recordEmitDelay; protected final EventListener eventListener; @@ -76,6 +79,7 @@ public IncrementalSourceRecordEmitter( this.debeziumDeserializationSchema = debeziumDeserializationSchema; this.outputCollector = new OutputCollector<>(); this.offsetFactory = offsetFactory; + this.context = context; this.recordFetchDelay = context.getMetricsContext().counter(CDC_RECORD_FETCH_DELAY); this.recordEmitDelay = context.getMetricsContext().counter(CDC_RECORD_EMIT_DELAY); this.eventListener = context.getEventListener(); @@ -90,6 +94,7 @@ public void emitRecord( SourceRecord next = elementIterator.next(); reportMetrics(next); processElement(next, collector, splitState); + markEnterPureIncrementPhase(next, splitState); } } @@ -138,6 +143,29 @@ protected void 
processElement( } } + private void markEnterPureIncrementPhase( + SourceRecord element, SourceSplitStateBase splitState) { + if (splitState.isIncrementalSplitState()) { + IncrementalSplitState incrementalSplitState = splitState.asIncrementalSplitState(); + if (incrementalSplitState.isEnterPureIncrementPhase()) { + return; + } + Offset position = getOffsetPosition(element); + if (incrementalSplitState.markEnterPureIncrementPhaseIfNeed(position)) { + log.info( + "The current record position {} is after the maxSnapshotSplitsHighWatermark {}, " + + "mark enter pure increment phase.", + position, + incrementalSplitState.getMaxSnapshotSplitsHighWatermark()); + log.info("Clean the IncrementalSplit#completedSnapshotSplitInfos to empty."); + + CompletedSnapshotPhaseEvent completedSnapshotPhaseEvent = + new CompletedSnapshotPhaseEvent(incrementalSplitState.getTableIds()); + context.sendSourceEventToEnumerator(completedSnapshotPhaseEvent); + } + } + } + private Offset getWatermark(SourceRecord watermarkEvent) { return getOffsetPosition(watermarkEvent.sourceOffset()); } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcher.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcher.java index e34970054b1..4cad739ac6a 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcher.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcher.java @@ -170,7 +170,7 @@ private Iterator splitNormalStream(List batchEve *

After event batch: [checkpoint-before] [SchemaChangeEvent-1, SchemaChangeEvent-2, * checkpoint-after] [a, b, c, d, e] */ - private Iterator splitSchemaChangeStream(List batchEvents) { + Iterator splitSchemaChangeStream(List batchEvents) { List sourceRecordsSet = new ArrayList<>(); List sourceRecordList = new ArrayList<>(); @@ -181,11 +181,6 @@ private Iterator splitSchemaChangeStream(List ba if (!shouldEmit(currentRecord)) { continue; } - if (!SourceRecordUtils.isDataChangeRecord(currentRecord) - && !SourceRecordUtils.isSchemaChangeEvent(currentRecord)) { - sourceRecordList.add(currentRecord); - continue; - } if (SourceRecordUtils.isSchemaChangeEvent(currentRecord)) { if (!schemaChangeResolver.support(currentRecord)) { @@ -208,9 +203,11 @@ private Iterator splitSchemaChangeStream(List ba sourceRecordList = new ArrayList<>(); sourceRecordList.add(currentRecord); } - } else if (SourceRecordUtils.isDataChangeRecord(currentRecord)) { + } else if (SourceRecordUtils.isDataChangeRecord(currentRecord) + || SourceRecordUtils.isHeartbeatRecord(currentRecord)) { if (previousRecord == null - || SourceRecordUtils.isDataChangeRecord(previousRecord)) { + || SourceRecordUtils.isDataChangeRecord(previousRecord) + || SourceRecordUtils.isHeartbeatRecord(previousRecord)) { sourceRecordList.add(currentRecord); } else { sourceRecordList.add( @@ -274,7 +271,7 @@ public void close() { } /** Returns the record should emit or not. 
*/ - private boolean shouldEmit(SourceRecord sourceRecord) { + boolean shouldEmit(SourceRecord sourceRecord) { if (taskContext.isDataChangeRecord(sourceRecord)) { Offset position = taskContext.getStreamOffset(sourceRecord); TableId tableId = getTableId(sourceRecord); diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/split/state/IncrementalSplitState.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/split/state/IncrementalSplitState.java index 4157569766f..c04026bf1ea 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/split/state/IncrementalSplitState.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/split/state/IncrementalSplitState.java @@ -24,6 +24,7 @@ import lombok.Getter; import lombok.Setter; +import java.util.Comparator; import java.util.List; /** The state of split to describe the change log of table(s). 
*/ @@ -39,11 +40,27 @@ public class IncrementalSplitState extends SourceSplitStateBase { /** Obtained by configuration, may not end */ private Offset stopOffset; + private Offset maxSnapshotSplitsHighWatermark; + private volatile boolean enterPureIncrementPhase; + public IncrementalSplitState(IncrementalSplit split) { super(split); this.tableIds = split.getTableIds(); this.startupOffset = split.getStartupOffset(); this.stopOffset = split.getStopOffset(); + + if (split.getCompletedSnapshotSplitInfos().isEmpty()) { + this.maxSnapshotSplitsHighWatermark = null; + this.enterPureIncrementPhase = true; + } else { + this.maxSnapshotSplitsHighWatermark = + split.getCompletedSnapshotSplitInfos().stream() + .filter(e -> e.getWatermark() != null) + .max(Comparator.comparing(o -> o.getWatermark().getHighWatermark())) + .map(e -> e.getWatermark().getHighWatermark()) + .get(); + this.enterPureIncrementPhase = false; + } } @Override @@ -56,4 +73,28 @@ public IncrementalSplit toSourceSplit() { getStopOffset(), incrementalSplit.getCompletedSnapshotSplitInfos()); } + + public synchronized boolean markEnterPureIncrementPhaseIfNeed(Offset currentRecordPosition) { + if (enterPureIncrementPhase) { + return false; + } + + if (currentRecordPosition.isAtOrAfter(maxSnapshotSplitsHighWatermark)) { + split.asIncrementalSplit().getCompletedSnapshotSplitInfos().clear(); + this.enterPureIncrementPhase = true; + return true; + } + + return false; + } + + public synchronized boolean autoEnterPureIncrementPhaseIfAllowed() { + if (!enterPureIncrementPhase + && maxSnapshotSplitsHighWatermark.compareTo(startupOffset) == 0) { + split.asIncrementalSplit().getCompletedSnapshotSplitInfos().clear(); + enterPureIncrementPhase = true; + return true; + } + return false; + } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/SourceRecordUtils.java 
b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/SourceRecordUtils.java index abbdb5b76a8..e06213b06d5 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/SourceRecordUtils.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/SourceRecordUtils.java @@ -47,6 +47,8 @@ private SourceRecordUtils() {} public static final String SCHEMA_CHANGE_EVENT_KEY_NAME = "io.debezium.connector.mysql.SchemaChangeKey"; + public static final String HEARTBEAT_VALUE_SCHEMA_KEY_NAME = + "io.debezium.connector.common.Heartbeat"; private static final DocumentReader DOCUMENT_READER = DocumentReader.defaultReader(); /** Converts a {@link ResultSet} row to an array of Objects. */ @@ -105,6 +107,11 @@ public static boolean isDataChangeRecord(SourceRecord record) { && value.getString(Envelope.FieldName.OPERATION) != null; } + public static boolean isHeartbeatRecord(SourceRecord record) { + Schema valueSchema = record.valueSchema(); + return valueSchema != null && valueSchema.name().equals(HEARTBEAT_VALUE_SCHEMA_KEY_NAME); + } + public static TableId getTableId(SourceRecord dataRecord) { Struct value = (Struct) dataRecord.value(); Struct source = value.getStruct(Envelope.FieldName.SOURCE); diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/HybridSplitAssignerTest.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/HybridSplitAssignerTest.java new file mode 100644 index 00000000000..2c931eb9e43 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/enumerator/HybridSplitAssignerTest.java @@ -0,0 +1,132 @@ +/* + * 
Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.cdc.base.source.enumerator; + +import org.apache.seatunnel.connectors.cdc.base.source.enumerator.state.HybridPendingSplitsState; +import org.apache.seatunnel.connectors.cdc.base.source.enumerator.state.SnapshotPhaseState; +import org.apache.seatunnel.connectors.cdc.base.source.event.SnapshotSplitWatermark; +import org.apache.seatunnel.connectors.cdc.base.source.split.SnapshotSplit; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import io.debezium.relational.TableId; + +import java.util.AbstractMap; +import java.util.Arrays; +import java.util.Collections; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public class HybridSplitAssignerTest { + @Test + public void testCompletedSnapshotPhase() { + Map assignedSplits = createAssignedSplits(); + Map splitCompletedOffsets = createSplitCompletedOffsets(); + SnapshotPhaseState snapshotPhaseState = + new SnapshotPhaseState( + Collections.emptyList(), + Collections.emptyList(), + assignedSplits, + splitCompletedOffsets, + true, + Collections.emptyList(), + false, + false); + HybridPendingSplitsState 
checkpointState = + new HybridPendingSplitsState(snapshotPhaseState, null); + SplitAssigner.Context context = + new SplitAssigner.Context<>( + null, + Collections.emptySet(), + checkpointState.getSnapshotPhaseState().getAssignedSplits(), + checkpointState.getSnapshotPhaseState().getSplitCompletedOffsets()); + HybridSplitAssigner splitAssigner = + new HybridSplitAssigner<>(context, 1, 1, checkpointState, null, null); + splitAssigner.getIncrementalSplitAssigner().setSplitAssigned(true); + + Assertions.assertFalse( + splitAssigner.completedSnapshotPhase(Arrays.asList(TableId.parse("db1.table1")))); + Assertions.assertFalse( + splitAssigner.getSnapshotSplitAssigner().getAssignedSplits().isEmpty()); + Assertions.assertFalse( + splitAssigner.getSnapshotSplitAssigner().getSplitCompletedOffsets().isEmpty()); + Assertions.assertFalse(context.getAssignedSnapshotSplit().isEmpty()); + Assertions.assertFalse(context.getSplitCompletedOffsets().isEmpty()); + + Assertions.assertTrue( + splitAssigner.completedSnapshotPhase(Arrays.asList(TableId.parse("db1.table2")))); + Assertions.assertTrue( + splitAssigner.getSnapshotSplitAssigner().getAssignedSplits().isEmpty()); + Assertions.assertTrue( + splitAssigner.getSnapshotSplitAssigner().getSplitCompletedOffsets().isEmpty()); + Assertions.assertTrue(context.getAssignedSnapshotSplit().isEmpty()); + Assertions.assertTrue(context.getSplitCompletedOffsets().isEmpty()); + } + + private static Map createAssignedSplits() { + return Stream.of( + new AbstractMap.SimpleEntry<>( + "db1.table1.1", + new SnapshotSplit( + "db1.table1.1", + TableId.parse("db1.table1"), + null, + null, + null)), + new AbstractMap.SimpleEntry<>( + "db1.table1.2", + new SnapshotSplit( + "db1.table1.2", + TableId.parse("db1.table1"), + null, + null, + null)), + new AbstractMap.SimpleEntry<>( + "db1.table2.1", + new SnapshotSplit( + "db1.table2.1", + TableId.parse("db1.table2"), + null, + null, + null)), + new AbstractMap.SimpleEntry<>( + "db1.table2.2", + new 
SnapshotSplit( + "db1.table2.2", + TableId.parse("db1.table2"), + null, + null, + null))) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + private static Map createSplitCompletedOffsets() { + return Stream.of( + new AbstractMap.SimpleEntry<>( + "db1.table1.1", new SnapshotSplitWatermark(null, null, null)), + new AbstractMap.SimpleEntry<>( + "db1.table1.2", new SnapshotSplitWatermark(null, null, null)), + new AbstractMap.SimpleEntry<>( + "db1.table2.1", new SnapshotSplitWatermark(null, null, null)), + new AbstractMap.SimpleEntry<>( + "db1.table2.2", new SnapshotSplitWatermark(null, null, null))) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcherTest.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcherTest.java new file mode 100644 index 00000000000..64ac4f4a0c1 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/reader/external/IncrementalSourceStreamFetcherTest.java @@ -0,0 +1,367 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.cdc.base.source.reader.external; + +import org.apache.seatunnel.connectors.cdc.base.schema.SchemaChangeResolver; +import org.apache.seatunnel.connectors.cdc.base.source.split.SourceRecords; +import org.apache.seatunnel.connectors.cdc.base.source.split.wartermark.WatermarkEvent; +import org.apache.seatunnel.connectors.cdc.base.utils.SourceRecordUtils; + +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaBuilder; +import org.apache.kafka.connect.data.Struct; +import org.apache.kafka.connect.source.SourceRecord; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import io.debezium.data.Envelope; +import io.debezium.heartbeat.Heartbeat; +import io.debezium.pipeline.DataChangeEvent; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.atomic.AtomicReference; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +public class IncrementalSourceStreamFetcherTest { + + @Test + public void testSplitSchemaChangeStream() throws Exception { + IncrementalSourceStreamFetcher fetcher = createFetcher(); + + List inputEvents = new ArrayList<>(); + List records = new ArrayList<>(); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new 
DataChangeEvent(createDataEvent())); + Iterator outputEvents = fetcher.splitSchemaChangeStream(inputEvents); + outputEvents.forEachRemaining(records::add); + + Assertions.assertEquals(1, records.size()); + Assertions.assertEquals(2, records.get(0).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(1))); + + inputEvents = new ArrayList<>(); + records = new ArrayList<>(); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + outputEvents = fetcher.splitSchemaChangeStream(inputEvents); + outputEvents.forEachRemaining(records::add); + + Assertions.assertEquals(2, records.size()); + Assertions.assertEquals(1, records.get(0).getSourceRecordList().size()); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeBeforeWatermarkEvent( + records.get(0).getSourceRecordList().get(0))); + Assertions.assertEquals(3, records.get(1).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(1))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeAfterWatermarkEvent( + records.get(1).getSourceRecordList().get(2))); + + inputEvents = new ArrayList<>(); + records = new ArrayList<>(); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + outputEvents = fetcher.splitSchemaChangeStream(inputEvents); + outputEvents.forEachRemaining(records::add); + + Assertions.assertEquals(2, records.size()); + Assertions.assertEquals(3, 
records.get(0).getSourceRecordList().size()); + Assertions.assertEquals(3, records.get(1).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(1))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeBeforeWatermarkEvent( + records.get(0).getSourceRecordList().get(2))); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(1))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeAfterWatermarkEvent( + records.get(1).getSourceRecordList().get(2))); + + inputEvents = new ArrayList<>(); + records = new ArrayList<>(); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new DataChangeEvent(createDataEvent())); + outputEvents = fetcher.splitSchemaChangeStream(inputEvents); + outputEvents.forEachRemaining(records::add); + + Assertions.assertEquals(3, records.size()); + Assertions.assertEquals(1, records.get(0).getSourceRecordList().size()); + Assertions.assertEquals(3, records.get(1).getSourceRecordList().size()); + Assertions.assertEquals(2, records.get(2).getSourceRecordList().size()); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeBeforeWatermarkEvent( + records.get(0).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(1))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeAfterWatermarkEvent( + records.get(1).getSourceRecordList().get(2))); 
+ Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(2).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(2).getSourceRecordList().get(1))); + + inputEvents = new ArrayList<>(); + records = new ArrayList<>(); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new DataChangeEvent(createDataEvent())); + outputEvents = fetcher.splitSchemaChangeStream(inputEvents); + outputEvents.forEachRemaining(records::add); + + Assertions.assertEquals(3, records.size()); + Assertions.assertEquals(2, records.get(0).getSourceRecordList().size()); + Assertions.assertEquals(3, records.get(1).getSourceRecordList().size()); + Assertions.assertEquals(1, records.get(2).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(0))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeBeforeWatermarkEvent( + records.get(0).getSourceRecordList().get(1))); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(1))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeAfterWatermarkEvent( + records.get(1).getSourceRecordList().get(2))); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(2).getSourceRecordList().get(0))); + + inputEvents = new ArrayList<>(); + records = new ArrayList<>(); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + outputEvents = 
fetcher.splitSchemaChangeStream(inputEvents); + outputEvents.forEachRemaining(records::add); + + Assertions.assertEquals(4, records.size()); + Assertions.assertEquals(2, records.get(0).getSourceRecordList().size()); + Assertions.assertEquals(2, records.get(1).getSourceRecordList().size()); + Assertions.assertEquals(2, records.get(2).getSourceRecordList().size()); + Assertions.assertEquals(2, records.get(3).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(0))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeBeforeWatermarkEvent( + records.get(0).getSourceRecordList().get(1))); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(0))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeAfterWatermarkEvent( + records.get(1).getSourceRecordList().get(1))); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(2).getSourceRecordList().get(0))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeBeforeWatermarkEvent( + records.get(2).getSourceRecordList().get(1))); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(3).getSourceRecordList().get(0))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeAfterWatermarkEvent( + records.get(3).getSourceRecordList().get(1))); + + inputEvents = new ArrayList<>(); + records = new ArrayList<>(); + inputEvents.add(new DataChangeEvent(createHeartbeatEvent())); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new DataChangeEvent(createHeartbeatEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new 
DataChangeEvent(createHeartbeatEvent())); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new DataChangeEvent(createHeartbeatEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new DataChangeEvent(createHeartbeatEvent())); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new DataChangeEvent(createSchemaChangeEvent())); + inputEvents.add(new DataChangeEvent(createDataEvent())); + inputEvents.add(new DataChangeEvent(createHeartbeatEvent())); + outputEvents = fetcher.splitSchemaChangeStream(inputEvents); + outputEvents.forEachRemaining(records::add); + + Assertions.assertEquals(11, records.size()); + Assertions.assertEquals(3, records.get(0).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isHeartbeatRecord(records.get(0).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(1))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeBeforeWatermarkEvent( + records.get(0).getSourceRecordList().get(2))); + Assertions.assertEquals(2, records.get(1).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(0))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeAfterWatermarkEvent( + records.get(1).getSourceRecordList().get(1))); + Assertions.assertEquals(2, records.get(2).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isHeartbeatRecord(records.get(2).getSourceRecordList().get(0))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeBeforeWatermarkEvent( + records.get(2).getSourceRecordList().get(1))); + Assertions.assertEquals(2, records.get(3).getSourceRecordList().size()); + Assertions.assertTrue( + 
SourceRecordUtils.isSchemaChangeEvent(records.get(3).getSourceRecordList().get(0))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeAfterWatermarkEvent( + records.get(3).getSourceRecordList().get(1))); + Assertions.assertEquals(3, records.get(4).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(4).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(4).getSourceRecordList().get(1))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeBeforeWatermarkEvent( + records.get(4).getSourceRecordList().get(2))); + Assertions.assertEquals(2, records.get(5).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(5).getSourceRecordList().get(0))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeAfterWatermarkEvent( + records.get(5).getSourceRecordList().get(1))); + Assertions.assertEquals(4, records.get(6).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isHeartbeatRecord(records.get(6).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(6).getSourceRecordList().get(1))); + Assertions.assertTrue( + SourceRecordUtils.isHeartbeatRecord(records.get(6).getSourceRecordList().get(2))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeBeforeWatermarkEvent( + records.get(6).getSourceRecordList().get(3))); + Assertions.assertEquals(3, records.get(7).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(7).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(7).getSourceRecordList().get(1))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeAfterWatermarkEvent( + records.get(7).getSourceRecordList().get(2))); + Assertions.assertEquals(3, records.get(8).getSourceRecordList().size()); + 
Assertions.assertTrue( + SourceRecordUtils.isHeartbeatRecord(records.get(8).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(8).getSourceRecordList().get(1))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeBeforeWatermarkEvent( + records.get(8).getSourceRecordList().get(2))); + Assertions.assertEquals(2, records.get(9).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isSchemaChangeEvent(records.get(9).getSourceRecordList().get(0))); + Assertions.assertTrue( + WatermarkEvent.isSchemaChangeAfterWatermarkEvent( + records.get(9).getSourceRecordList().get(1))); + Assertions.assertEquals(2, records.get(10).getSourceRecordList().size()); + Assertions.assertTrue( + SourceRecordUtils.isDataChangeRecord(records.get(10).getSourceRecordList().get(0))); + Assertions.assertTrue( + SourceRecordUtils.isHeartbeatRecord(records.get(10).getSourceRecordList().get(1))); + } + + static SourceRecord createSchemaChangeEvent() { + Schema keySchema = + SchemaBuilder.struct().name(SourceRecordUtils.SCHEMA_CHANGE_EVENT_KEY_NAME).build(); + SourceRecord record = + new SourceRecord( + Collections.emptyMap(), + Collections.emptyMap(), + null, + keySchema, + null, + null, + null); + Assertions.assertTrue(SourceRecordUtils.isSchemaChangeEvent(record)); + return record; + } + + static SourceRecord createDataEvent() { + Schema valueSchema = + SchemaBuilder.struct() + .field(Envelope.FieldName.OPERATION, Schema.STRING_SCHEMA) + .build(); + Struct value = new Struct(valueSchema); + value.put(valueSchema.field(Envelope.FieldName.OPERATION), "c"); + SourceRecord record = + new SourceRecord( + Collections.emptyMap(), + Collections.emptyMap(), + null, + null, + null, + valueSchema, + value); + Assertions.assertTrue(SourceRecordUtils.isDataChangeRecord(record)); + return record; + } + + static SourceRecord createHeartbeatEvent() throws InterruptedException { + Heartbeat heartbeat = 
Heartbeat.create(Duration.ofNanos(1), "test", "test"); + AtomicReference eventRef = new AtomicReference<>(); + heartbeat.forcedBeat( + Collections.singletonMap("heartbeat", "heartbeat"), + Collections.singletonMap("heartbeat", "heartbeat"), + sourceRecord -> eventRef.set(sourceRecord)); + return eventRef.get(); + } + + static IncrementalSourceStreamFetcher createFetcher() { + SchemaChangeResolver schemaChangeResolver = mock(SchemaChangeResolver.class); + when(schemaChangeResolver.support(any())).thenReturn(true); + IncrementalSourceStreamFetcher fetcher = + new IncrementalSourceStreamFetcher(null, 0, schemaChangeResolver); + IncrementalSourceStreamFetcher spy = spy(fetcher); + doReturn(true).when(spy).shouldEmit(any()); + return spy; + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/split/state/IncrementalSplitStateTest.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/split/state/IncrementalSplitStateTest.java new file mode 100644 index 00000000000..4a0b40852a2 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/source/split/state/IncrementalSplitStateTest.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.cdc.base.source.split.state; + +import org.apache.seatunnel.connectors.cdc.base.source.event.SnapshotSplitWatermark; +import org.apache.seatunnel.connectors.cdc.base.source.offset.Offset; +import org.apache.seatunnel.connectors.cdc.base.source.split.CompletedSnapshotSplitInfo; +import org.apache.seatunnel.connectors.cdc.base.source.split.IncrementalSplit; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import io.debezium.relational.TableId; +import lombok.AllArgsConstructor; +import lombok.ToString; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public class IncrementalSplitStateTest { + + @Test + public void testMarkEnterPureIncrementPhaseIfNeed() { + Offset startupOffset = new TestOffset(100); + List snapshotSplits = Collections.emptyList(); + IncrementalSplit split = createIncrementalSplit(startupOffset, snapshotSplits); + IncrementalSplitState splitState = new IncrementalSplitState(split); + Assertions.assertNull(splitState.getMaxSnapshotSplitsHighWatermark()); + Assertions.assertTrue(splitState.isEnterPureIncrementPhase()); + Assertions.assertFalse(splitState.markEnterPureIncrementPhaseIfNeed(null)); + + startupOffset = new TestOffset(100); + snapshotSplits = + Stream.of( + createCompletedSnapshotSplitInfo( + "test1", new TestOffset(100), new TestOffset(100)), + createCompletedSnapshotSplitInfo( + "test2", new TestOffset(100), new TestOffset(100))) + 
.collect(Collectors.toList()); + split = createIncrementalSplit(startupOffset, snapshotSplits); + splitState = new IncrementalSplitState(split); + Assertions.assertEquals(startupOffset, splitState.getMaxSnapshotSplitsHighWatermark()); + Assertions.assertFalse(splitState.isEnterPureIncrementPhase()); + Assertions.assertFalse(splitState.markEnterPureIncrementPhaseIfNeed(new TestOffset(99))); + Assertions.assertFalse(splitState.isEnterPureIncrementPhase()); + Assertions.assertFalse(snapshotSplits.isEmpty()); + Assertions.assertTrue(splitState.markEnterPureIncrementPhaseIfNeed(new TestOffset(100))); + Assertions.assertTrue(snapshotSplits.isEmpty()); + Assertions.assertFalse(splitState.markEnterPureIncrementPhaseIfNeed(new TestOffset(100))); + Assertions.assertFalse(splitState.markEnterPureIncrementPhaseIfNeed(new TestOffset(101))); + + startupOffset = new TestOffset(100); + snapshotSplits = + Stream.of( + createCompletedSnapshotSplitInfo( + "test1", new TestOffset(1), new TestOffset(50)), + createCompletedSnapshotSplitInfo( + "test2", new TestOffset(50), new TestOffset(200))) + .collect(Collectors.toList()); + split = createIncrementalSplit(startupOffset, snapshotSplits); + splitState = new IncrementalSplitState(split); + Assertions.assertEquals( + new TestOffset(200), splitState.getMaxSnapshotSplitsHighWatermark()); + Assertions.assertFalse(splitState.isEnterPureIncrementPhase()); + Assertions.assertTrue(splitState.markEnterPureIncrementPhaseIfNeed(new TestOffset(201))); + Assertions.assertTrue(splitState.isEnterPureIncrementPhase()); + Assertions.assertTrue(snapshotSplits.isEmpty()); + Assertions.assertFalse(splitState.markEnterPureIncrementPhaseIfNeed(new TestOffset(200))); + Assertions.assertTrue(splitState.isEnterPureIncrementPhase()); + Assertions.assertFalse(splitState.markEnterPureIncrementPhaseIfNeed(new TestOffset(201))); + Assertions.assertFalse(splitState.markEnterPureIncrementPhaseIfNeed(new TestOffset(202))); + } + + @Test + public void 
testAutoEnterPureIncrementPhaseIfAllowed() { + Offset startupOffset = new TestOffset(100); + List snapshotSplits = Collections.emptyList(); + IncrementalSplit split = createIncrementalSplit(startupOffset, snapshotSplits); + IncrementalSplitState splitState = new IncrementalSplitState(split); + Assertions.assertTrue(splitState.isEnterPureIncrementPhase()); + Assertions.assertFalse(splitState.autoEnterPureIncrementPhaseIfAllowed()); + + startupOffset = new TestOffset(100); + snapshotSplits = + Stream.of( + createCompletedSnapshotSplitInfo( + "test1", new TestOffset(100), new TestOffset(100)), + createCompletedSnapshotSplitInfo( + "test2", new TestOffset(100), new TestOffset(100))) + .collect(Collectors.toList()); + split = createIncrementalSplit(startupOffset, snapshotSplits); + splitState = new IncrementalSplitState(split); + + Assertions.assertFalse(splitState.isEnterPureIncrementPhase()); + Assertions.assertTrue(splitState.autoEnterPureIncrementPhaseIfAllowed()); + Assertions.assertTrue(splitState.isEnterPureIncrementPhase()); + Assertions.assertFalse(splitState.autoEnterPureIncrementPhaseIfAllowed()); + Assertions.assertTrue(splitState.isEnterPureIncrementPhase()); + + startupOffset = new TestOffset(100); + snapshotSplits = + Stream.of( + createCompletedSnapshotSplitInfo( + "test1", new TestOffset(100), new TestOffset(100)), + createCompletedSnapshotSplitInfo( + "test2", new TestOffset(100), new TestOffset(101))) + .collect(Collectors.toList()); + split = createIncrementalSplit(startupOffset, snapshotSplits); + splitState = new IncrementalSplitState(split); + Assertions.assertFalse(splitState.isEnterPureIncrementPhase()); + Assertions.assertFalse(splitState.autoEnterPureIncrementPhaseIfAllowed()); + } + + private static IncrementalSplit createIncrementalSplit( + Offset startupOffset, List snapshotSplits) { + return new IncrementalSplit( + "test", + Arrays.asList(new TableId("db", "schema", "table")), + startupOffset, + null, + snapshotSplits, + null); + } + + 
private static CompletedSnapshotSplitInfo createCompletedSnapshotSplitInfo( + String splitId, Offset lowWatermark, Offset highWatermark) { + return new CompletedSnapshotSplitInfo( + splitId, + new TableId("db", "schema", "table"), + null, + null, + null, + new SnapshotSplitWatermark(null, lowWatermark, highWatermark)); + } + + @ToString + @AllArgsConstructor + static class TestOffset extends Offset { + private int offset; + + @Override + public int compareTo(Offset o) { + return Integer.compare(offset, ((TestOffset) o).offset); + } + + @Override + public boolean equals(Object o) { + return o instanceof TestOffset && offset == ((TestOffset) o).offset; + } + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/io/debezium/connector/oracle/logminer/LogMinerStreamingChangeEventSource.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/io/debezium/connector/oracle/logminer/LogMinerStreamingChangeEventSource.java index 4ce4d17a028..0e9e9368657 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/io/debezium/connector/oracle/logminer/LogMinerStreamingChangeEventSource.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-oracle/src/main/java/io/debezium/connector/oracle/logminer/LogMinerStreamingChangeEventSource.java @@ -241,6 +241,7 @@ public void execute(ChangeEventSourceContext context, OracleOffsetContext offset // log before proceeding. 
if (archiveLogOnlyMode && startScn.equals(endScn)) { pauseBetweenMiningSessions(); + dispatcher.dispatchHeartbeatEvent(offsetContext); continue; } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-sqlserver/src/main/java/io/debezium/connector/sqlserver/SqlServerStreamingChangeEventSource.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-sqlserver/src/main/java/io/debezium/connector/sqlserver/SqlServerStreamingChangeEventSource.java index 3053598c664..05eba0d96f9 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-sqlserver/src/main/java/io/debezium/connector/sqlserver/SqlServerStreamingChangeEventSource.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-sqlserver/src/main/java/io/debezium/connector/sqlserver/SqlServerStreamingChangeEventSource.java @@ -181,6 +181,7 @@ public void execute( && shouldIncreaseFromLsn) { LOGGER.debug("No change in the database"); metronome.pause(); + dispatcher.dispatchHeartbeatEvent(offsetContext); continue; } diff --git a/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/source/reader/SourceReaderBase.java b/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/source/reader/SourceReaderBase.java index 668ed78dcac..29dd2ff6f5e 100644 --- a/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/source/reader/SourceReaderBase.java +++ b/seatunnel-connectors-v2/connector-common/src/main/java/org/apache/seatunnel/connectors/seatunnel/common/source/reader/SourceReaderBase.java @@ -26,6 +26,7 @@ import org.apache.seatunnel.connectors.seatunnel.common.source.reader.fetcher.SplitFetcherManager; import org.apache.seatunnel.connectors.seatunnel.common.source.reader.splitreader.SplitReader; +import lombok.Getter; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; @@ -62,7 +63,7 @@ public abstract class SourceReaderBase 
currentFetch; - private SplitContext currentSplitContext; + protected SplitContext currentSplitContext; private Collector currentSplitOutput; private boolean noMoreSplitsAssignment; @@ -234,9 +235,9 @@ public int getNumberOfCurrentlyAssignedSplits() { protected abstract SplitT toSplitType(String splitId, SplitStateT splitState); @RequiredArgsConstructor - private static final class SplitContext { + protected static final class SplitContext { final String splitId; - final SplitStateT state; + @Getter final SplitStateT state; Collector splitOutput; Collector getOrCreateSplitOutput(Collector output) { From 80f392afbb654145ea3188ba51b1f4bafcd2ef5b Mon Sep 17 00:00:00 2001 From: L-Gryps <42635285+L-Gryps@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:42:52 +0800 Subject: [PATCH 53/59] [feature][connector-v2] add xugudb connector (#6561) --- .github/workflows/backend.yml | 2 +- docs/en/connector-v2/sink/Jdbc.md | 1 + docs/en/connector-v2/source/Jdbc.md | 1 + .../connector-jdbc/pom.xml | 7 + .../jdbc/catalog/xugu/XuguCatalog.java | 266 +++++++ .../jdbc/catalog/xugu/XuguCatalogFactory.java | 63 ++ .../xugu/XuguCreateTableSqlBuilder.java | 141 ++++ .../internal/dialect/DatabaseIdentifier.java | 1 + .../internal/dialect/xugu/XuguDialect.java | 231 ++++++ .../dialect/xugu/XuguDialectFactory.java | 45 ++ .../dialect/xugu/XuguJdbcRowConverter.java | 29 + .../dialect/xugu/XuguTypeConverter.java | 385 ++++++++++ .../internal/dialect/xugu/XuguTypeMapper.java | 63 ++ .../dialect/xugu/XuguTypeConverterTest.java | 660 ++++++++++++++++++ .../src/main/assembly/assembly-bin-ci.xml | 1 + .../connector-jdbc-e2e-part-7/pom.xml | 5 + .../connectors/seatunnel/jdbc/JdbcXuguIT.java | 246 +++++++ .../resources/jdbc_xugu_source_and_sink.conf | 47 ++ .../jdbc_xugu_source_and_upsert_sink.conf | 48 ++ 19 files changed, 2241 insertions(+), 1 deletion(-) create mode 100644 
seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/xugu/XuguCatalog.java create mode 100644 seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/xugu/XuguCatalogFactory.java create mode 100644 seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/xugu/XuguCreateTableSqlBuilder.java create mode 100644 seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguDialect.java create mode 100644 seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguDialectFactory.java create mode 100644 seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguJdbcRowConverter.java create mode 100644 seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguTypeConverter.java create mode 100644 seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguTypeMapper.java create mode 100644 seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguTypeConverterTest.java create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/JdbcXuguIT.java create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/src/test/resources/jdbc_xugu_source_and_sink.conf create mode 100644 seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/src/test/resources/jdbc_xugu_source_and_upsert_sink.conf diff --git a/.github/workflows/backend.yml 
b/.github/workflows/backend.yml index 519cf8533d2..9975d477dae 100644 --- a/.github/workflows/backend.yml +++ b/.github/workflows/backend.yml @@ -997,7 +997,7 @@ jobs: java-version: ${{ matrix.java }} distribution: 'temurin' cache: 'maven' - - name: run jdbc connectors integration test (part-6) + - name: run jdbc connectors integration test (part-7) if: needs.changes.outputs.api == 'true' run: | ./mvnw -B -T 1 verify -DskipUT=true -DskipIT=false -D"license.skipAddThirdParty"=true --no-snapshot-updates -pl :connector-jdbc-e2e-part-7 -am -Pci diff --git a/docs/en/connector-v2/sink/Jdbc.md b/docs/en/connector-v2/sink/Jdbc.md index f0b74414a4e..c2591761ec5 100644 --- a/docs/en/connector-v2/sink/Jdbc.md +++ b/docs/en/connector-v2/sink/Jdbc.md @@ -235,6 +235,7 @@ there are some reference value for params above. | Vertica | com.vertica.jdbc.Driver | jdbc:vertica://localhost:5433 | / | https://repo1.maven.org/maven2/com/vertica/jdbc/vertica-jdbc/12.0.3-0/vertica-jdbc-12.0.3-0.jar | | Kingbase | com.kingbase8.Driver | jdbc:kingbase8://localhost:54321/db_test | / | https://repo1.maven.org/maven2/cn/com/kingbase/kingbase8/8.6.0/kingbase8-8.6.0.jar | | OceanBase | com.oceanbase.jdbc.Driver | jdbc:oceanbase://localhost:2881 | / | https://repo1.maven.org/maven2/com/oceanbase/oceanbase-client/2.4.3/oceanbase-client-2.4.3.jar | +| xugu | com.xugu.cloudjdbc.Driver | jdbc:xugu://localhost:5138 | / | https://repo1.maven.org/maven2/com/xugudb/xugu-jdbc/12.2.0/xugu-jdbc-12.2.0.jar | ## Example diff --git a/docs/en/connector-v2/source/Jdbc.md b/docs/en/connector-v2/source/Jdbc.md index 09c3ab636d7..225576001d7 100644 --- a/docs/en/connector-v2/source/Jdbc.md +++ b/docs/en/connector-v2/source/Jdbc.md @@ -227,6 +227,7 @@ there are some reference value for params above. 
| Kingbase | com.kingbase8.Driver | jdbc:kingbase8://localhost:54321/db_test | https://repo1.maven.org/maven2/cn/com/kingbase/kingbase8/8.6.0/kingbase8-8.6.0.jar | | OceanBase | com.oceanbase.jdbc.Driver | jdbc:oceanbase://localhost:2881 | https://repo1.maven.org/maven2/com/oceanbase/oceanbase-client/2.4.3/oceanbase-client-2.4.3.jar | | Hive | org.apache.hive.jdbc.HiveDriver | jdbc:hive2://localhost:10000 | https://repo1.maven.org/maven2/org/apache/hive/hive-jdbc/3.1.3/hive-jdbc-3.1.3-standalone.jar | +| xugu | com.xugu.cloudjdbc.Driver | jdbc:xugu://localhost:5138 | https://repo1.maven.org/maven2/com/xugudb/xugu-jdbc/12.2.0/xugu-jdbc-12.2.0.jar | ## Example diff --git a/seatunnel-connectors-v2/connector-jdbc/pom.xml b/seatunnel-connectors-v2/connector-jdbc/pom.xml index 5880036c90f..db8c95dd0fd 100644 --- a/seatunnel-connectors-v2/connector-jdbc/pom.xml +++ b/seatunnel-connectors-v2/connector-jdbc/pom.xml @@ -50,6 +50,7 @@ 8.6.0 3.1.3 2.4.3 + 12.2.0 @@ -188,6 +189,12 @@ ${oceanbase.jdbc.version} provided + + com.xugudb + xugu-jdbc + ${xugu.jdbc.version} + provided + diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/xugu/XuguCatalog.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/xugu/XuguCatalog.java new file mode 100644 index 00000000000..462e109c76a --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/xugu/XuguCatalog.java @@ -0,0 +1,266 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.xugu; + +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.ConstraintKey; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.api.table.catalog.exception.CatalogException; +import org.apache.seatunnel.api.table.catalog.exception.DatabaseNotExistException; +import org.apache.seatunnel.api.table.converter.BasicTypeDefine; +import org.apache.seatunnel.common.utils.JdbcUrlUtil; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.AbstractJdbcCatalog; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.utils.CatalogUtils; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.xugu.XuguTypeConverter; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.xugu.XuguTypeMapper; + +import org.apache.commons.lang3.StringUtils; + +import lombok.extern.slf4j.Slf4j; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +@Slf4j +public class XuguCatalog extends AbstractJdbcCatalog { + + protected static List EXCLUDED_SCHEMAS = + Collections.unmodifiableList(Arrays.asList("GUEST", "SYSAUDITOR", "SYSSSO")); + + private static final String SELECT_COLUMNS_SQL_TEMPLATE = + "SELECT\n" + + " dc.COLUMN_NAME,\n" + + " CASE\n" + 
+ " WHEN dc.TYPE_NAME LIKE 'INTERVAL%%' THEN 'INTERVAL' ELSE REGEXP_SUBSTR(dc.TYPE_NAME, '^[^(]+')\n" + + " END AS TYPE_NAME,\n" + + " dc.TYPE_NAME ||\n" + + " CASE\n" + + " WHEN dc.TYPE_NAME IN ('VARCHAR', 'CHAR') THEN '(' || dc.COLUMN_LENGTH || ')'\n" + + " WHEN dc.TYPE_NAME IN ('NUMERIC') AND dc.COLUMN_PRECISION IS NOT NULL AND dc.COLUMN_SCALE IS NOT NULL THEN '(' || dc.COLUMN_PRECISION || ', ' || dc.COLUMN_SCALE || ')'\n" + + " WHEN dc.TYPE_NAME IN ('NUMERIC') AND dc.COLUMN_PRECISION IS NOT NULL AND dc.COLUMN_SCALE IS NULL THEN '(' || dc.COLUMN_PRECISION || ')'\n" + + " WHEN dc.TYPE_NAME IN ('TIMESTAMP') THEN '(' || dc.COLUMN_SCALE || ')'\n" + + " END AS FULL_TYPE_NAME,\n" + + " dc.COLUMN_LENGTH,\n" + + " dc.COLUMN_PRECISION,\n" + + " dc.COLUMN_SCALE,\n" + + " dc.COLUMN_COMMENT,\n" + + " dc.DEFAULT_VALUE,\n" + + " CASE\n" + + " dc.IS_NULLABLE WHEN TRUE THEN 'NO' ELSE 'YES'\n" + + " END AS IS_NULLABLE\n" + + "FROM\n" + + " (\n" + + " SELECT\n" + + " c.col_name AS COLUMN_NAME,\n" + + " CASE\n" + + " WHEN c.type_name = 'CHAR' AND c.\"VARYING\" = TRUE THEN 'VARCHAR'\n" + + " WHEN c.type_name = 'DATETIME' AND c.TIMESTAMP_T = 'i' THEN 'TIMESTAMP' ELSE c.type_name\n" + + " END AS TYPE_NAME,\n" + + " DECODE(c.type_name,\n" + + " 'TINYINT', 1, 'SMALLINT', 2,\n" + + " 'INTEGER', 4, 'BIGINT', 8,\n" + + " 'FLOAT', 4, 'DOUBLE', 8,\n" + + " 'NUMERIC', 17,\n" + + " 'CHAR', DECODE(c.scale, -1, 60000, c.scale),\n" + + " 'DATE', 4, 'DATETIME', 8,\n" + + " 'TIMESTAMP', 8, 'DATETIME WITH TIME ZONE', 8,\n" + + " 'TIME', 4, 'TIME WITH TIME ZONE', 4,\n" + + " 'INTERVAL YEAR', 4, 'INTERVAL MONTH', 4,\n" + + " 'INTERVAL DAY', 4, 'INTERVAL HOUR', 4,\n" + + " 'INTERVAL MINUTE', 4, 'INTERVAL SECOND', 8,\n" + + " 'INTERVAL YEAR TO MONTH', 4,\n" + + " 'INTERVAL DAY TO HOUR', 4,\n" + + " 'INTERVAL DAY TO MINUTE', 4,\n" + + " 'INTERVAL DAY TO SECOND', 8,\n" + + " 'INTERVAL HOUR TO MINUTE', 4,\n" + + " 'INTERVAL HOUR TO SECOND', 8,\n" + + " 'INTERVAL MINUTE TO SECOND', 8,\n" + + " 'CLOB', 
2147483648,\n" + + " 'BLOB', 2147483648, 'BINARY', 2147483648,\n" + + " 'GUID', 2, 'BOOLEAN', 1,\n" + + " 'ROWVERSION', 8, 'ROWID', 10, NULL) AS COLUMN_LENGTH,\n" + + " DECODE(TRUNC(c.scale / 65536), 0, NULL, TRUNC(c.scale / 65536)::INTEGER) AS COLUMN_PRECISION,\n" + + " DECODE(DECODE(c.type_name, 'CHAR',-1, c.scale),-1, NULL, MOD(c.scale, 65536)) AS COLUMN_SCALE,\n" + + " c.comments AS COLUMN_COMMENT,\n" + + " c.DEF_VAL AS DEFAULT_VALUE,\n" + + " c.NOT_NULl AS IS_NULLABLE\n" + + " FROM\n" + + " dba_columns c\n" + + " LEFT JOIN dba_tables tab ON\n" + + " c.db_id = tab.db_id\n" + + " AND c.table_id = tab.table_id\n" + + " LEFT JOIN dba_schemas sc ON\n" + + " tab.schema_id = sc.schema_id\n" + + " AND tab.db_id = sc.db_id\n" + + " WHERE\n" + + " sc.schema_name = '%s'\n" + + " AND tab.table_name = '%s'\n" + + ") AS dc \n"; + + public XuguCatalog( + String catalogName, + String username, + String pwd, + JdbcUrlUtil.UrlInfo urlInfo, + String defaultSchema) { + super(catalogName, username, pwd, urlInfo, defaultSchema); + } + + @Override + protected String getListDatabaseSql() { + return "SELECT DB_NAME FROM dba_databases"; + } + + @Override + protected String getCreateTableSql(TablePath tablePath, CatalogTable table) { + return new XuguCreateTableSqlBuilder(table).build(tablePath); + } + + @Override + protected String getDropTableSql(TablePath tablePath) { + return String.format("DROP TABLE %s", tablePath.getSchemaAndTableName("\"")); + } + + @Override + protected String getCreateDatabaseSql(String databaseName) { + return String.format("CREATE DATABASE \"%s\"", databaseName); + } + + @Override + protected String getDropDatabaseSql(String databaseName) { + return String.format("DROP DATABASE \"%s\"", databaseName); + } + + @Override + protected String getListTableSql(String databaseName) { + return "SELECT user_name ,table_name FROM all_users au \n" + + "INNER JOIN all_tables at ON au.user_id=at.user_id AND au.db_id=at.db_id"; + } + + @Override + protected String 
getTableName(ResultSet rs) throws SQLException { + if (EXCLUDED_SCHEMAS.contains(rs.getString(1))) { + return null; + } + return rs.getString(1) + "." + rs.getString(2); + } + + @Override + protected String getSelectColumnsSql(TablePath tablePath) { + return String.format( + SELECT_COLUMNS_SQL_TEMPLATE, tablePath.getSchemaName(), tablePath.getTableName()); + } + + @Override + protected Column buildColumn(ResultSet resultSet) throws SQLException { + String columnName = resultSet.getString("COLUMN_NAME"); + String typeName = resultSet.getString("TYPE_NAME"); + String fullTypeName = resultSet.getString("FULL_TYPE_NAME"); + long columnLength = resultSet.getLong("COLUMN_LENGTH"); + Long columnPrecision = resultSet.getObject("COLUMN_PRECISION", Long.class); + Integer columnScale = resultSet.getObject("COLUMN_SCALE", Integer.class); + String columnComment = resultSet.getString("COLUMN_COMMENT"); + Object defaultValue = resultSet.getObject("DEFAULT_VALUE"); + boolean isNullable = resultSet.getString("IS_NULLABLE").equals("YES"); + + BasicTypeDefine typeDefine = + BasicTypeDefine.builder() + .name(columnName) + .columnType(fullTypeName) + .dataType(typeName) + .length(columnLength) + .precision(columnPrecision) + .scale(columnScale) + .nullable(isNullable) + .defaultValue(defaultValue) + .comment(columnComment) + .build(); + return XuguTypeConverter.INSTANCE.convert(typeDefine); + } + + @Override + protected String getUrlFromDatabaseName(String databaseName) { + return defaultUrl; + } + + @Override + protected String getOptionTableName(TablePath tablePath) { + return tablePath.getSchemaAndTableName(); + } + + @Override + public boolean tableExists(TablePath tablePath) throws CatalogException { + try { + if (StringUtils.isNotBlank(tablePath.getDatabaseName())) { + return databaseExists(tablePath.getDatabaseName()) + && listTables(tablePath.getDatabaseName()) + .contains(tablePath.getSchemaAndTableName()); + } + return listTables().contains(tablePath.getSchemaAndTableName()); 
+ } catch (DatabaseNotExistException e) { + return false; + } + } + + private List listTables() { + List databases = listDatabases(); + return listTables(databases.get(0)); + } + + @Override + public CatalogTable getTable(String sqlQuery) throws SQLException { + Connection defaultConnection = getConnection(defaultUrl); + return CatalogUtils.getCatalogTable(defaultConnection, sqlQuery, new XuguTypeMapper()); + } + + @Override + protected String getTruncateTableSql(TablePath tablePath) { + return String.format( + "TRUNCATE TABLE \"%s\".\"%s\"", + tablePath.getSchemaName(), tablePath.getTableName()); + } + + @Override + protected String getExistDataSql(TablePath tablePath) { + return String.format( + "SELECT * FROM \"%s\".\"%s\" WHERE ROWNUM = 1", + tablePath.getSchemaName(), tablePath.getTableName()); + } + + @Override + protected List getConstraintKeys(DatabaseMetaData metaData, TablePath tablePath) + throws SQLException { + try { + return getConstraintKeys( + metaData, + tablePath.getDatabaseName(), + tablePath.getSchemaName(), + tablePath.getTableName()); + } catch (SQLException e) { + log.info("Obtain constraint failure", e); + return new ArrayList<>(); + } + } +} diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/xugu/XuguCatalogFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/xugu/XuguCatalogFactory.java new file mode 100644 index 00000000000..ac0f3e24ae9 --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/xugu/XuguCatalogFactory.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.xugu; + +import org.apache.seatunnel.api.configuration.ReadonlyConfig; +import org.apache.seatunnel.api.configuration.util.OptionRule; +import org.apache.seatunnel.api.configuration.util.OptionValidationException; +import org.apache.seatunnel.api.table.catalog.Catalog; +import org.apache.seatunnel.api.table.factory.CatalogFactory; +import org.apache.seatunnel.api.table.factory.Factory; +import org.apache.seatunnel.common.utils.JdbcUrlUtil; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.JdbcCatalogOptions; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.oracle.OracleURLParser; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; + +import com.google.auto.service.AutoService; + +import java.util.Optional; + +@AutoService(Factory.class) +public class XuguCatalogFactory implements CatalogFactory { + + @Override + public String factoryIdentifier() { + return DatabaseIdentifier.XUGU; + } + + @Override + public Catalog createCatalog(String catalogName, ReadonlyConfig options) { + String urlWithDatabase = options.get(JdbcCatalogOptions.BASE_URL); + JdbcUrlUtil.UrlInfo urlInfo = OracleURLParser.parse(urlWithDatabase); + Optional defaultDatabase = urlInfo.getDefaultDatabase(); + if (!defaultDatabase.isPresent()) { + throw new 
OptionValidationException(JdbcCatalogOptions.BASE_URL); + } + return new XuguCatalog( + catalogName, + options.get(JdbcCatalogOptions.USERNAME), + options.get(JdbcCatalogOptions.PASSWORD), + urlInfo, + options.get(JdbcCatalogOptions.SCHEMA)); + } + + @Override + public OptionRule optionRule() { + return JdbcCatalogOptions.BASE_RULE.build(); + } +} diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/xugu/XuguCreateTableSqlBuilder.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/xugu/XuguCreateTableSqlBuilder.java new file mode 100644 index 00000000000..19bce1a8ca0 --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/xugu/XuguCreateTableSqlBuilder.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.xugu; + +import org.apache.seatunnel.api.table.catalog.CatalogTable; +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.PrimaryKey; +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.utils.CatalogUtils; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.xugu.XuguTypeConverter; + +import org.apache.commons.lang3.StringUtils; + +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +public class XuguCreateTableSqlBuilder { + + private List columns; + private PrimaryKey primaryKey; + private String sourceCatalogName; + private String fieldIde; + + public XuguCreateTableSqlBuilder(CatalogTable catalogTable) { + this.columns = catalogTable.getTableSchema().getColumns(); + this.primaryKey = catalogTable.getTableSchema().getPrimaryKey(); + this.sourceCatalogName = catalogTable.getCatalogName(); + this.fieldIde = catalogTable.getOptions().get("fieldIde"); + } + + public String build(TablePath tablePath) { + StringBuilder createTableSql = new StringBuilder(); + createTableSql + .append("CREATE TABLE ") + .append(tablePath.getSchemaAndTableName("\"")) + .append(" (\n"); + + List columnSqls = + columns.stream() + .map(column -> CatalogUtils.getFieldIde(buildColumnSql(column), fieldIde)) + .collect(Collectors.toList()); + + // Add primary key directly in the create table statement + if (primaryKey != null + && primaryKey.getColumnNames() != null + && primaryKey.getColumnNames().size() > 0) { + columnSqls.add(buildPrimaryKeySql(primaryKey)); + } + + createTableSql.append(String.join(",\n", columnSqls)); + createTableSql.append("\n)"); + + List commentSqls = + columns.stream() + .filter(column -> StringUtils.isNotBlank(column.getComment())) + .map( 
+ column -> + buildColumnCommentSql( + column, tablePath.getSchemaAndTableName("\""))) + .collect(Collectors.toList()); + + if (!commentSqls.isEmpty()) { + createTableSql.append(";\n"); + createTableSql.append(String.join(";\n", commentSqls)); + } + + return createTableSql.toString(); + } + + private String buildColumnSql(Column column) { + StringBuilder columnSql = new StringBuilder(); + columnSql.append("\"").append(column.getName()).append("\" "); + + String columnType = + StringUtils.equalsIgnoreCase(DatabaseIdentifier.XUGU, sourceCatalogName) + ? column.getSourceType() + : XuguTypeConverter.INSTANCE.reconvert(column).getColumnType(); + columnSql.append(columnType); + + if (!column.isNullable()) { + columnSql.append(" NOT NULL"); + } + + return columnSql.toString(); + } + + private String buildPrimaryKeySql(PrimaryKey primaryKey) { + String randomSuffix = UUID.randomUUID().toString().replace("-", "").substring(0, 4); + String columnNamesString = + primaryKey.getColumnNames().stream() + .map(columnName -> "\"" + columnName + "\"") + .collect(Collectors.joining(", ")); + + // In xugu database, the maximum length for an identifier is 30 characters. 
+ String primaryKeyStr = primaryKey.getPrimaryKey(); + if (primaryKeyStr.length() > 25) { + primaryKeyStr = primaryKeyStr.substring(0, 25); + } + + return CatalogUtils.getFieldIde( + "CONSTRAINT " + + primaryKeyStr + + "_" + + randomSuffix + + " PRIMARY KEY (" + + columnNamesString + + ")", + fieldIde); + } + + private String buildColumnCommentSql(Column column, String tableName) { + StringBuilder columnCommentSql = new StringBuilder(); + columnCommentSql + .append(CatalogUtils.quoteIdentifier("COMMENT ON COLUMN ", fieldIde)) + .append(tableName) + .append("."); + columnCommentSql + .append(CatalogUtils.quoteIdentifier(column.getName(), fieldIde, "\"")) + .append(CatalogUtils.quoteIdentifier(" IS '", fieldIde)) + .append(column.getComment()) + .append("'"); + return columnCommentSql.toString(); + } +} diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/DatabaseIdentifier.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/DatabaseIdentifier.java index 3b1738afb27..2f6aabc502c 100644 --- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/DatabaseIdentifier.java +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/DatabaseIdentifier.java @@ -38,4 +38,5 @@ public class DatabaseIdentifier { public static final String VERTICA = "Vertica"; public static final String OCENABASE = "OceanBase"; public static final String TIDB = "TiDB"; + public static final String XUGU = "XUGU"; } diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguDialect.java new file mode 100644 
index 00000000000..1ef617b3936 --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguDialect.java @@ -0,0 +1,231 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.xugu; + +import org.apache.seatunnel.api.table.catalog.TablePath; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.JdbcRowConverter; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectTypeMapper; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum; +import org.apache.seatunnel.connectors.seatunnel.jdbc.source.JdbcSourceTable; + +import org.apache.commons.lang3.StringUtils; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +public class XuguDialect implements JdbcDialect { + + private static final int DEFAULT_XUGU_FETCH_SIZE = 500; + public String fieldIde = FieldIdeEnum.ORIGINAL.getValue(); + + public XuguDialect(String fieldIde) { + this.fieldIde = fieldIde; + } + + public XuguDialect() {} + + @Override + public String dialectName() { + return DatabaseIdentifier.XUGU; + } + + @Override + public JdbcRowConverter getRowConverter() { + return new XuguJdbcRowConverter(); + } + + @Override + public JdbcDialectTypeMapper getJdbcDialectTypeMapper() { + return new XuguTypeMapper(); + } + + @Override + public String quoteIdentifier(String identifier) { + if (identifier.contains(".")) { + String[] parts = identifier.split("\\."); + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < parts.length - 1; i++) { + sb.append("\"").append(parts[i]).append("\"").append("."); + } + return sb.append("\"") + .append(getFieldIde(parts[parts.length - 1], fieldIde)) + .append("\"") + .toString(); + } 
+ + return "\"" + getFieldIde(identifier, fieldIde) + "\""; + } + + @Override + public String tableIdentifier(String database, String tableName) { + return quoteIdentifier(tableName); + } + + @Override + public String extractTableName(TablePath tablePath) { + return tablePath.getSchemaAndTableName(); + } + + @Override + public TablePath parse(String tablePath) { + return TablePath.of(tablePath, true); + } + + @Override + public String tableIdentifier(TablePath tablePath) { + return tablePath.getSchemaAndTableName(); + } + + @Override + public Optional getUpsertStatement( + String database, String tableName, String[] fieldNames, String[] uniqueKeyFields) { + List nonUniqueKeyFields = + Arrays.stream(fieldNames) + .filter(fieldName -> !Arrays.asList(uniqueKeyFields).contains(fieldName)) + .collect(Collectors.toList()); + String valuesBinding = + Arrays.stream(fieldNames) + .map(fieldName -> ":" + fieldName + " " + quoteIdentifier(fieldName)) + .collect(Collectors.joining(", ")); + + String usingClause = String.format("SELECT %s FROM DUAL", valuesBinding); + String onConditions = + Arrays.stream(uniqueKeyFields) + .map( + fieldName -> + String.format( + "TARGET.%s=SOURCE.%s", + quoteIdentifier(fieldName), + quoteIdentifier(fieldName))) + .collect(Collectors.joining(" AND ")); + String updateSetClause = + nonUniqueKeyFields.stream() + .map( + fieldName -> + String.format( + "TARGET.%s=SOURCE.%s", + quoteIdentifier(fieldName), + quoteIdentifier(fieldName))) + .collect(Collectors.joining(", ")); + String insertFields = + Arrays.stream(fieldNames) + .map(this::quoteIdentifier) + .collect(Collectors.joining(", ")); + String insertValues = + Arrays.stream(fieldNames) + .map(fieldName -> "SOURCE." 
+ quoteIdentifier(fieldName)) + .collect(Collectors.joining(", ")); + + String upsertSQL = + String.format( + " MERGE INTO %s TARGET" + + " USING (%s) SOURCE" + + " ON (%s) " + + " WHEN MATCHED THEN" + + " UPDATE SET %s" + + " WHEN NOT MATCHED THEN" + + " INSERT (%s) VALUES (%s)", + tableIdentifier(database, tableName), + usingClause, + onConditions, + updateSetClause, + insertFields, + insertValues); + + return Optional.of(upsertSQL); + } + + @Override + public PreparedStatement creatPreparedStatement( + Connection connection, String queryTemplate, int fetchSize) throws SQLException { + PreparedStatement statement = + connection.prepareStatement( + queryTemplate, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); + if (fetchSize > 0) { + statement.setFetchSize(fetchSize); + } else { + statement.setFetchSize(DEFAULT_XUGU_FETCH_SIZE); + } + return statement; + } + + @Override + public Object queryNextChunkMax( + Connection connection, + JdbcSourceTable table, + String columnName, + int chunkSize, + Object includedLowerBound) + throws SQLException { + String quotedColumn = quoteIdentifier(columnName); + String sqlQuery; + if (StringUtils.isNotBlank(table.getQuery())) { + sqlQuery = + String.format( + "SELECT MAX(%s) FROM (" + + "SELECT %s FROM (%s) WHERE %s >= ? ORDER BY %s ASC " + + ") WHERE ROWNUM <= %s", + quotedColumn, + quotedColumn, + table.getQuery(), + quotedColumn, + quotedColumn, + chunkSize); + } else { + sqlQuery = + String.format( + "SELECT MAX(%s) FROM (" + + "SELECT %s FROM %s WHERE %s >= ? 
ORDER BY %s ASC " + + ") WHERE ROWNUM <= %s", + quotedColumn, + quotedColumn, + table.getTablePath().getSchemaAndTableName(), + quotedColumn, + quotedColumn, + chunkSize); + } + + try (PreparedStatement ps = connection.prepareStatement(sqlQuery)) { + ps.setObject(1, includedLowerBound); + try (ResultSet rs = ps.executeQuery()) { + if (!rs.next()) { + // this should never happen + throw new SQLException( + String.format("No result returned after running query [%s]", sqlQuery)); + } + return rs.getObject(1); + } + } + } + + @Override + public ResultSetMetaData getResultSetMetaData(Connection conn, String query) + throws SQLException { + PreparedStatement ps = conn.prepareStatement(query); + return ps.executeQuery().getMetaData(); + } +} diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguDialectFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguDialectFactory.java new file mode 100644 index 00000000000..0e489b728b5 --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguDialectFactory.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.xugu; + +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectFactory; + +import com.google.auto.service.AutoService; + +import javax.annotation.Nonnull; + +/** Factory for {@link XuguDialect}. */ +@AutoService(JdbcDialectFactory.class) +public class XuguDialectFactory implements JdbcDialectFactory { + + @Override + public boolean acceptsURL(String url) { + return url.startsWith("jdbc:xugu:"); + } + + @Override + public JdbcDialect create() { + return new XuguDialect(); + } + + @Override + public JdbcDialect create(@Nonnull String compatibleMode, String fieldIde) { + return new XuguDialect(fieldIde); + } +} diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguJdbcRowConverter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguJdbcRowConverter.java new file mode 100644 index 00000000000..4590761965c --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguJdbcRowConverter.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.xugu; + +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.AbstractJdbcRowConverter; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; + +public class XuguJdbcRowConverter extends AbstractJdbcRowConverter { + + @Override + public String converterName() { + return DatabaseIdentifier.XUGU; + } +} diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguTypeConverter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguTypeConverter.java new file mode 100644 index 00000000000..54a8805f3bc --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguTypeConverter.java @@ -0,0 +1,385 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.xugu; + +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.converter.BasicTypeDefine; +import org.apache.seatunnel.api.table.converter.TypeConverter; +import org.apache.seatunnel.api.table.type.BasicType; +import org.apache.seatunnel.api.table.type.DecimalType; +import org.apache.seatunnel.api.table.type.LocalTimeType; +import org.apache.seatunnel.api.table.type.PrimitiveByteArrayType; +import org.apache.seatunnel.common.exception.CommonError; +import org.apache.seatunnel.connectors.seatunnel.common.source.TypeDefineUtils; +import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.DatabaseIdentifier; + +import com.google.auto.service.AutoService; +import lombok.extern.slf4j.Slf4j; + +// reference +// https://docs.xugudb.com/%E8%99%9A%E8%B0%B7%E6%95%B0%E6%8D%AE%E5%BA%93%E5%AF%B9%E5%A4%96%E5%8F%91%E5%B8%83/06%E5%8F%82%E8%80%83%E6%8C%87%E5%8D%97/SQL%E8%AF%AD%E6%B3%95%E5%8F%82%E8%80%83/%E6%95%B0%E6%8D%AE%E7%B1%BB%E5%9E%8B/%E6%A6%82%E8%BF%B0/ +@Slf4j +@AutoService(TypeConverter.class) +public class XuguTypeConverter implements TypeConverter { + // ============================data types===================== + // -------------------------number---------------------------- + public static final String XUGU_NUMERIC = "NUMERIC"; + public static final String XUGU_NUMBER = "NUMBER"; + public static final String XUGU_DECIMAL = "DECIMAL"; + public static final String 
XUGU_INTEGER = "INTEGER"; + public static final String XUGU_INT = "INT"; + public static final String XUGU_BIGINT = "BIGINT"; + public static final String XUGU_TINYINT = "TINYINT"; + public static final String XUGU_SMALLINT = "SMALLINT"; + public static final String XUGU_FLOAT = "FLOAT"; + public static final String XUGU_DOUBLE = "DOUBLE"; + + // ----------------------------string------------------------- + public static final String XUGU_CHAR = "CHAR"; + public static final String XUGU_NCHAR = "NCHAR"; + public static final String XUGU_VARCHAR = "VARCHAR"; + public static final String XUGU_VARCHAR2 = "VARCHAR2"; + public static final String XUGU_CLOB = "CLOB"; + + // ------------------------------time------------------------- + public static final String XUGU_DATE = "DATE"; + public static final String XUGU_TIME = "TIME"; + public static final String XUGU_TIMESTAMP = "TIMESTAMP"; + public static final String XUGU_DATETIME = "DATETIME"; + public static final String XUGU_DATETIME_WITH_TIME_ZONE = "DATETIME WITH TIME ZONE"; + public static final String XUGU_TIME_WITH_TIME_ZONE = "TIME WITH TIME ZONE"; + public static final String XUGU_TIMESTAMP_WITH_TIME_ZONE = "TIMESTAMP WITH TIME ZONE"; + + // ---------------------------binary--------------------------- + public static final String XUGU_BINARY = "BINARY"; + public static final String XUGU_BLOB = "BLOB"; + + // ---------------------------other--------------------------- + public static final String XUGU_GUID = "GUID"; + public static final String XUGU_BOOLEAN = "BOOLEAN"; + public static final String XUGU_BOOL = "BOOL"; + public static final String XUGU_JSON = "JSON"; + + public static final int MAX_PRECISION = 38; + public static final int DEFAULT_PRECISION = MAX_PRECISION; + public static final int MAX_SCALE = 38; + public static final int DEFAULT_SCALE = 18; + public static final int TIMESTAMP_DEFAULT_SCALE = 3; + public static final int MAX_TIMESTAMP_SCALE = 6; + public static final int MAX_TIME_SCALE = 3; + 
public static final long MAX_VARCHAR_LENGTH = 60000; + public static final long POWER_2_16 = (long) Math.pow(2, 16); + public static final long BYTES_2GB = (long) Math.pow(2, 31); + public static final long MAX_BINARY_LENGTH = POWER_2_16 - 4; + public static final XuguTypeConverter INSTANCE = new XuguTypeConverter(); + + @Override + public String identifier() { + return DatabaseIdentifier.XUGU; + } + + @Override + public Column convert(BasicTypeDefine typeDefine) { + PhysicalColumn.PhysicalColumnBuilder builder = + PhysicalColumn.builder() + .name(typeDefine.getName()) + .sourceType(typeDefine.getColumnType()) + .nullable(typeDefine.isNullable()) + .defaultValue(typeDefine.getDefaultValue()) + .comment(typeDefine.getComment()); + + String xuguDataType = typeDefine.getDataType().toUpperCase(); + switch (xuguDataType) { + case XUGU_BOOLEAN: + case XUGU_BOOL: + builder.dataType(BasicType.BOOLEAN_TYPE); + break; + case XUGU_TINYINT: + builder.dataType(BasicType.BYTE_TYPE); + break; + case XUGU_SMALLINT: + builder.dataType(BasicType.SHORT_TYPE); + break; + case XUGU_INT: + case XUGU_INTEGER: + builder.dataType(BasicType.INT_TYPE); + break; + case XUGU_BIGINT: + builder.dataType(BasicType.LONG_TYPE); + break; + case XUGU_FLOAT: + builder.dataType(BasicType.FLOAT_TYPE); + break; + case XUGU_DOUBLE: + builder.dataType(BasicType.DOUBLE_TYPE); + break; + case XUGU_NUMBER: + case XUGU_DECIMAL: + case XUGU_NUMERIC: + DecimalType decimalType; + if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) { + decimalType = + new DecimalType( + typeDefine.getPrecision().intValue(), typeDefine.getScale()); + } else { + decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE); + } + builder.dataType(decimalType); + builder.columnLength(Long.valueOf(decimalType.getPrecision())); + builder.scale(decimalType.getScale()); + break; + + case XUGU_CHAR: + case XUGU_NCHAR: + builder.dataType(BasicType.STRING_TYPE); + if (typeDefine.getLength() == null || 
typeDefine.getLength() <= 0) { + builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L)); + } else { + builder.columnLength(typeDefine.getLength()); + } + break; + case XUGU_VARCHAR: + case XUGU_VARCHAR2: + builder.dataType(BasicType.STRING_TYPE); + if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) { + builder.columnLength(TypeDefineUtils.charTo4ByteLength(MAX_VARCHAR_LENGTH)); + } else { + builder.columnLength(typeDefine.getLength()); + } + break; + case XUGU_CLOB: + builder.dataType(BasicType.STRING_TYPE); + builder.columnLength(BYTES_2GB - 1); + break; + case XUGU_JSON: + case XUGU_GUID: + builder.dataType(BasicType.STRING_TYPE); + break; + case XUGU_BINARY: + builder.dataType(PrimitiveByteArrayType.INSTANCE); + builder.columnLength(MAX_BINARY_LENGTH); + break; + case XUGU_BLOB: + builder.dataType(PrimitiveByteArrayType.INSTANCE); + builder.columnLength(BYTES_2GB - 1); + break; + case XUGU_DATE: + builder.dataType(LocalTimeType.LOCAL_DATE_TYPE); + break; + case XUGU_TIME: + case XUGU_TIME_WITH_TIME_ZONE: + builder.dataType(LocalTimeType.LOCAL_TIME_TYPE); + break; + case XUGU_DATETIME: + case XUGU_DATETIME_WITH_TIME_ZONE: + builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE); + break; + case XUGU_TIMESTAMP: + case XUGU_TIMESTAMP_WITH_TIME_ZONE: + builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE); + if (typeDefine.getScale() == null) { + builder.scale(TIMESTAMP_DEFAULT_SCALE); + } else { + builder.scale(typeDefine.getScale()); + } + break; + default: + throw CommonError.convertToSeaTunnelTypeError( + DatabaseIdentifier.XUGU, xuguDataType, typeDefine.getName()); + } + return builder.build(); + } + + @Override + public BasicTypeDefine reconvert(Column column) { + BasicTypeDefine.BasicTypeDefineBuilder builder = + BasicTypeDefine.builder() + .name(column.getName()) + .nullable(column.isNullable()) + .comment(column.getComment()) + .defaultValue(column.getDefaultValue()); + switch (column.getDataType().getSqlType()) { + case BOOLEAN: + 
builder.columnType(XUGU_BOOLEAN); + builder.dataType(XUGU_BOOLEAN); + break; + case TINYINT: + builder.columnType(XUGU_TINYINT); + builder.dataType(XUGU_TINYINT); + break; + case SMALLINT: + builder.columnType(XUGU_SMALLINT); + builder.dataType(XUGU_SMALLINT); + break; + case INT: + builder.columnType(XUGU_INTEGER); + builder.dataType(XUGU_INTEGER); + break; + case BIGINT: + builder.columnType(XUGU_BIGINT); + builder.dataType(XUGU_BIGINT); + break; + case FLOAT: + builder.columnType(XUGU_FLOAT); + builder.dataType(XUGU_FLOAT); + break; + case DOUBLE: + builder.columnType(XUGU_DOUBLE); + builder.dataType(XUGU_DOUBLE); + break; + case DECIMAL: + DecimalType decimalType = (DecimalType) column.getDataType(); + long precision = decimalType.getPrecision(); + int scale = decimalType.getScale(); + if (precision <= 0) { + precision = DEFAULT_PRECISION; + scale = DEFAULT_SCALE; + log.warn( + "The decimal column {} type decimal({},{}) is out of range, " + + "which is precision less than 0, " + + "it will be converted to decimal({},{})", + column.getName(), + decimalType.getPrecision(), + decimalType.getScale(), + precision, + scale); + } else if (precision > MAX_PRECISION) { + scale = (int) Math.max(0, scale - (precision - MAX_PRECISION)); + precision = MAX_PRECISION; + log.warn( + "The decimal column {} type decimal({},{}) is out of range, " + + "which exceeds the maximum precision of {}, " + + "it will be converted to decimal({},{})", + column.getName(), + decimalType.getPrecision(), + decimalType.getScale(), + MAX_PRECISION, + precision, + scale); + } + if (scale < 0) { + scale = 0; + log.warn( + "The decimal column {} type decimal({},{}) is out of range, " + + "which is scale less than 0, " + + "it will be converted to decimal({},{})", + column.getName(), + decimalType.getPrecision(), + decimalType.getScale(), + precision, + scale); + } else if (scale > MAX_SCALE) { + scale = MAX_SCALE; + log.warn( + "The decimal column {} type decimal({},{}) is out of range, " + + "which 
exceeds the maximum scale of {}, " + + "it will be converted to decimal({},{})", + column.getName(), + decimalType.getPrecision(), + decimalType.getScale(), + MAX_SCALE, + precision, + scale); + } + builder.columnType(String.format("%s(%s,%s)", XUGU_NUMERIC, precision, scale)); + builder.dataType(XUGU_NUMERIC); + builder.precision(precision); + builder.scale(scale); + break; + case BYTES: + if (column.getColumnLength() == null || column.getColumnLength() <= 0) { + builder.columnType(XUGU_BLOB); + builder.dataType(XUGU_BLOB); + } else if (column.getColumnLength() <= MAX_BINARY_LENGTH) { + builder.columnType(XUGU_BINARY); + builder.dataType(XUGU_BINARY); + } else { + builder.columnType(XUGU_BLOB); + builder.dataType(XUGU_BLOB); + } + break; + case STRING: + if (column.getColumnLength() == null || column.getColumnLength() <= 0) { + builder.columnType(String.format("%s(%s)", XUGU_VARCHAR, MAX_VARCHAR_LENGTH)); + builder.dataType(XUGU_VARCHAR); + } else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) { + builder.columnType( + String.format("%s(%s)", XUGU_VARCHAR, column.getColumnLength())); + builder.dataType(XUGU_VARCHAR); + } else { + builder.columnType(XUGU_CLOB); + builder.dataType(XUGU_CLOB); + } + break; + case DATE: + builder.columnType(XUGU_DATE); + builder.dataType(XUGU_DATE); + break; + case TIME: + builder.dataType(XUGU_TIME); + if (column.getScale() != null && column.getScale() > 0) { + Integer timeScale = column.getScale(); + if (timeScale > MAX_TIME_SCALE) { + timeScale = MAX_TIME_SCALE; + log.warn( + "The time column {} type time({}) is out of range, " + + "which exceeds the maximum scale of {}, " + + "it will be converted to time({})", + column.getName(), + column.getScale(), + MAX_SCALE, + timeScale); + } + builder.columnType(String.format("%s(%s)", XUGU_TIME, timeScale)); + builder.scale(timeScale); + } else { + builder.columnType(XUGU_TIME); + } + break; + case TIMESTAMP: + if (column.getScale() == null || column.getScale() <= 0) { + 
builder.columnType(XUGU_TIMESTAMP); + } else { + int timestampScale = column.getScale(); + if (column.getScale() > MAX_TIMESTAMP_SCALE) { + timestampScale = MAX_TIMESTAMP_SCALE; + log.warn( + "The timestamp column {} type timestamp({}) is out of range, " + + "which exceeds the maximum scale of {}, " + + "it will be converted to timestamp({})", + column.getName(), + column.getScale(), + MAX_TIMESTAMP_SCALE, + timestampScale); + } + builder.columnType(String.format("TIMESTAMP(%s)", timestampScale)); + builder.scale(timestampScale); + } + builder.dataType(XUGU_TIMESTAMP); + break; + default: + throw CommonError.convertToConnectorTypeError( + DatabaseIdentifier.XUGU, + column.getDataType().getSqlType().name(), + column.getName()); + } + + return builder.build(); + } +} diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguTypeMapper.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguTypeMapper.java new file mode 100644 index 00000000000..e517f56af0c --- /dev/null +++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/xugu/XuguTypeMapper.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.xugu;

import org.apache.seatunnel.api.table.catalog.Column;
import org.apache.seatunnel.api.table.converter.BasicTypeDefine;
import org.apache.seatunnel.connectors.seatunnel.common.source.TypeDefineUtils;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectTypeMapper;

import lombok.extern.slf4j.Slf4j;

import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.Arrays;

/**
 * Maps XuguDB JDBC result-set metadata to SeaTunnel columns by delegating to
 * {@link XuguTypeConverter}.
 */
@Slf4j
public class XuguTypeMapper implements JdbcDialectTypeMapper {

    /** Converts an already-built Xugu type definition into a SeaTunnel column. */
    @Override
    public Column mappingColumn(BasicTypeDefine typeDefine) {
        return XuguTypeConverter.INSTANCE.convert(typeDefine);
    }

    /**
     * Reads one column's metadata from a JDBC {@link ResultSetMetaData} and converts it.
     *
     * @param metadata result-set metadata of the current query
     * @param colIndex 1-based column index
     * @return the SeaTunnel column for that index
     * @throws SQLException if the driver fails to read the metadata
     */
    @Override
    public Column mappingColumn(ResultSetMetaData metadata, int colIndex) throws SQLException {
        String label = metadata.getColumnLabel(colIndex);
        String typeName = metadata.getColumnTypeName(colIndex);
        int nullability = metadata.isNullable(colIndex);
        int scale = metadata.getScale(colIndex);

        // For char-family types the driver reports a character count; widen it to a
        // 4-bytes-per-character byte length before handing it to the converter.
        long length = metadata.getPrecision(colIndex);
        if (Arrays.asList("CHAR", "NCHAR", "VARCHAR", "VARCHAR2").contains(typeName)) {
            length = TypeDefineUtils.charTo4ByteLength(length);
        }

        return mappingColumn(
                BasicTypeDefine.builder()
                        .name(label)
                        .columnType(typeName)
                        .dataType(typeName)
                        .nullable(nullability == ResultSetMetaData.columnNullable)
                        .length(length)
                        .precision(length)
                        .scale(scale)
                        .build());
    }
}
+ */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.xugu; + +import org.apache.seatunnel.api.table.catalog.Column; +import org.apache.seatunnel.api.table.catalog.PhysicalColumn; +import org.apache.seatunnel.api.table.converter.BasicTypeDefine; +import org.apache.seatunnel.api.table.type.BasicType; +import org.apache.seatunnel.api.table.type.DecimalType; +import org.apache.seatunnel.api.table.type.LocalTimeType; +import org.apache.seatunnel.api.table.type.MapType; +import org.apache.seatunnel.api.table.type.PrimitiveByteArrayType; +import org.apache.seatunnel.common.exception.SeaTunnelRuntimeException; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import static org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.xugu.XuguTypeConverter.BYTES_2GB; +import static org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.xugu.XuguTypeConverter.MAX_BINARY_LENGTH; + +public class XuguTypeConverterTest { + @Test + public void testConvertUnsupported() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder().name("test").columnType("aaa").dataType("aaa").build(); + try { + XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.fail(); + } catch (SeaTunnelRuntimeException e) { + // ignore + } catch (Throwable e) { + Assertions.fail(); + } + } + + @Test + public void testReconvertUnsupported() { + Column column = + PhysicalColumn.of( + "test", + new MapType<>(BasicType.STRING_TYPE, BasicType.STRING_TYPE), + (Long) null, + true, + null, + null); + try { + XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.fail(); + } catch (SeaTunnelRuntimeException e) { + // ignore + } catch (Throwable e) { + Assertions.fail(); + } + } + + @Test + public void testConvertBoolean() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("bool") + .dataType("boolean") + .nullable(true) + .defaultValue("1") + .comment("test") + .build(); + Column column = 
XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.BOOLEAN_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + Assertions.assertEquals(typeDefine.isNullable(), column.isNullable()); + Assertions.assertEquals(typeDefine.getDefaultValue(), column.getDefaultValue()); + Assertions.assertEquals(typeDefine.getComment(), column.getComment()); + } + + @Test + public void testConvertTinyint() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("tinyint") + .dataType("tinyint") + .build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.BYTE_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertSmallint() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("smallint") + .dataType("smallint") + .build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.SHORT_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertInt() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder().name("test").columnType("int").dataType("int").build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.INT_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertBigint() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder() + 
.name("test") + .columnType("bigint") + .dataType("bigint") + .build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.LONG_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertFloat() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("float") + .dataType("float") + .build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.FLOAT_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertDouble() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("double") + .dataType("double") + .build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.DOUBLE_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertDecimal() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("numeric(38,2)") + .dataType("numeric") + .precision(38L) + .scale(2) + .build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(new DecimalType(38, 2), column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + + typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("numeric") + .dataType("numeric") + .build(); + column = XuguTypeConverter.INSTANCE.convert(typeDefine); + 
Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(new DecimalType(38, 18), column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertChar() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder().name("test").columnType("char").dataType("char").build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType()); + Assertions.assertEquals(4, column.getColumnLength()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + + typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("char(10)") + .dataType("char") + .length(10L) + .build(); + column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType()); + Assertions.assertEquals(10, column.getColumnLength()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertVarchar() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("varchar") + .dataType("varchar") + .build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType()); + Assertions.assertEquals(240000, column.getColumnLength()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + + typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("varchar(10)") + .dataType("varchar") + .length(10L) + .build(); + column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + 
Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType()); + Assertions.assertEquals(10, column.getColumnLength()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + + typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("varchar2(20)") + .dataType("varchar2") + .length(20L) + .build(); + column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType()); + Assertions.assertEquals(20, column.getColumnLength()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertOtherString() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder().name("test").columnType("clob").dataType("clob").build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType()); + Assertions.assertEquals(BYTES_2GB - 1, column.getColumnLength()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + + typeDefine = + BasicTypeDefine.builder().name("test").columnType("json").dataType("json").build(); + column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType()); + Assertions.assertEquals(null, column.getColumnLength()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertBinary() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder().name("test").columnType("blob").dataType("blob").build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + + Assertions.assertEquals(typeDefine.getName(), column.getName()); + 
Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType()); + Assertions.assertEquals(BYTES_2GB - 1, column.getColumnLength()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertDate() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder().name("test").columnType("date").dataType("date").build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertTime() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder().name("test").columnType("time").dataType("time").build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(LocalTimeType.LOCAL_TIME_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + + typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("time with time zone") + .dataType("time with time zone") + .build(); + column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(LocalTimeType.LOCAL_TIME_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testConvertTimestamp() { + BasicTypeDefine typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("datetime") + .dataType("datetime") + .build(); + Column column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TIME_TYPE, column.getDataType()); + 
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + + typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("datetime with time zone") + .dataType("datetime with time zone") + .build(); + column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TIME_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + + typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("timestamp") + .dataType("timestamp") + .build(); + column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TIME_TYPE, column.getDataType()); + Assertions.assertEquals(3, column.getScale()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + + typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("timestamp(6)") + .dataType("timestamp") + .scale(6) + .build(); + column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TIME_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getScale(), column.getScale()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + + typeDefine = + BasicTypeDefine.builder() + .name("test") + .columnType("timestamp with time zone") + .dataType("timestamp with time zone") + .build(); + column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TIME_TYPE, column.getDataType()); + Assertions.assertEquals(3, column.getScale()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + + typeDefine = + 
BasicTypeDefine.builder() + .name("test") + .columnType("timestamp(3) with time zone") + .dataType("timestamp with time zone") + .scale(3) + .build(); + column = XuguTypeConverter.INSTANCE.convert(typeDefine); + Assertions.assertEquals(typeDefine.getName(), column.getName()); + Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TIME_TYPE, column.getDataType()); + Assertions.assertEquals(typeDefine.getScale(), column.getScale()); + Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); + } + + @Test + public void testReconvertBoolean() { + Column column = + PhysicalColumn.builder() + .name("test") + .dataType(BasicType.BOOLEAN_TYPE) + .nullable(true) + .defaultValue(true) + .comment("test") + .build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_BOOLEAN, typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_BOOLEAN, typeDefine.getDataType()); + Assertions.assertEquals(column.isNullable(), typeDefine.isNullable()); + Assertions.assertEquals(column.getDefaultValue(), typeDefine.getDefaultValue()); + Assertions.assertEquals(column.getComment(), typeDefine.getComment()); + } + + @Test + public void testReconvertByte() { + Column column = PhysicalColumn.builder().name("test").dataType(BasicType.BYTE_TYPE).build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_TINYINT, typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_TINYINT, typeDefine.getDataType()); + } + + @Test + public void testReconvertShort() { + Column column = + PhysicalColumn.builder().name("test").dataType(BasicType.SHORT_TYPE).build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + 
Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_SMALLINT, typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_SMALLINT, typeDefine.getDataType()); + } + + @Test + public void testReconvertInt() { + Column column = PhysicalColumn.builder().name("test").dataType(BasicType.INT_TYPE).build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_INTEGER, typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_INTEGER, typeDefine.getDataType()); + } + + @Test + public void testReconvertLong() { + Column column = PhysicalColumn.builder().name("test").dataType(BasicType.LONG_TYPE).build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_BIGINT, typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_BIGINT, typeDefine.getDataType()); + } + + @Test + public void testReconvertFloat() { + Column column = + PhysicalColumn.builder().name("test").dataType(BasicType.FLOAT_TYPE).build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_FLOAT, typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_FLOAT, typeDefine.getDataType()); + } + + @Test + public void testReconvertDouble() { + Column column = + PhysicalColumn.builder().name("test").dataType(BasicType.DOUBLE_TYPE).build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_DOUBLE, 
typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_DOUBLE, typeDefine.getDataType()); + } + + @Test + public void testReconvertDecimal() { + Column column = + PhysicalColumn.builder().name("test").dataType(new DecimalType(0, 0)).build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals( + String.format( + "%s(%s,%s)", + XuguTypeConverter.XUGU_NUMERIC, + XuguTypeConverter.DEFAULT_PRECISION, + XuguTypeConverter.DEFAULT_SCALE), + typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_NUMERIC, typeDefine.getDataType()); + + column = PhysicalColumn.builder().name("test").dataType(new DecimalType(10, 2)).build(); + + typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals( + String.format("%s(%s,%s)", XuguTypeConverter.XUGU_NUMERIC, 10, 2), + typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_NUMERIC, typeDefine.getDataType()); + } + + @Test + public void testReconvertBytes() { + Column column = + PhysicalColumn.builder() + .name("test") + .dataType(PrimitiveByteArrayType.INSTANCE) + .columnLength(null) + .build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_BLOB, typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_BLOB, typeDefine.getDataType()); + + column = + PhysicalColumn.builder() + .name("test") + .dataType(PrimitiveByteArrayType.INSTANCE) + .columnLength(MAX_BINARY_LENGTH) + .build(); + + typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_BINARY, typeDefine.getColumnType()); + 
Assertions.assertEquals(XuguTypeConverter.XUGU_BINARY, typeDefine.getDataType()); + } + + @Test + public void testReconvertString() { + Column column = + PhysicalColumn.builder() + .name("test") + .dataType(BasicType.STRING_TYPE) + .columnLength(null) + .build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals("VARCHAR(60000)", typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_VARCHAR, typeDefine.getDataType()); + + column = + PhysicalColumn.builder() + .name("test") + .dataType(BasicType.STRING_TYPE) + .columnLength(1L) + .build(); + + typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals( + String.format("%s(%s)", XuguTypeConverter.XUGU_VARCHAR, column.getColumnLength()), + typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_VARCHAR, typeDefine.getDataType()); + + column = + PhysicalColumn.builder() + .name("test") + .dataType(BasicType.STRING_TYPE) + .columnLength(60000L) + .build(); + + typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals( + String.format("%s(%s)", XuguTypeConverter.XUGU_VARCHAR, column.getColumnLength()), + typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_VARCHAR, typeDefine.getDataType()); + + column = + PhysicalColumn.builder() + .name("test") + .dataType(BasicType.STRING_TYPE) + .columnLength(60001L) + .build(); + + typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_CLOB, typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_CLOB, typeDefine.getDataType()); + } + + @Test + public void 
testReconvertDate() { + Column column = + PhysicalColumn.builder() + .name("test") + .dataType(LocalTimeType.LOCAL_DATE_TYPE) + .build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_DATE, typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_DATE, typeDefine.getDataType()); + } + + @Test + public void testReconvertTime() { + Column column = + PhysicalColumn.builder() + .name("test") + .dataType(LocalTimeType.LOCAL_TIME_TYPE) + .build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_TIME, typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_TIME, typeDefine.getDataType()); + } + + @Test + public void testReconvertDatetime() { + Column column = + PhysicalColumn.builder() + .name("test") + .dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE) + .build(); + + BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals(XuguTypeConverter.XUGU_TIMESTAMP, typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_TIMESTAMP, typeDefine.getDataType()); + + column = + PhysicalColumn.builder() + .name("test") + .dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE) + .scale(3) + .build(); + + typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals( + String.format("%s(%s)", XuguTypeConverter.XUGU_TIMESTAMP, column.getScale()), + typeDefine.getColumnType()); + Assertions.assertEquals(XuguTypeConverter.XUGU_TIMESTAMP, typeDefine.getDataType()); + Assertions.assertEquals(column.getScale(), typeDefine.getScale()); + + column = + 
PhysicalColumn.builder() + .name("test") + .dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE) + .scale(6) + .build(); + + typeDefine = XuguTypeConverter.INSTANCE.reconvert(column); + Assertions.assertEquals(column.getName(), typeDefine.getName()); + Assertions.assertEquals( + String.format("%s(%s)", XuguTypeConverter.XUGU_TIMESTAMP, 6), + typeDefine.getColumnType()); + } +} diff --git a/seatunnel-dist/src/main/assembly/assembly-bin-ci.xml b/seatunnel-dist/src/main/assembly/assembly-bin-ci.xml index 5c1171a82da..1da0d3014b6 100644 --- a/seatunnel-dist/src/main/assembly/assembly-bin-ci.xml +++ b/seatunnel-dist/src/main/assembly/assembly-bin-ci.xml @@ -203,6 +203,7 @@ com.teradata.jdbc:terajdbc4:jar com.amazon.redshift:redshift-jdbc42:jar net.snowflake.snowflake-jdbc:jar + com.xugudb:xugu-jdbc:jar ${artifact.file.name} /lib diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/pom.xml b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/pom.xml index 09d511594f4..af4c61d5b65 100644 --- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/pom.xml +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/pom.xml @@ -91,6 +91,11 @@ vertica-jdbc test + + com.xugudb + xugu-jdbc + test + diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/JdbcXuguIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/JdbcXuguIT.java new file mode 100644 index 00000000000..5fdae0ad939 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/JdbcXuguIT.java @@ -0,0 +1,246 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under 
one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.seatunnel.jdbc; + +import org.apache.seatunnel.api.table.type.SeaTunnelRow; +import org.apache.seatunnel.common.utils.JdbcUrlUtil; +import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.xugu.XuguCatalog; + +import org.apache.commons.lang3.tuple.Pair; + +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.output.Slf4jLogConsumer; +import org.testcontainers.utility.DockerLoggerFactory; + +import com.google.common.collect.Lists; +import lombok.extern.slf4j.Slf4j; + +import java.math.BigDecimal; +import java.sql.Date; +import java.sql.Time; +import java.sql.Timestamp; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +@Slf4j +public class JdbcXuguIT extends AbstractJdbcIT { + + private static final String XUGU_IMAGE = "xugudb/xugudb:v12"; + private static final String XUGU_CONTAINER_HOST = "e2e_xugudb"; + private static final String XUGU_SCHEMA = "SYSDBA"; + private static final String XUGU_DATABASE = "SYSTEM"; + private static final String XUGU_SOURCE = "e2e_table_source"; + private static final 
String XUGU_SINK = "e2e_table_sink"; + private static final String CATALOG_DATABASE = "catalog_database"; + private static final String CATALOG_TABLE = "e2e_table_catalog"; + private static final String XUGU_USERNAME = "SYSDBA"; + private static final String XUGU_PASSWORD = "SYSDBA"; + private static final int XUGU_PORT = 5138; + private static final String XUGU_URL = "jdbc:xugu://" + HOST + ":%s/%s"; + + private static final String DRIVER_CLASS = "com.xugu.cloudjdbc.Driver"; + + private static final List CONFIG_FILE = + Lists.newArrayList( + "/jdbc_xugu_source_and_upsert_sink.conf", "/jdbc_xugu_source_and_sink.conf"); + private static final String CREATE_SQL = + "create table if not exists %s" + + "(\n" + + " XUGU_NUMERIC NUMERIC(10,2),\n" + + " XUGU_NUMBER NUMBER(10,2),\n" + + " XUGU_INTEGER INTEGER,\n" + + " XUGU_INT INT,\n" + + " XUGU_BIGINT BIGINT,\n" + + " XUGU_TINYINT TINYINT,\n" + + " XUGU_SMALLINT SMALLINT,\n" + + " XUGU_FLOAT FLOAT,\n" + + " XUGU_DOUBLE DOUBLE,\n" + + " XUGU_CHAR CHAR,\n" + + " XUGU_NCHAR NCHAR,\n" + + " XUGU_VARCHAR VARCHAR,\n" + + " XUGU_VARCHAR2 VARCHAR2,\n" + + " XUGU_CLOB CLOB,\n" + + " XUGU_DATE DATE,\n" + + " XUGU_TIME TIME,\n" + + " XUGU_TIMESTAMP TIMESTAMP,\n" + + " XUGU_DATETIME DATETIME,\n" + + " XUGU_TIME_WITH_TIME_ZONE TIME WITH TIME ZONE,\n" + + " XUGU_TIMESTAMP_WITH_TIME_ZONE TIMESTAMP WITH TIME ZONE,\n" + + " XUGU_BINARY BINARY,\n" + + " XUGU_BLOB BLOB,\n" + + " XUGU_GUID GUID,\n" + + " XUGU_BOOLEAN BOOLEAN,\n" + + " CONSTRAINT \"XUGU_PK\" PRIMARY KEY(XUGU_INT)" + + ")"; + private static final String[] fieldNames = + new String[] { + "XUGU_NUMERIC", + "XUGU_NUMBER", + "XUGU_INTEGER", + "XUGU_INT", + "XUGU_BIGINT", + "XUGU_TINYINT", + "XUGU_SMALLINT", + "XUGU_FLOAT", + "XUGU_DOUBLE", + "XUGU_CHAR", + "XUGU_NCHAR", + "XUGU_VARCHAR", + "XUGU_VARCHAR2", + "XUGU_CLOB", + "XUGU_DATE", + "XUGU_TIME", + "XUGU_TIMESTAMP", + "XUGU_DATETIME", + "XUGU_TIME_WITH_TIME_ZONE", + "XUGU_TIMESTAMP_WITH_TIME_ZONE", + "XUGU_BINARY", + 
"XUGU_BLOB", + "XUGU_GUID", + "XUGU_BOOLEAN" + }; + + @Override + JdbcCase getJdbcCase() { + Map containerEnv = new HashMap<>(); + String jdbcUrl = String.format(XUGU_URL, XUGU_PORT, XUGU_DATABASE); + Pair> testDataSet = initTestData(); + String[] fieldNames = testDataSet.getKey(); + + String insertSql = insertTable(XUGU_SCHEMA, XUGU_SOURCE, fieldNames); + + return JdbcCase.builder() + .dockerImage(XUGU_IMAGE) + .networkAliases(XUGU_CONTAINER_HOST) + .containerEnv(containerEnv) + .driverClass(DRIVER_CLASS) + .host(HOST) + .port(XUGU_PORT) + .localPort(XUGU_PORT) + .jdbcTemplate(XUGU_URL) + .jdbcUrl(jdbcUrl) + .userName(XUGU_USERNAME) + .password(XUGU_PASSWORD) + .schema(XUGU_SCHEMA) + .database(XUGU_DATABASE) + .sourceTable(XUGU_SOURCE) + .sinkTable(XUGU_SINK) + .catalogDatabase(CATALOG_DATABASE) + .catalogSchema(XUGU_SCHEMA) + .catalogTable(CATALOG_TABLE) + .createSql(CREATE_SQL) + .configFile(CONFIG_FILE) + .insertSql(insertSql) + .testData(testDataSet) + .build(); + } + + @Override + void compareResult(String executeKey) { + defaultCompare(executeKey, fieldNames, "XUGU_INT"); + } + + @Override + String driverUrl() { + return "https://repo1.maven.org/maven2/com/xugudb/xugu-jdbc/12.2.0/xugu-jdbc-12.2.0.jar"; + } + + @Override + Pair> initTestData() { + List rows = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + SeaTunnelRow row = + new SeaTunnelRow( + new Object[] { + BigDecimal.valueOf(1.12), + BigDecimal.valueOf(i, 2), + i, + i, + Long.parseLong("1"), + i, + i, + Float.parseFloat("1.1"), + Double.parseDouble("1.1"), + String.format("f1_%s", i), + String.format("f1_%s", i), + String.format("f1_%s", i), + String.format("f1_%s", i), + String.format("f1_%s", i), + Date.valueOf(LocalDate.now()), + Time.valueOf(LocalTime.now()), + new Timestamp(System.currentTimeMillis()), + Timestamp.valueOf(LocalDateTime.now()), + Time.valueOf(LocalTime.now()), + new Timestamp(System.currentTimeMillis()), + null, + null, + null, + false + }); + rows.add(row); + } + + return 
Pair.of(fieldNames, rows); + } + + @Override + protected GenericContainer initContainer() { + GenericContainer container = + new GenericContainer<>(XUGU_IMAGE) + .withNetwork(NETWORK) + .withNetworkAliases(XUGU_CONTAINER_HOST) + .withLogConsumer( + new Slf4jLogConsumer(DockerLoggerFactory.getLogger(XUGU_IMAGE))); + container.setPortBindings(Lists.newArrayList(String.format("%s:%s", XUGU_PORT, XUGU_PORT))); + + return container; + } + + @Override + public String quoteIdentifier(String field) { + return "\"" + field + "\""; + } + + @Override + protected void clearTable(String database, String schema, String table) { + clearTable(schema, table); + } + + @Override + protected String buildTableInfoWithSchema(String database, String schema, String table) { + return buildTableInfoWithSchema(schema, table); + } + + @Override + protected void initCatalog() { + String jdbcUrl = jdbcCase.getJdbcUrl().replace(HOST, dbServer.getHost()); + catalog = + new XuguCatalog( + "xugu", + jdbcCase.getUserName(), + jdbcCase.getPassword(), + JdbcUrlUtil.getUrlInfo(jdbcUrl), + XUGU_SCHEMA); + catalog.open(); + } +} diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/src/test/resources/jdbc_xugu_source_and_sink.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/src/test/resources/jdbc_xugu_source_and_sink.conf new file mode 100644 index 00000000000..09154809f36 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/src/test/resources/jdbc_xugu_source_and_sink.conf @@ -0,0 +1,47 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +env { + execution.parallelism = 1 + job.mode = "BATCH" +} + +source { + jdbc { + url = "jdbc:xugu://e2e_xugudb:5138/SYSTEM?batch_mode=false" + driver = "com.xugu.cloudjdbc.Driver" + connection_check_timeout_sec = 100 + user = "SYSDBA" + password = "SYSDBA" + query = "select * from e2e_table_source;" + } +} + +transform { +} + +sink { + jdbc { + url = "jdbc:xugu://e2e_xugudb:5138/SYSTEM?batch_mode=false" + driver = "com.xugu.cloudjdbc.Driver" + user = "SYSDBA" + password = "SYSDBA" + query = """INSERT INTO SYSDBA.e2e_table_sink + (XUGU_NUMERIC, XUGU_NUMBER, XUGU_INTEGER, XUGU_INT, XUGU_BIGINT, XUGU_TINYINT, XUGU_SMALLINT, XUGU_FLOAT, XUGU_DOUBLE, XUGU_CHAR, XUGU_NCHAR, XUGU_VARCHAR, XUGU_VARCHAR2, XUGU_CLOB, XUGU_DATE, XUGU_TIME, XUGU_TIMESTAMP, XUGU_DATETIME, XUGU_TIME_WITH_TIME_ZONE, XUGU_TIMESTAMP_WITH_TIME_ZONE, XUGU_BINARY, XUGU_BLOB, XUGU_GUID, XUGU_BOOLEAN) + VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);""" + } +} diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/src/test/resources/jdbc_xugu_source_and_upsert_sink.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/src/test/resources/jdbc_xugu_source_and_upsert_sink.conf new file mode 100644 index 00000000000..669118f1660 --- /dev/null +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-7/src/test/resources/jdbc_xugu_source_and_upsert_sink.conf @@ -0,0 +1,48 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# 
contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +env { + execution.parallelism = 1 + job.mode = "BATCH" +} + +source { + jdbc { + url = "jdbc:xugu://e2e_xugudb:5138/SYSTEM" + driver = "com.xugu.cloudjdbc.Driver" + connection_check_timeout_sec = 100 + user = "SYSDBA" + password = "SYSDBA" + query = "select * from e2e_table_source;" + } +} + +transform { +} + +sink { + jdbc { + url = "jdbc:xugu://e2e_xugudb:5138/SYSTEM?batch_mode=false" + driver = "com.xugu.cloudjdbc.Driver" + user = "SYSDBA" + password = "SYSDBA" + generate_sink_sql = true + primary_keys = ["XUGU_INT"] + table = "SYSDBA.e2e_table_sink" + database = "SYSTEM" + } +} From de4242cafc0ee0dd7689cc9b72f142f06071ce65 Mon Sep 17 00:00:00 2001 From: hailin0 Date: Wed, 3 Apr 2024 15:58:58 +0800 Subject: [PATCH 54/59] [E2E] Fix AmazondynamodbIT unstable (#6640) --- .../e2e/connector/amazondynamodb/AmazondynamodbIT.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-amazondynamodb-e2e/src/test/java/org/apache/seatunnel/e2e/connector/amazondynamodb/AmazondynamodbIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-amazondynamodb-e2e/src/test/java/org/apache/seatunnel/e2e/connector/amazondynamodb/AmazondynamodbIT.java index 5ae72f6d956..020d510b442 100644 --- 
a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-amazondynamodb-e2e/src/test/java/org/apache/seatunnel/e2e/connector/amazondynamodb/AmazondynamodbIT.java +++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-amazondynamodb-e2e/src/test/java/org/apache/seatunnel/e2e/connector/amazondynamodb/AmazondynamodbIT.java @@ -171,7 +171,9 @@ private void clearSinkTable() { } private void assertHasData(String tableName) { - ScanResponse scan = dynamoDbClient.scan(ScanRequest.builder().tableName(tableName).build()); + ScanResponse scan = + dynamoDbClient.scan( + ScanRequest.builder().tableName(tableName).consistentRead(true).build()); Assertions.assertTrue( !scan.items().isEmpty(), String.format("table %s is empty.", tableName)); } From 50c6c94d27ef4fb3e99c71a7ef451af9aa2e4258 Mon Sep 17 00:00:00 2001 From: xiaochen <598457447@qq.com> Date: Sun, 7 Apr 2024 16:48:51 +0800 Subject: [PATCH 55/59] [Improve][Transform] Remove Fallback during parsing Transform process (#6644) --- .../api/transform/SeaTunnelTransform.java | 20 +++-------- .../execution/TransformExecuteProcessor.java | 28 +++++++-------- .../engine/core/parse/JobConfigParser.java | 35 ------------------ .../parse/MultipleTableJobConfigParser.java | 36 +++---------------- .../engine/server/master/JobMetricsTest.java | 10 +++--- .../AbstractCatalogSupportTransform.java | 10 ------ .../common/AbstractSeaTunnelTransform.java | 6 ---- 7 files changed, 27 insertions(+), 118 deletions(-) diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/transform/SeaTunnelTransform.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/transform/SeaTunnelTransform.java index a7ccd081cee..a64e1b7c7d5 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/transform/SeaTunnelTransform.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/transform/SeaTunnelTransform.java @@ -18,7 +18,6 @@ package org.apache.seatunnel.api.transform; import org.apache.seatunnel.api.common.PluginIdentifierInterface; 
-import org.apache.seatunnel.api.common.SeaTunnelPluginLifeCycle; import org.apache.seatunnel.api.source.SeaTunnelJobAware; import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.type.SeaTunnelDataType; @@ -26,10 +25,7 @@ import java.io.Serializable; public interface SeaTunnelTransform - extends Serializable, - PluginIdentifierInterface, - SeaTunnelPluginLifeCycle, - SeaTunnelJobAware { + extends Serializable, PluginIdentifierInterface, SeaTunnelJobAware { /** call it when Transformer initialed */ default void open() {} @@ -45,22 +41,14 @@ default void setTypeInfo(SeaTunnelDataType inputDataType) { throw new UnsupportedOperationException("setTypeInfo method is not supported"); } - /** - * Get the data type of the records produced by this transform. - * - * @deprecated Please use {@link #getProducedCatalogTable} - * @return Produced data type. - */ - @Deprecated - SeaTunnelDataType getProducedType(); - /** Get the catalog table output by this transform */ CatalogTable getProducedCatalogTable(); /** - * Transform input data to {@link this#getProducedType()} types data. + * Transform input data to {@link this#getProducedCatalogTable().getSeaTunnelRowType()} types + * data. * - * @param row the data need be transform. + * @param row the data need be transformed. * @return transformed data. 
*/ T map(T row); diff --git a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/TransformExecuteProcessor.java b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/TransformExecuteProcessor.java index 450599ff7b1..d91bb9d3da7 100644 --- a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/TransformExecuteProcessor.java +++ b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/TransformExecuteProcessor.java @@ -37,7 +37,6 @@ import org.apache.flink.api.common.typeinfo.TypeInformation; import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.types.Row; -import org.apache.flink.util.Collector; import java.net.URL; import java.util.Collections; @@ -119,24 +118,25 @@ public List execute(List upstreamDataS protected DataStream flinkTransform( SeaTunnelRowType sourceType, SeaTunnelTransform transform, DataStream stream) { - TypeInformation rowTypeInfo = TypeConverterUtils.convert(transform.getProducedType()); + TypeInformation rowTypeInfo = + TypeConverterUtils.convert( + transform.getProducedCatalogTable().getSeaTunnelRowType()); FlinkRowConverter transformInputRowConverter = new FlinkRowConverter(sourceType); FlinkRowConverter transformOutputRowConverter = new FlinkRowConverter(transform.getProducedCatalogTable().getSeaTunnelRowType()); DataStream output = stream.flatMap( - new FlatMapFunction() { - @Override - public void flatMap(Row value, Collector out) throws Exception { - SeaTunnelRow seaTunnelRow = - transformInputRowConverter.reconvert(value); - SeaTunnelRow dataRow = (SeaTunnelRow) transform.map(seaTunnelRow); - if (dataRow != null) { - Row copy = transformOutputRowConverter.convert(dataRow); - 
out.collect(copy); - } - } - }, + (FlatMapFunction) + (value, out) -> { + SeaTunnelRow seaTunnelRow = + transformInputRowConverter.reconvert(value); + SeaTunnelRow dataRow = + (SeaTunnelRow) transform.map(seaTunnelRow); + if (dataRow != null) { + Row copy = transformOutputRowConverter.convert(dataRow); + out.collect(copy); + } + }, rowTypeInfo); return output; } diff --git a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/JobConfigParser.java b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/JobConfigParser.java index 2ef1a28aff8..981b85049aa 100644 --- a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/JobConfigParser.java +++ b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/JobConfigParser.java @@ -23,10 +23,8 @@ import org.apache.seatunnel.api.source.SeaTunnelSource; import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.CatalogTableUtil; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import org.apache.seatunnel.api.table.type.SeaTunnelRowType; -import org.apache.seatunnel.api.transform.SeaTunnelTransform; import org.apache.seatunnel.common.constants.CollectionConstants; import org.apache.seatunnel.core.starter.execution.PluginUtil; import org.apache.seatunnel.engine.common.config.JobConfig; @@ -34,7 +32,6 @@ import org.apache.seatunnel.engine.core.dag.actions.Action; import org.apache.seatunnel.engine.core.dag.actions.SinkAction; import org.apache.seatunnel.engine.core.dag.actions.SourceAction; -import org.apache.seatunnel.engine.core.dag.actions.TransformAction; import org.apache.commons.lang3.tuple.ImmutablePair; @@ -99,38 +96,6 @@ public Tuple2 parseSource( return new Tuple2<>(catalogTable, action); } - public Tuple2 parseTransform( - Config config, - JobConfig 
jobConfig, - String tableId, - int parallelism, - SeaTunnelRowType rowType, - Set inputActions) { - final ImmutablePair, Set> tuple = - ConnectorInstanceLoader.loadTransformInstance( - config, jobConfig.getJobContext(), commonPluginJars); - final SeaTunnelTransform transform = tuple.getLeft(); - // old logic: prepare(initialization) -> set job context -> set row type (There is a logical - // judgment that depends on before and after, not a simple set) - transform.prepare(config); - transform.setJobContext(jobConfig.getJobContext()); - transform.setTypeInfo((SeaTunnelDataType) rowType); - final String actionName = createTransformActionName(0, tuple.getLeft().getPluginName()); - final TransformAction action = - new TransformAction( - idGenerator.getNextId(), - actionName, - new ArrayList<>(inputActions), - transform, - tuple.getRight(), - new HashSet<>()); - action.setParallelism(parallelism); - CatalogTable catalogTable = - CatalogTableUtil.getCatalogTable( - tableId, (SeaTunnelRowType) transform.getProducedType()); - return new Tuple2<>(catalogTable, action); - } - public List> parseSinks( int configIndex, List>> inputVertices, diff --git a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java index f988f293a5f..883c7c59fa0 100644 --- a/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java +++ b/seatunnel-engine/seatunnel-engine-core/src/main/java/org/apache/seatunnel/engine/core/parse/MultipleTableJobConfigParser.java @@ -34,9 +34,7 @@ import org.apache.seatunnel.api.table.factory.FactoryUtil; import org.apache.seatunnel.api.table.factory.TableSinkFactory; import org.apache.seatunnel.api.table.factory.TableSourceFactory; -import org.apache.seatunnel.api.table.factory.TableTransformFactory; import 
org.apache.seatunnel.api.table.type.SeaTunnelDataType; -import org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.api.transform.SeaTunnelTransform; import org.apache.seatunnel.common.Constants; import org.apache.seatunnel.common.config.TypesafeConfigUtils; @@ -405,35 +403,14 @@ private void parseTransform( final String tableId = readonlyConfig.getOptional(CommonOptions.RESULT_TABLE_NAME).orElse(DEFAULT_ID); - boolean fallback = - isFallback( - classLoader, - TableTransformFactory.class, - factoryId, - (factory) -> factory.createTransform(null)); - Set inputActions = inputs.stream() .map(Tuple2::_2) .collect(Collectors.toCollection(LinkedHashSet::new)); - SeaTunnelDataType expectedType = getProducedType(inputs.get(0)._2()); checkProducedTypeEquals(inputActions); int spareParallelism = inputs.get(0)._2().getParallelism(); int parallelism = readonlyConfig.getOptional(CommonOptions.PARALLELISM).orElse(spareParallelism); - if (fallback) { - Tuple2 tuple = - fallbackParser.parseTransform( - config, - jobConfig, - tableId, - parallelism, - (SeaTunnelRowType) expectedType, - inputActions); - tableWithActionMap.put(tableId, Collections.singletonList(tuple)); - return; - } - CatalogTable catalogTable = inputs.get(0)._1(); SeaTunnelTransform transform = FactoryUtil.createAndPrepareTransform( @@ -470,15 +447,10 @@ public static SeaTunnelDataType getProducedType(Action action) { return ((SourceAction) action).getSource().getProducedType(); } } else if (action instanceof TransformAction) { - try { - return ((TransformAction) action) - .getTransform() - .getProducedCatalogTable() - .getSeaTunnelRowType(); - } catch (UnsupportedOperationException e) { - // TODO remove it when all connector use `getProducedCatalogTables` - return ((TransformAction) action).getTransform().getProducedType(); - } + return ((TransformAction) action) + .getTransform() + .getProducedCatalogTable() + .getSeaTunnelRowType(); } throw new UnsupportedOperationException(); } diff 
--git a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/master/JobMetricsTest.java b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/master/JobMetricsTest.java index 3b81f2a6555..ed12a565d71 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/master/JobMetricsTest.java +++ b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/master/JobMetricsTest.java @@ -137,7 +137,7 @@ public void testMetricsOnJobRestart() throws InterruptedException { server.getCoordinatorService().getJobStatus(jobId3))); // check metrics - await().atMost(60000, TimeUnit.MILLISECONDS) + await().atMost(600000, TimeUnit.MILLISECONDS) .untilAsserted( () -> { JobMetrics jobMetrics = coordinatorService.getJobMetrics(jobId3); @@ -161,12 +161,12 @@ public void testMetricsOnJobRestart() throws InterruptedException { server.getCoordinatorService().cancelJob(jobId3); } - private void startJob(Long jobid, String path, boolean isStartWithSavePoint) { - LogicalDag testLogicalDag = TestUtils.createTestLogicalPlan(path, jobid.toString(), jobid); + private void startJob(Long jobId, String path, boolean isStartWithSavePoint) { + LogicalDag testLogicalDag = TestUtils.createTestLogicalPlan(path, jobId.toString(), jobId); JobImmutableInformation jobImmutableInformation = new JobImmutableInformation( - jobid, + jobId, "Test", isStartWithSavePoint, nodeEngine.getSerializationService().toData(testLogicalDag), @@ -177,7 +177,7 @@ private void startJob(Long jobid, String path, boolean isStartWithSavePoint) { Data data = nodeEngine.getSerializationService().toData(jobImmutableInformation); PassiveCompletableFuture voidPassiveCompletableFuture = - server.getCoordinatorService().submitJob(jobid, data); + server.getCoordinatorService().submitJob(jobId, data); voidPassiveCompletableFuture.join(); } } diff --git 
a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/common/AbstractCatalogSupportTransform.java b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/common/AbstractCatalogSupportTransform.java index 78fe02094f6..5670bcc1296 100644 --- a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/common/AbstractCatalogSupportTransform.java +++ b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/common/AbstractCatalogSupportTransform.java @@ -20,8 +20,6 @@ import org.apache.seatunnel.api.table.catalog.CatalogTable; import org.apache.seatunnel.api.table.catalog.TableIdentifier; import org.apache.seatunnel.api.table.catalog.TableSchema; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; -import org.apache.seatunnel.api.table.type.SeaTunnelRow; import lombok.NonNull; @@ -61,12 +59,4 @@ private CatalogTable transformCatalogTable() { protected abstract TableSchema transformTableSchema(); protected abstract TableIdentifier transformTableIdentifier(); - - @Override - public SeaTunnelDataType getProducedType() { - if (outputRowType != null) { - return outputRowType; - } - return getProducedCatalogTable().getTableSchema().toPhysicalRowDataType(); - } } diff --git a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/common/AbstractSeaTunnelTransform.java b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/common/AbstractSeaTunnelTransform.java index b710034cad9..1892881c277 100644 --- a/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/common/AbstractSeaTunnelTransform.java +++ b/seatunnel-transforms-v2/src/main/java/org/apache/seatunnel/transform/common/AbstractSeaTunnelTransform.java @@ -18,7 +18,6 @@ package org.apache.seatunnel.transform.common; import org.apache.seatunnel.api.table.catalog.CatalogTable; -import org.apache.seatunnel.api.table.type.SeaTunnelDataType; import org.apache.seatunnel.api.table.type.SeaTunnelRow; import 
org.apache.seatunnel.api.table.type.SeaTunnelRowType; import org.apache.seatunnel.api.transform.SeaTunnelTransform; @@ -30,11 +29,6 @@ public abstract class AbstractSeaTunnelTransform implements SeaTunnelTransform getProducedType() { - return outputRowType; - } - @Override public SeaTunnelRow map(SeaTunnelRow row) { return transformRow(row); From 64ebdb753e3f9f857f2623c3f37c34129c41509f Mon Sep 17 00:00:00 2001 From: Jarvis Date: Sun, 7 Apr 2024 18:53:33 +0800 Subject: [PATCH 56/59] [Docs] update debezium json document (#6652) --- docs/en/connector-v2/formats/debezium-json.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/en/connector-v2/formats/debezium-json.md b/docs/en/connector-v2/formats/debezium-json.md index a01e6c70d65..d1deebfef91 100644 --- a/docs/en/connector-v2/formats/debezium-json.md +++ b/docs/en/connector-v2/formats/debezium-json.md @@ -68,6 +68,12 @@ The MySQL products table has 4 columns (id, name, description and weight). The above JSON message is an update change event on the products table where the weight value of the row with id = 111 is changed from 5.18 to 5.15. Assuming the messages have been synchronized to Kafka topic products_binlog, then we can use the following Seatunnel conf to consume this topic and interpret the change events by Debezium format. +**In this config, you must specify the `schema` and `debezium_record_include_schema` options ** +- `schema` should same with your table format +- if your json data contains `schema` field, `debezium_record_include_schema` should be true, and if your json data doesn't contains `schema` field, `debezium_record_include_schema` should be false +- `{"schema" : {}, "payload": { "before" : {}, "after": {} ... } }` --> `true` +- `{"before" : {}, "after": {} ... 
}` --> `false` + ```bash env { parallelism = 1 @@ -88,6 +94,7 @@ source { weight = "string" } } + debezium_record_include_schema = false format = debezium_json } From 0643eeb0349b4ffdd4916e66dbf4c7272cda018a Mon Sep 17 00:00:00 2001 From: Jia Fan Date: Sun, 7 Apr 2024 19:04:36 +0800 Subject: [PATCH 57/59] [Fix][Doc] Fix FTP sink config key `username` to `user` (#6627) * [Fix][Doc] Fix FTP sink config key `username` to `user` * [Fix][Doc] Fix FTP sink config key `username` to `user` --- docs/en/connector-v2/sink/FtpFile.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/connector-v2/sink/FtpFile.md b/docs/en/connector-v2/sink/FtpFile.md index 98080012f9f..8757fd7aa26 100644 --- a/docs/en/connector-v2/sink/FtpFile.md +++ b/docs/en/connector-v2/sink/FtpFile.md @@ -35,7 +35,7 @@ By default, we use 2PC commit to ensure `exactly-once` |----------------------------------|---------|----------|--------------------------------------------|-------------------------------------------------------------------------------------------------------------------| | host | string | yes | - | | | port | int | yes | - | | -| username | string | yes | - | | +| user | string | yes | - | | | password | string | yes | - | | | path | string | yes | - | | | tmp_path | string | yes | /tmp/seatunnel | The result file will write to a tmp path first and then use `mv` to submit tmp dir to target dir. Need a FTP dir. 
| @@ -70,7 +70,7 @@ The target ftp host is required The target ftp port is required -### username [string] +### user [string] The target ftp username is required @@ -225,7 +225,7 @@ For text file format simple config FtpFile { host = "xxx.xxx.xxx.xxx" port = 21 - username = "username" + user = "username" password = "password" path = "/data/ftp" file_format_type = "text" @@ -243,7 +243,7 @@ For text file format with `have_partition` and `custom_filename` and `sink_colum FtpFile { host = "xxx.xxx.xxx.xxx" port = 21 - username = "username" + user = "username" password = "password" path = "/data/ftp/seatunnel/job1" tmp_path = "/data/ftp/seatunnel/tmp" From f1601e3ea662135c9aae86ace6a44aef35d5e027 Mon Sep 17 00:00:00 2001 From: Jarvis Date: Sun, 7 Apr 2024 19:26:59 +0800 Subject: [PATCH 58/59] [Docs] fix kafka format typo (#6633) --- docs/en/connector-v2/sink/Kafka.md | 26 ++++++++++----------- docs/en/connector-v2/source/kafka.md | 34 ++++++++++++++-------------- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/docs/en/connector-v2/sink/Kafka.md b/docs/en/connector-v2/sink/Kafka.md index c28dd6a08ec..2919eab9883 100644 --- a/docs/en/connector-v2/sink/Kafka.md +++ b/docs/en/connector-v2/sink/Kafka.md @@ -30,19 +30,19 @@ They can be downloaded via install-plugin.sh or from the Maven central repositor ## Sink Options -| Name | Type | Required | Default | Description | -|----------------------|--------|----------|---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| topic | String | Yes | - | When the table is used as sink, the topic name is the topic to write data to. 
| -| bootstrap.servers | String | Yes | - | Comma separated list of Kafka brokers. | -| kafka.config | Map | No | - | In addition to the above parameters that must be specified by the `Kafka producer` client, the user can also specify multiple non-mandatory parameters for the `producer` client, covering [all the producer parameters specified in the official Kafka document](https://kafka.apache.org/documentation.html#producerconfigs). | -| semantics | String | No | NON | Semantics that can be chosen EXACTLY_ONCE/AT_LEAST_ONCE/NON, default NON. | -| partition_key_fields | Array | No | - | Configure which fields are used as the key of the kafka message. | -| partition | Int | No | - | We can specify the partition, all messages will be sent to this partition. | -| assign_partitions | Array | No | - | We can decide which partition to send based on the content of the message. The function of this parameter is to distribute information. | -| transaction_prefix | String | No | - | If semantic is specified as EXACTLY_ONCE, the producer will write all messages in a Kafka transaction,kafka distinguishes different transactions by different transactionId. This parameter is prefix of kafka transactionId, make sure different job use different prefix. | -| format | String | No | json | Data format. The default format is json. Optional text format, canal-json, debezium-json and avro.If you use json or text format. The default field separator is ", ". If you customize the delimiter, add the "field_delimiter" option.If you use canal format, please refer to [canal-json](../formats/canal-json.md) for details.If you use debezium format, please refer to [debezium-json](../formats/debezium-json.md) for details. | -| field_delimiter | String | No | , | Customize the field delimiter for data format. 
| -| common-options | | No | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details | +| Name | Type | Required | Default | Description | +|----------------------|--------|----------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| topic | String | Yes | - | When the table is used as sink, the topic name is the topic to write data to. | +| bootstrap.servers | String | Yes | - | Comma separated list of Kafka brokers. | +| kafka.config | Map | No | - | In addition to the above parameters that must be specified by the `Kafka producer` client, the user can also specify multiple non-mandatory parameters for the `producer` client, covering [all the producer parameters specified in the official Kafka document](https://kafka.apache.org/documentation.html#producerconfigs). | +| semantics | String | No | NON | Semantics that can be chosen EXACTLY_ONCE/AT_LEAST_ONCE/NON, default NON. | +| partition_key_fields | Array | No | - | Configure which fields are used as the key of the kafka message. | +| partition | Int | No | - | We can specify the partition, all messages will be sent to this partition. | +| assign_partitions | Array | No | - | We can decide which partition to send based on the content of the message. The function of this parameter is to distribute information. | +| transaction_prefix | String | No | - | If semantic is specified as EXACTLY_ONCE, the producer will write all messages in a Kafka transaction,kafka distinguishes different transactions by different transactionId. 
This parameter is prefix of kafka transactionId, make sure different job use different prefix. | +| format | String | No | json | Data format. The default format is json. Optional text format, canal_json, debezium_json, ogg_json and avro.If you use json or text format. The default field separator is ", ". If you customize the delimiter, add the "field_delimiter" option.If you use canal format, please refer to [canal-json](../formats/canal-json.md) for details.If you use debezium format, please refer to [debezium-json](../formats/debezium-json.md) for details. | +| field_delimiter | String | No | , | Customize the field delimiter for data format. | +| common-options | | No | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details | ## Parameter Interpretation diff --git a/docs/en/connector-v2/source/kafka.md b/docs/en/connector-v2/source/kafka.md index ebee2bb3d54..982c62e5fb0 100644 --- a/docs/en/connector-v2/source/kafka.md +++ b/docs/en/connector-v2/source/kafka.md @@ -32,23 +32,23 @@ They can be downloaded via install-plugin.sh or from the Maven central repositor ## Source Options -| Name | Type | Required | Default | Description | -|-------------------------------------|-----------------------------------------------------------------------------|----------|--------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| topic | String | Yes | - | Topic name(s) to read data from when the table is used as source. It also supports topic list for source by separating topic by comma like 'topic-1,topic-2'. 
| -| bootstrap.servers | String | Yes | - | Comma separated list of Kafka brokers. | -| pattern | Boolean | No | false | If `pattern` is set to `true`,the regular expression for a pattern of topic names to read from. All topics in clients with names that match the specified regular expression will be subscribed by the consumer. | -| consumer.group | String | No | SeaTunnel-Consumer-Group | `Kafka consumer group id`, used to distinguish different consumer groups. | -| commit_on_checkpoint | Boolean | No | true | If true the consumer's offset will be periodically committed in the background. | -| kafka.config | Map | No | - | In addition to the above necessary parameters that must be specified by the `Kafka consumer` client, users can also specify multiple `consumer` client non-mandatory parameters, covering [all consumer parameters specified in the official Kafka document](https://kafka.apache.org/documentation.html#consumerconfigs). | -| schema | Config | No | - | The structure of the data, including field names and field types. | -| format | String | No | json | Data format. The default format is json. Optional text format, canal-json, debezium-json and avro.If you use json or text format. The default field separator is ", ". If you customize the delimiter, add the "field_delimiter" option.If you use canal format, please refer to [canal-json](../formats/canal-json.md) for details.If you use debezium format, please refer to [debezium-json](../formats/debezium-json.md) for details. | -| format_error_handle_way | String | No | fail | The processing method of data format error. The default value is fail, and the optional value is (fail, skip). When fail is selected, data format error will block and an exception will be thrown. When skip is selected, data format error will skip this line data. | -| field_delimiter | String | No | , | Customize the field delimiter for data format. 
| -| start_mode | StartMode[earliest],[group_offsets],[latest],[specific_offsets],[timestamp] | No | group_offsets | The initial consumption pattern of consumers. | -| start_mode.offsets | Config | No | - | The offset required for consumption mode to be specific_offsets. | -| start_mode.timestamp | Long | No | - | The time required for consumption mode to be "timestamp". | -| partition-discovery.interval-millis | Long | No | -1 | The interval for dynamically discovering topics and partitions. | -| common-options | | No | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details | +| Name | Type | Required | Default | Description | +|-------------------------------------|-----------------------------------------------------------------------------|----------|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| topic | String | Yes | - | Topic name(s) to read data from when the table is used as source. It also supports topic list for source by separating topic by comma like 'topic-1,topic-2'. | +| bootstrap.servers | String | Yes | - | Comma separated list of Kafka brokers. | +| pattern | Boolean | No | false | If `pattern` is set to `true`,the regular expression for a pattern of topic names to read from. All topics in clients with names that match the specified regular expression will be subscribed by the consumer. | +| consumer.group | String | No | SeaTunnel-Consumer-Group | `Kafka consumer group id`, used to distinguish different consumer groups. 
| +| commit_on_checkpoint | Boolean | No | true | If true the consumer's offset will be periodically committed in the background. | +| kafka.config | Map | No | - | In addition to the above necessary parameters that must be specified by the `Kafka consumer` client, users can also specify multiple `consumer` client non-mandatory parameters, covering [all consumer parameters specified in the official Kafka document](https://kafka.apache.org/documentation.html#consumerconfigs). | +| schema | Config | No | - | The structure of the data, including field names and field types. | +| format | String | No | json | Data format. The default format is json. Optional text format, canal_json, debezium_json, ogg_json and avro.If you use json or text format. The default field separator is ", ". If you customize the delimiter, add the "field_delimiter" option.If you use canal format, please refer to [canal-json](../formats/canal-json.md) for details.If you use debezium format, please refer to [debezium-json](../formats/debezium-json.md) for details. | +| format_error_handle_way | String | No | fail | The processing method of data format error. The default value is fail, and the optional value is (fail, skip). When fail is selected, data format error will block and an exception will be thrown. When skip is selected, data format error will skip this line data. | +| field_delimiter | String | No | , | Customize the field delimiter for data format. | +| start_mode | StartMode[earliest],[group_offsets],[latest],[specific_offsets],[timestamp] | No | group_offsets | The initial consumption pattern of consumers. | +| start_mode.offsets | Config | No | - | The offset required for consumption mode to be specific_offsets. | +| start_mode.timestamp | Long | No | - | The time required for consumption mode to be "timestamp". | +| partition-discovery.interval-millis | Long | No | -1 | The interval for dynamically discovering topics and partitions. 
| +| common-options | | No | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details | ## Task Example From 01159ec923358fa13237245c050c1c851aaec07f Mon Sep 17 00:00:00 2001 From: hailin0 Date: Sun, 7 Apr 2024 19:38:41 +0800 Subject: [PATCH 59/59] [Feature] Support listening for message delayed events in cdc source (#6634) --- .../apache/seatunnel/api/event/EventType.java | 1 + .../api/source/event/MessageDelayedEvent.java | 51 ++++++++++++++ .../IncrementalSourceRecordEmitter.java | 10 +++ .../utils/MessageDelayedEventLimiter.java | 50 +++++++++++++ .../utils/MessageDelayedEventLimiterTest.java | 70 +++++++++++++++++++ .../seatunnel/SeaTunnelContainer.java | 1 + .../engine/server/TaskExecutionService.java | 55 +++++++++++++++ .../event/JobEventHttpReportHandler.java | 4 +- .../engine/server/event/JobEventListener.java | 4 +- .../server/event/JobEventReportOperation.java | 13 ++-- .../event/JobEventHttpReportHandlerTest.java | 48 ++++++++----- 11 files changed, 281 insertions(+), 26 deletions(-) create mode 100644 seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/MessageDelayedEvent.java create mode 100644 seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/MessageDelayedEventLimiter.java create mode 100644 seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/utils/MessageDelayedEventLimiterTest.java diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventType.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventType.java index 46acd316b4d..edb1b72f36b 100644 --- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventType.java +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/event/EventType.java @@ -29,4 +29,5 @@ public enum EventType { LIFECYCLE_READER_OPEN, LIFECYCLE_READER_CLOSE, LIFECYCLE_WRITER_CLOSE, + 
READER_MESSAGE_DELAYED, } diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/MessageDelayedEvent.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/MessageDelayedEvent.java new file mode 100644 index 00000000000..f27cc7e9e58 --- /dev/null +++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/source/event/MessageDelayedEvent.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.api.source.event; + +import org.apache.seatunnel.api.event.Event; +import org.apache.seatunnel.api.event.EventType; + +import lombok.AllArgsConstructor; +import lombok.Getter; +import lombok.NoArgsConstructor; +import lombok.Setter; +import lombok.ToString; + +@Getter +@Setter +@ToString +@AllArgsConstructor +@NoArgsConstructor +public class MessageDelayedEvent implements Event { + private long createdTime; + private String jobId; + private EventType eventType = EventType.READER_MESSAGE_DELAYED; + + private long delayTime; + private String record; + + public MessageDelayedEvent(long delayTime) { + this(delayTime, null); + } + + public MessageDelayedEvent(long delayTime, String record) { + this.delayTime = delayTime; + this.record = record; + this.createdTime = System.currentTimeMillis(); + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceRecordEmitter.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceRecordEmitter.java index a98a9d09591..7c25d3ce5c4 100644 --- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceRecordEmitter.java +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/reader/IncrementalSourceRecordEmitter.java @@ -21,6 +21,7 @@ import org.apache.seatunnel.api.event.EventListener; import org.apache.seatunnel.api.source.Collector; import org.apache.seatunnel.api.source.SourceReader; +import org.apache.seatunnel.api.source.event.MessageDelayedEvent; import org.apache.seatunnel.api.table.event.SchemaChangeEvent; import org.apache.seatunnel.connectors.cdc.base.source.event.CompletedSnapshotPhaseEvent; import 
org.apache.seatunnel.connectors.cdc.base.source.offset.Offset; @@ -28,6 +29,7 @@ import org.apache.seatunnel.connectors.cdc.base.source.split.SourceRecords; import org.apache.seatunnel.connectors.cdc.base.source.split.state.IncrementalSplitState; import org.apache.seatunnel.connectors.cdc.base.source.split.state.SourceSplitStateBase; +import org.apache.seatunnel.connectors.cdc.base.utils.MessageDelayedEventLimiter; import org.apache.seatunnel.connectors.cdc.debezium.DebeziumDeserializationSchema; import org.apache.seatunnel.connectors.seatunnel.common.source.reader.RecordEmitter; @@ -35,6 +37,7 @@ import lombok.extern.slf4j.Slf4j; +import java.time.Duration; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -71,6 +74,8 @@ public class IncrementalSourceRecordEmitter protected final Counter recordFetchDelay; protected final Counter recordEmitDelay; protected final EventListener eventListener; + protected final MessageDelayedEventLimiter delayedEventLimiter = + new MessageDelayedEventLimiter(Duration.ofSeconds(1), 0.5d); public IncrementalSourceRecordEmitter( DebeziumDeserializationSchema debeziumDeserializationSchema, @@ -113,6 +118,11 @@ protected void reportMetrics(SourceRecord element) { // report emit delay long emitDelay = now - messageTimestamp; recordEmitDelay.set(emitDelay > 0 ? 
emitDelay : 0); + + // limit the emit event frequency + if (delayedEventLimiter.acquire(messageTimestamp)) { + eventListener.onEvent(new MessageDelayedEvent(emitDelay, element.toString())); + } } } diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/MessageDelayedEventLimiter.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/MessageDelayedEventLimiter.java new file mode 100644 index 00000000000..af1c72035d8 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/utils/MessageDelayedEventLimiter.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.seatunnel.connectors.cdc.base.utils; + +import org.apache.seatunnel.shade.com.google.common.util.concurrent.RateLimiter; + +import lombok.AllArgsConstructor; + +import java.time.Duration; + +@AllArgsConstructor +public class MessageDelayedEventLimiter { + private final long delayMs; + private final RateLimiter eventRateLimiter; + + public MessageDelayedEventLimiter(Duration delayThreshold) { + this(delayThreshold, 1); + } + + public MessageDelayedEventLimiter(Duration delayThreshold, double permitsPerSecond) { + this.delayMs = delayThreshold.toMillis(); + this.eventRateLimiter = RateLimiter.create(permitsPerSecond); + } + + public boolean acquire(long messageCreateTime) { + if (isDelayed(messageCreateTime)) { + return eventRateLimiter.tryAcquire(); + } + return false; + } + + private boolean isDelayed(long messageCreateTime) { + return delayMs != 0 && System.currentTimeMillis() - messageCreateTime >= delayMs; + } +} diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/utils/MessageDelayedEventLimiterTest.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/utils/MessageDelayedEventLimiterTest.java new file mode 100644 index 00000000000..5e0332ed956 --- /dev/null +++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/test/java/org/apache/seatunnel/connectors/cdc/base/utils/MessageDelayedEventLimiterTest.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.seatunnel.connectors.cdc.base.utils; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.time.Duration; +import java.util.concurrent.TimeUnit; + +public class MessageDelayedEventLimiterTest { + + @Test + public void testAcquire() throws InterruptedException { + double permitsPerSecond = 0.5; + Duration delayThreshold = Duration.ofMillis(1000); + MessageDelayedEventLimiter delayedEventLimiter = + new MessageDelayedEventLimiter(delayThreshold, permitsPerSecond); + + long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10); + long actualAcquiredCount = 0; + while (System.currentTimeMillis() < endTime) { + boolean acquired = + delayedEventLimiter.acquire( + System.currentTimeMillis() - (delayThreshold.toMillis() * 10)); + if (acquired) { + actualAcquiredCount++; + } + Thread.sleep(1); + } + long expectedAcquiredCount = (long) (TimeUnit.SECONDS.toSeconds(10) * permitsPerSecond); + + Assertions.assertTrue(expectedAcquiredCount >= actualAcquiredCount); + } + + @Test + public void testNoAcquire() throws InterruptedException { + double permitsPerSecond = 0.5; + Duration delayThreshold = Duration.ofMillis(1000); + MessageDelayedEventLimiter delayedEventLimiter = + new MessageDelayedEventLimiter(delayThreshold, permitsPerSecond); + + long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10); + long actualAcquiredCount = 0; + while (System.currentTimeMillis() < endTime) { + boolean acquired = delayedEventLimiter.acquire(System.currentTimeMillis()); + if 
(acquired) { + actualAcquiredCount++; + } + Thread.sleep(1); + } + + Assertions.assertTrue(actualAcquiredCount == 0); + } +} diff --git a/seatunnel-e2e/seatunnel-e2e-common/src/test/java/org/apache/seatunnel/e2e/common/container/seatunnel/SeaTunnelContainer.java b/seatunnel-e2e/seatunnel-e2e-common/src/test/java/org/apache/seatunnel/e2e/common/container/seatunnel/SeaTunnelContainer.java index ef83f83257f..ca55adbe89e 100644 --- a/seatunnel-e2e/seatunnel-e2e-common/src/test/java/org/apache/seatunnel/e2e/common/container/seatunnel/SeaTunnelContainer.java +++ b/seatunnel-e2e/seatunnel-e2e-common/src/test/java/org/apache/seatunnel/e2e/common/container/seatunnel/SeaTunnelContainer.java @@ -250,6 +250,7 @@ private static boolean isSystemThread(String s) { || s.contains("Java2D Disposer") || s.contains("OkHttp ConnectionPool") || s.startsWith("http-report-event-scheduler") + || s.startsWith("event-forwarder") || s.contains( "org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner") || s.startsWith("Log4j2-TF-") diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java index 197833903d5..049c9c374a9 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/TaskExecutionService.java @@ -18,7 +18,9 @@ package org.apache.seatunnel.engine.server; import org.apache.seatunnel.api.common.metrics.MetricTags; +import org.apache.seatunnel.api.event.Event; import org.apache.seatunnel.common.utils.ExceptionUtils; +import org.apache.seatunnel.common.utils.RetryUtils; import org.apache.seatunnel.common.utils.StringFormatUtils; import org.apache.seatunnel.engine.common.Constant; import 
org.apache.seatunnel.engine.common.config.ConfigProvider; @@ -28,6 +30,7 @@ import org.apache.seatunnel.engine.common.utils.PassiveCompletableFuture; import org.apache.seatunnel.engine.core.classloader.ClassLoaderService; import org.apache.seatunnel.engine.core.job.ConnectorJarIdentifier; +import org.apache.seatunnel.engine.server.event.JobEventReportOperation; import org.apache.seatunnel.engine.server.exception.TaskGroupContextNotFoundException; import org.apache.seatunnel.engine.server.execution.ExecutionState; import org.apache.seatunnel.engine.server.execution.ProgressState; @@ -46,10 +49,12 @@ import org.apache.seatunnel.engine.server.task.SeaTunnelTask; import org.apache.seatunnel.engine.server.task.TaskGroupImmutableInformation; import org.apache.seatunnel.engine.server.task.operation.NotifyTaskStatusOperation; +import org.apache.seatunnel.engine.server.utils.NodeEngineUtil; import org.apache.commons.collections4.CollectionUtils; import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.hazelcast.instance.impl.NodeState; import com.hazelcast.internal.metrics.DynamicMetricsProvider; import com.hazelcast.internal.metrics.MetricDescriptor; @@ -68,6 +73,7 @@ import java.io.IOException; import java.net.URL; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; @@ -75,6 +81,7 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.CancellationException; import java.util.concurrent.CompletableFuture; @@ -140,6 +147,9 @@ public class TaskExecutionService implements DynamicMetricsProvider { private final ServerConnectorPackageClient serverConnectorPackageClient; + private final BlockingQueue eventBuffer; + private final ExecutorService eventForwardService; + public TaskExecutionService( ClassLoaderService classLoaderService, 
NodeEngineImpl nodeEngine, @@ -165,6 +175,43 @@ public TaskExecutionService( serverConnectorPackageClient = new ServerConnectorPackageClient(nodeEngine, seaTunnelConfig); + + eventBuffer = new ArrayBlockingQueue<>(2048); + eventForwardService = + Executors.newSingleThreadExecutor( + new ThreadFactoryBuilder().setNameFormat("event-forwarder-%d").build()); + eventForwardService.submit( + () -> { + List events = new ArrayList<>(); + RetryUtils.RetryMaterial retryMaterial = + new RetryUtils.RetryMaterial(2, true, e -> true); + while (!Thread.currentThread().isInterrupted()) { + try { + events.clear(); + + Event first = eventBuffer.take(); + events.add(first); + + eventBuffer.drainTo(events, 500); + JobEventReportOperation operation = new JobEventReportOperation(events); + + RetryUtils.retryWithException( + () -> + NodeEngineUtil.sendOperationToMasterNode( + nodeEngine, operation) + .join(), + retryMaterial); + + logger.fine("Event forward success, events " + events.size()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.info("Event forward thread interrupted"); + } catch (Throwable t) { + logger.warning( + "Event forward failed, discard events " + events.size(), t); + } + } + }); } public void start() { @@ -175,6 +222,7 @@ public void shutdown() { isRunning = false; executorService.shutdownNow(); scheduledExecutorService.shutdown(); + eventForwardService.shutdownNow(); } public TaskGroupContext getExecutionContext(TaskGroupLocation taskGroupLocation) { @@ -619,6 +667,13 @@ public void printTaskExecutionRuntimeInfo() { } } + public void reportEvent(Event e) { + while (!eventBuffer.offer(e)) { + eventBuffer.poll(); + logger.warning("Event buffer is full, discard the oldest event"); + } + } + private final class BlockingWorker implements Runnable { private final TaskTracker tracker; diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandler.java 
b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandler.java index f1e0fe9ac8f..fab3201e5ed 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandler.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandler.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.engine.server.event; import org.apache.seatunnel.shade.com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.seatunnel.shade.com.google.common.annotations.VisibleForTesting; import org.apache.seatunnel.shade.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.seatunnel.api.event.Event; @@ -104,7 +105,8 @@ public void handle(Event event) { completionStage.toCompletableFuture().join(); } - private void report() throws IOException { + @VisibleForTesting + synchronized void report() throws IOException { long headSequence = ringbuffer.headSequence(); if (headSequence > committedEventIndex) { log.warn( diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventListener.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventListener.java index 4e834b828c4..9b252d35c21 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventListener.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventListener.java @@ -32,7 +32,7 @@ public class JobEventListener implements EventListener { @Override public void onEvent(Event event) { event.setJobId(String.valueOf(taskLocation.getJobId())); - JobEventReportOperation evenCollectOperation = new JobEventReportOperation(event); - taskExecutionContext.sendToMaster(evenCollectOperation).join(); + + 
taskExecutionContext.getTaskExecutionService().reportEvent(event); } } diff --git a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventReportOperation.java b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventReportOperation.java index cc2500f3b31..9e3da06cf05 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventReportOperation.java +++ b/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/event/JobEventReportOperation.java @@ -18,6 +18,7 @@ package org.apache.seatunnel.engine.server.event; import org.apache.seatunnel.api.event.Event; +import org.apache.seatunnel.api.event.EventProcessor; import org.apache.seatunnel.engine.server.SeaTunnelServer; import org.apache.seatunnel.engine.server.serializable.TaskDataSerializerHook; @@ -33,24 +34,28 @@ import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; +import java.util.List; @NoArgsConstructor @AllArgsConstructor public class JobEventReportOperation extends Operation implements IdentifiedDataSerializable { - private Event event; + private List events; @Override public void run() throws Exception { SeaTunnelServer server = getService(); - server.getCoordinatorService().getEventProcessor().process(event); + EventProcessor processor = server.getCoordinatorService().getEventProcessor(); + for (Event event : events) { + processor.process(event); + } } @Override protected void writeInternal(ObjectDataOutput out) throws IOException { try (ByteArrayOutputStream byteOut = new ByteArrayOutputStream(); ObjectOutputStream objectOut = new ObjectOutputStream(byteOut)) { - objectOut.writeObject(event); + objectOut.writeObject(events); objectOut.flush(); out.writeByteArray(byteOut.toByteArray()); } @@ -60,7 +65,7 @@ protected void writeInternal(ObjectDataOutput out) throws IOException { 
protected void readInternal(ObjectDataInput in) throws IOException { try (ByteArrayInputStream byteIn = new ByteArrayInputStream(in.readByteArray()); ObjectInputStream objectIn = new ObjectInputStream(byteIn)) { - event = (Event) objectIn.readObject(); + events = (List) objectIn.readObject(); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } diff --git a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandlerTest.java b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandlerTest.java index e5a978d486e..72e0907490b 100644 --- a/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandlerTest.java +++ b/seatunnel-engine/seatunnel-engine-server/src/test/java/org/apache/seatunnel/engine/server/event/JobEventHttpReportHandlerTest.java @@ -28,6 +28,8 @@ import org.junit.jupiter.api.Test; import com.hazelcast.config.Config; +import com.hazelcast.config.RingbufferConfig; +import com.hazelcast.config.RingbufferStoreConfig; import com.hazelcast.core.Hazelcast; import com.hazelcast.core.HazelcastInstance; import com.hazelcast.ringbuffer.Ringbuffer; @@ -38,38 +40,60 @@ import lombok.Getter; import lombok.NoArgsConstructor; import lombok.Setter; +import lombok.extern.slf4j.Slf4j; import okio.Buffer; import java.io.IOException; import java.time.Duration; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; import static org.awaitility.Awaitility.given; +@Slf4j public class JobEventHttpReportHandlerTest { + private static final String ringBufferName = "test"; + private static final int capacity = 1000; private static HazelcastInstance hazelcast; private static MockWebServer mockWebServer; @BeforeAll public static void before() throws IOException { - hazelcast = Hazelcast.newHazelcastInstance(); + Config 
config = new Config(); + config.setRingbufferConfigs( + Collections.singletonMap( + ringBufferName, + new RingbufferConfig(ringBufferName) + .setCapacity(capacity) + .setBackupCount(0) + .setAsyncBackupCount(1) + .setTimeToLiveSeconds(0) + .setRingbufferStoreConfig( + new RingbufferStoreConfig().setEnabled(false)))); + hazelcast = Hazelcast.newHazelcastInstance(config); mockWebServer = new MockWebServer(); mockWebServer.start(); - mockWebServer.enqueue(new MockResponse().setResponseCode(200)); + for (int i = 0; i < capacity; i++) { + mockWebServer.enqueue(new MockResponse().setResponseCode(200)); + } } @AfterAll public static void after() throws IOException { hazelcast.shutdown(); - mockWebServer.shutdown(); + try { + mockWebServer.shutdown(); + } catch (Exception e) { + log.error("Failed to shutdown mockWebServer", e); + } } @Test public void testReportEvent() throws IOException, InterruptedException { int maxEvents = 1000; - Ringbuffer ringbuffer = createRingBuffer(maxEvents); + Ringbuffer ringbuffer = hazelcast.getRingbuffer(ringBufferName); JobEventHttpReportHandler handler = new JobEventHttpReportHandler( mockWebServer.url("/api").toString(), Duration.ofSeconds(1), ringbuffer); @@ -80,6 +104,7 @@ public void testReportEvent() throws IOException, InterruptedException { .await() .atMost(10, TimeUnit.SECONDS) .until(() -> mockWebServer.getRequestCount(), count -> count > 0); + handler.report(); handler.close(); List events = new ArrayList<>(); @@ -100,21 +125,6 @@ public void testReportEvent() throws IOException, InterruptedException { } } - private Ringbuffer createRingBuffer(int capacity) { - String ringBufferName = "test"; - hazelcast - .getConfig() - .addRingBufferConfig( - new Config() - .getRingbufferConfig(ringBufferName) - .setCapacity(capacity) - .setBackupCount(0) - .setAsyncBackupCount(1) - .setTimeToLiveSeconds(0)); - Ringbuffer ringbuffer = hazelcast.getRingbuffer(ringBufferName); - return ringbuffer; - } - @Getter @Setter @NoArgsConstructor