diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml
index bdf29edb9be..b188c661483 100644
--- a/.github/workflows/backend.yml
+++ b/.github/workflows/backend.yml
@@ -16,13 +16,9 @@
#
name: Backend
+
on:
- push:
- pull_request:
- branches:
- - dev
- paths-ignore:
- - 'seatunnel-ui/**'
+ workflow_call:
concurrency:
group: backend-${{ github.event.pull_request.number || github.ref }}
@@ -30,7 +26,6 @@ concurrency:
jobs:
license-header:
- if: github.repository == '${{github.actor}}/seatunnel'
name: License header
runs-on: ubuntu-latest
timeout-minutes: 10
@@ -42,7 +37,6 @@ jobs:
uses: apache/skywalking-eyes@985866ce7e324454f61e22eb2db2e998db09d6f3
code-style:
- if: github.repository == '${{github.actor}}/seatunnel'
name: Code style
runs-on: ubuntu-latest
timeout-minutes: 10
@@ -54,7 +48,6 @@ jobs:
run: ./mvnw --batch-mode --quiet --no-snapshot-updates clean spotless:check
dead-link:
- if: github.repository == '${{github.actor}}/seatunnel'
name: Dead links
runs-on: ubuntu-latest
timeout-minutes: 30
@@ -67,7 +60,6 @@ jobs:
done
sanity-check:
- if: github.repository == '${{github.actor}}/seatunnel'
name: Sanity check results
needs: [ license-header, code-style, dead-link ]
runs-on: ubuntu-latest
@@ -81,7 +73,6 @@ jobs:
changes:
runs-on: ubuntu-latest
- if: github.repository == '${{github.actor}}/seatunnel'
timeout-minutes: 10
outputs:
api: ${{ steps.filter.outputs.api }}
@@ -309,7 +300,7 @@ jobs:
- name: run updated modules integration test (part-1)
if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != ''
run: |
- sub_modules=`python tools/update_modules_check/update_modules_check.py sub_update_it_module ${{needs.changes.outputs.it-modules}} 5 0`
+ sub_modules=`python tools/update_modules_check/update_modules_check.py sub_update_it_module ${{needs.changes.outputs.it-modules}} 7 0`
./mvnw -T 1C -B verify -DskipUT=true -DskipIT=false -D"license.skipAddThirdParty"=true --no-snapshot-updates -pl $sub_modules -am -Pci
env:
MAVEN_OPTS: -Xmx2048m
@@ -334,7 +325,7 @@ jobs:
- name: run updated modules integration test (part-2)
if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != ''
run: |
- sub_modules=`python tools/update_modules_check/update_modules_check.py sub_update_it_module ${{needs.changes.outputs.it-modules}} 5 1`
+ sub_modules=`python tools/update_modules_check/update_modules_check.py sub_update_it_module ${{needs.changes.outputs.it-modules}} 7 1`
if [ ! -z $sub_modules ]; then
./mvnw -T 1C -B verify -DskipUT=true -DskipIT=false -D"license.skipAddThirdParty"=true --no-snapshot-updates -pl $sub_modules -am -Pci
else
@@ -363,7 +354,7 @@ jobs:
- name: run updated modules integration test (part-3)
if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != ''
run: |
- sub_modules=`python tools/update_modules_check/update_modules_check.py sub_update_it_module ${{needs.changes.outputs.it-modules}} 5 2`
+ sub_modules=`python tools/update_modules_check/update_modules_check.py sub_update_it_module ${{needs.changes.outputs.it-modules}} 7 2`
if [ ! -z $sub_modules ]; then
./mvnw -T 1C -B verify -DskipUT=true -DskipIT=false -D"license.skipAddThirdParty"=true --no-snapshot-updates -pl $sub_modules -am -Pci
else
@@ -392,7 +383,7 @@ jobs:
- name: run updated modules integration test (part-4)
if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != ''
run: |
- sub_modules=`python tools/update_modules_check/update_modules_check.py sub_update_it_module ${{needs.changes.outputs.it-modules}} 5 3`
+ sub_modules=`python tools/update_modules_check/update_modules_check.py sub_update_it_module ${{needs.changes.outputs.it-modules}} 7 3`
if [ ! -z $sub_modules ]; then
./mvnw -T 1C -B verify -DskipUT=true -DskipIT=false -D"license.skipAddThirdParty"=true --no-snapshot-updates -pl $sub_modules -am -Pci
else
@@ -401,33 +392,89 @@ jobs:
env:
MAVEN_OPTS: -Xmx2048m
updated-modules-integration-test-part-5:
- needs: [ changes, sanity-check ]
- if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != ''
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- java: [ '8', '11' ]
- os: [ 'ubuntu-latest' ]
- timeout-minutes: 90
- steps:
- - uses: actions/checkout@v2
- - name: Set up JDK ${{ matrix.java }}
- uses: actions/setup-java@v3
- with:
- java-version: ${{ matrix.java }}
- distribution: 'temurin'
- cache: 'maven'
- - name: run updated modules integration test (part-5)
- if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != ''
- run: |
- sub_modules=`python tools/update_modules_check/update_modules_check.py sub_update_it_module ${{needs.changes.outputs.it-modules}} 5 4`
- if [ ! -z $sub_modules ]; then
- ./mvnw -T 1C -B verify -DskipUT=true -DskipIT=false -D"license.skipAddThirdParty"=true --no-snapshot-updates -pl $sub_modules -am -Pci
- else
- echo "sub modules is empty, skipping"
- fi
- env:
- MAVEN_OPTS: -Xmx2048m
+ needs: [ changes, sanity-check ]
+ if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != ''
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ java: [ '8', '11' ]
+ os: [ 'ubuntu-latest' ]
+ timeout-minutes: 90
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up JDK ${{ matrix.java }}
+ uses: actions/setup-java@v3
+ with:
+ java-version: ${{ matrix.java }}
+ distribution: 'temurin'
+ cache: 'maven'
+ - name: run updated modules integration test (part-5)
+ if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != ''
+ run: |
+ sub_modules=`python tools/update_modules_check/update_modules_check.py sub_update_it_module ${{needs.changes.outputs.it-modules}} 7 4`
+ if [ ! -z $sub_modules ]; then
+ ./mvnw -T 1C -B verify -DskipUT=true -DskipIT=false -D"license.skipAddThirdParty"=true --no-snapshot-updates -pl $sub_modules -am -Pci
+ else
+ echo "sub modules is empty, skipping"
+ fi
+ env:
+ MAVEN_OPTS: -Xmx2048m
+ updated-modules-integration-test-part-6:
+ needs: [ changes, sanity-check ]
+ if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != ''
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ java: [ '8', '11' ]
+ os: [ 'ubuntu-latest' ]
+ timeout-minutes: 90
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up JDK ${{ matrix.java }}
+ uses: actions/setup-java@v3
+ with:
+ java-version: ${{ matrix.java }}
+ distribution: 'temurin'
+ cache: 'maven'
+ - name: run updated modules integration test (part-6)
+ if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != ''
+ run: |
+ sub_modules=`python tools/update_modules_check/update_modules_check.py sub_update_it_module ${{needs.changes.outputs.it-modules}} 7 5`
+ if [ ! -z $sub_modules ]; then
+ ./mvnw -T 1C -B verify -DskipUT=true -DskipIT=false -D"license.skipAddThirdParty"=true --no-snapshot-updates -pl $sub_modules -am -Pci
+ else
+ echo "sub modules is empty, skipping"
+ fi
+ env:
+ MAVEN_OPTS: -Xmx2048m
+ updated-modules-integration-test-part-7:
+ needs: [ changes, sanity-check ]
+ if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != ''
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ java: [ '8', '11' ]
+ os: [ 'ubuntu-latest' ]
+ timeout-minutes: 90
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up JDK ${{ matrix.java }}
+ uses: actions/setup-java@v3
+ with:
+ java-version: ${{ matrix.java }}
+ distribution: 'temurin'
+ cache: 'maven'
+ - name: run updated modules integration test (part-7)
+ if: needs.changes.outputs.api == 'false' && needs.changes.outputs.it-modules != ''
+ run: |
+ sub_modules=`python tools/update_modules_check/update_modules_check.py sub_update_it_module ${{needs.changes.outputs.it-modules}} 7 6`
+ if [ ! -z $sub_modules ]; then
+ ./mvnw -T 1C -B verify -DskipUT=true -DskipIT=false -D"license.skipAddThirdParty"=true --no-snapshot-updates -pl $sub_modules -am -Pci
+ else
+ echo "sub modules is empty, skipping"
+ fi
+ env:
+ MAVEN_OPTS: -Xmx2048m
engine-v2-it:
needs: [ changes, sanity-check ]
if: needs.changes.outputs.api == 'true'
diff --git a/.github/workflows/build_main.yml b/.github/workflows/build_main.yml
new file mode 100644
index 00000000000..f4816940a88
--- /dev/null
+++ b/.github/workflows/build_main.yml
@@ -0,0 +1,32 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+name: "Build"
+
+on:
+ push:
+ branches:
+ - '**'
+
+jobs:
+ call-build-and-test:
+ permissions:
+ packages: write
+ name: Run
+ uses: ./.github/workflows/backend.yml
diff --git a/.github/workflows/license.yml b/.github/workflows/license.yml
deleted file mode 100644
index 032d1d82ec8..00000000000
--- a/.github/workflows/license.yml
+++ /dev/null
@@ -1,73 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-name: License
-
-on:
- push:
- pull_request:
- branches:
- - dev
- paths-ignore:
- - '**/*.md'
-
-concurrency:
- group: lc-${{ github.event.pull_request.number || github.ref }}
- cancel-in-progress: true
-
-jobs:
- check-license:
- runs-on: ubuntu-latest
- timeout-minutes: 30
- steps:
- - uses: actions/checkout@v2
- - name: Check License Header
- uses: apache/skywalking-eyes/header@501a28d2fb4a9b962661987e50cf0219631b32ff
- auto-license:
- name: Auto License
- runs-on: ubuntu-latest
- timeout-minutes: 30
- # Have a buggy in https://github.com/apache/seatunnel/pull/1642, Can trigger when commit message contains
- # keyword `[ci-auto-license]`.
- if: "contains(toJSON(github.event.commits.*.message), '[ci-auto-license]')"
- env:
- MAVEN_OPTS: -Xmx2G -Xms2G
- steps:
- - uses: actions/checkout@v2
- - name: Set up JDK 1.8
- uses: actions/setup-java@v2
- with:
- java-version: 8
- distribution: 'adopt'
- - name: Set up Python 3
- uses: actions/setup-python@v2
- with:
- python-version: '3.x'
- architecture: 'x64'
- - name: Cache local Maven repository
- uses: actions/cache@v2
- with:
- path: ~/.m2/repository
- key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
- restore-keys: |
- ${{ runner.os }}-maven-
- - name: Generate THIRD-PARTY
- run: |
- ./mvnw clean license:aggregate-add-third-party -DskipTests -U
- - name: Check LICENSE file
- run: |
- python3 tools/dependencies/license.py seatunnel-dist/target/THIRD-PARTY.txt seatunnel-dist/release-docs/LICENSE true
diff --git a/.github/workflows/notify_test_workflow.yml b/.github/workflows/notify_test_workflow.yml
new file mode 100644
index 00000000000..1fcd1427ba9
--- /dev/null
+++ b/.github/workflows/notify_test_workflow.yml
@@ -0,0 +1,152 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Intentionally has a general name,
+# because the test status check created in GitHub Actions
+# currently randomly picks any associated workflow.
+# So, the name was changed to make sense in that context too.
+# See also https://github.community/t/specify-check-suite-when-creating-a-checkrun/118380/10
+name: On pull request update
+on:
+ pull_request_target:
+ types: [opened, reopened, synchronize]
+
+jobs:
+ notify:
+ name: Notify test workflow
+ runs-on: ubuntu-20.04
+ permissions:
+ actions: read
+ checks: write
+ steps:
+ - name: "Notify test workflow"
+ uses: actions/github-script@v6
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ const endpoint = 'GET /repos/:owner/:repo/actions/workflows/:id/runs?&branch=:branch'
+ const check_run_endpoint = 'GET /repos/:owner/:repo/commits/:ref/check-runs'
+
+ // TODO: Should use pull_request.user and pull_request.user.repos_url?
+ // If a different person creates a commit to another forked repo,
+ // it wouldn't be able to detect.
+ const params = {
+ owner: context.payload.pull_request.head.repo.owner.login,
+ repo: context.payload.pull_request.head.repo.name,
+ id: 'build_main.yml',
+ branch: context.payload.pull_request.head.ref,
+ }
+ const check_run_params = {
+ owner: context.payload.pull_request.head.repo.owner.login,
+ repo: context.payload.pull_request.head.repo.name,
+ ref: context.payload.pull_request.head.ref,
+ }
+
+ console.log('Ref: ' + context.payload.pull_request.head.ref)
+ console.log('SHA: ' + context.payload.pull_request.head.sha)
+
+ // Wait 3 seconds to make sure the fork repository triggered a workflow.
+ await new Promise(r => setTimeout(r, 3000))
+
+ let runs
+ try {
+ runs = await github.request(endpoint, params)
+ } catch (error) {
+ console.error(error)
+ // Assume that runs were not found.
+ }
+
+ const name = 'Build'
+ const head_sha = context.payload.pull_request.head.sha
+ let status = 'queued'
+ console.log('runs: ' + runs)
+ if (!runs || runs.data.workflow_runs.length === 0) {
+ status = 'completed'
+ const conclusion = 'action_required'
+
+ github.rest.checks.create({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ name: name,
+ head_sha: head_sha,
+ status: status,
+ conclusion: conclusion,
+ output: {
+ title: 'Workflow run detection failed',
+ summary: `
+ Unable to detect the workflow run for testing the changes in your PR.
+
+ 1. If you did not enable GitHub Actions in your forked repository, please enable it by clicking the button as shown in the image below. See also [Disabling or limiting GitHub Actions for a repository](https://docs.github.com/en/github/administering-a-repository/disabling-or-limiting-github-actions-for-a-repository) for more details.
+ 2. It is possible your branch is based on the old \`dev\` branch in Apache SeaTunnel, please sync your branch to the latest dev branch. For example as below:
+ \`\`\`bash
+ git fetch upstream
+ git rebase upstream/dev
+ git push origin YOUR_BRANCH --force
+ \`\`\``,
+ images: [
+ {
+ alt: 'enabling workflows button',
+ image_url: 'https://raw.githubusercontent.com/apache/spark/master/.github/workflows/images/workflow-enable-button.png'
+ }
+ ]
+ }
+ })
+ } else {
+ const run_id = runs.data.workflow_runs[0].id
+
+ if (runs.data.workflow_runs[0].head_sha != context.payload.pull_request.head.sha) {
+ throw new Error('There was a new unsynced commit pushed. Please retrigger the workflow.');
+ }
+
+ // Here we get check run ID to provide Check run view instead of Actions view, see also SPARK-37879.
+ const check_runs = await github.request(check_run_endpoint, check_run_params)
+ const check_run_head = check_runs.data.check_runs.filter(r => r.name === "Run / License header")[0]
+
+ if (check_run_head.head_sha != context.payload.pull_request.head.sha) {
+ throw new Error('There was a new unsynced commit pushed. Please retrigger the workflow.');
+ }
+
+ const check_run_url = 'https://github.com/'
+ + context.payload.pull_request.head.repo.full_name
+ + '/runs/'
+ + check_run_head.id
+
+ const actions_url = 'https://github.com/'
+ + context.payload.pull_request.head.repo.full_name
+ + '/actions/runs/'
+ + run_id
+
+ github.rest.checks.create({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ name: name,
+ head_sha: head_sha,
+ status: status,
+ output: {
+ title: 'Test results',
+ summary: '[See test results](' + check_run_url + ')',
+ text: JSON.stringify({
+ owner: context.payload.pull_request.head.repo.owner.login,
+ repo: context.payload.pull_request.head.repo.name,
+ run_id: run_id
+ })
+ },
+ details_url: actions_url,
+ })
+ }
diff --git a/.github/workflows/update_build_status.yml b/.github/workflows/update_build_status.yml
new file mode 100644
index 00000000000..05cf4914a25
--- /dev/null
+++ b/.github/workflows/update_build_status.yml
@@ -0,0 +1,108 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+name: Update build status workflow
+
+on:
+ schedule:
+ - cron: "*/15 * * * *"
+
+jobs:
+ update:
+ name: Update build status
+ runs-on: ubuntu-20.04
+ permissions:
+ actions: read
+ checks: write
+ steps:
+ - name: "Update build status"
+ uses: actions/github-script@v6
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ const endpoint = 'GET /repos/:owner/:repo/pulls?state=:state'
+ const params = {
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ state: 'open'
+ }
+
+ // See https://docs.github.com/en/graphql/reference/enums#mergestatestatus
+ const maybeReady = ['behind', 'clean', 'draft', 'has_hooks', 'unknown', 'unstable'];
+
+ // Iterate open PRs
+ for await (const prs of github.paginate.iterator(endpoint,params)) {
+ // Each page
+ for await (const pr of prs.data) {
+ console.log('SHA: ' + pr.head.sha)
+ console.log(' Mergeable status: ' + pr.mergeable_state)
+ if (pr.mergeable_state == null || maybeReady.includes(pr.mergeable_state)) {
+ const checkRuns = await github.request('GET /repos/{owner}/{repo}/commits/{ref}/check-runs', {
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ ref: pr.head.sha
+ })
+
+                  // Iterate over the GitHub checks in the PR
+ for await (const cr of checkRuns.data.check_runs) {
+ if (cr.name == 'Build' && cr.conclusion != "action_required") {
+ // text contains parameters to make request in JSON.
+ const params = JSON.parse(cr.output.text)
+
+ // Get the workflow run in the forked repository
+ let run
+ try {
+ run = await github.request('GET /repos/{owner}/{repo}/actions/runs/{run_id}', params)
+ } catch (error) {
+ console.error(error)
+ // Run not found. This can happen when the PR author removes GitHub Actions runs or
+                  // disables GitHub Actions.
+ continue
+ }
+
+ // Keep syncing the status of the checks
+ if (run.data.status == 'completed') {
+ console.log(' Run ' + cr.id + ': set status (' + run.data.status + ') and conclusion (' + run.data.conclusion + ')')
+ const response = await github.request('PATCH /repos/{owner}/{repo}/check-runs/{check_run_id}', {
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ check_run_id: cr.id,
+ output: cr.output,
+ status: run.data.status,
+ conclusion: run.data.conclusion,
+ details_url: run.data.details_url
+ })
+ } else {
+ console.log(' Run ' + cr.id + ': set status (' + run.data.status + ')')
+ const response = await github.request('PATCH /repos/{owner}/{repo}/check-runs/{check_run_id}', {
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ check_run_id: cr.id,
+ output: cr.output,
+ status: run.data.status,
+ details_url: run.data.details_url
+ })
+ }
+
+ break
+ }
+ }
+ }
+ }
+ }
diff --git a/.gitignore b/.gitignore
index 25977068e4f..74311a0fa05 100644
--- a/.gitignore
+++ b/.gitignore
@@ -48,4 +48,5 @@ test.conf
spark-warehouse
*.flattened-pom.xml
-seatunnel-examples
\ No newline at end of file
+seatunnel-examples
+/lib/*
\ No newline at end of file
diff --git a/bin/install-plugin.cmd b/bin/install-plugin.cmd
new file mode 100644
index 00000000000..35cadf94c2d
--- /dev/null
+++ b/bin/install-plugin.cmd
@@ -0,0 +1,54 @@
+@echo off
+REM Licensed to the Apache Software Foundation (ASF) under one or more
+REM contributor license agreements. See the NOTICE file distributed with
+REM this work for additional information regarding copyright ownership.
+REM The ASF licenses this file to You under the Apache License, Version 2.0
+REM (the "License"); you may not use this file except in compliance with
+REM the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+REM This script is used to download the connector plug-ins required during the running process.
+REM All are downloaded by default. You can also choose what you need.
+REM You only need to configure the plug-in name in config\plugin_config.
+
+REM Get seatunnel home
+set "SEATUNNEL_HOME=%~dp0..\"
+echo Set SEATUNNEL_HOME to [%SEATUNNEL_HOME%]
+
+REM Connector default version is 2.3.3, you can also choose a custom version. e.g. for 2.1.2: install-plugin.cmd 2.1.2
+set "version=2.3.3"
+if not "%~1"=="" set "version=%~1"
+echo Install hadoop shade jar, usage version is %version%
+
+REM Create the lib directory
+if not exist "%SEATUNNEL_HOME%\lib" (
+ mkdir "%SEATUNNEL_HOME%\lib"
+ echo create lib directory
+)
+
+call "%SEATUNNEL_HOME%\mvnw.cmd" dependency:get -DgroupId="org.apache.seatunnel" -Dclassifier="optional" -DartifactId="seatunnel-hadoop3-3.1.4-uber" -Dversion="%version%" -Ddest="%SEATUNNEL_HOME%\lib"
+
+echo Install SeaTunnel connectors plugins, usage version is %version%
+
+REM Create the connectors directory
+if not exist "%SEATUNNEL_HOME%\connectors" (
+ mkdir "%SEATUNNEL_HOME%\connectors"
+ echo create connectors directory
+)
+
+for /f "usebackq delims=" %%a in ("%SEATUNNEL_HOME%\config\plugin_config") do (
+ set "line=%%a"
+ setlocal enabledelayedexpansion
+ if "!line:~0,1!" neq "-" if "!line:~0,1!" neq "#" (
+ echo install connector : !line!
+ call "%SEATUNNEL_HOME%\mvnw.cmd" dependency:get -DgroupId="org.apache.seatunnel" -DartifactId="!line!" -Dversion="%version%" -Ddest="%SEATUNNEL_HOME%\connectors"
+ )
+ endlocal
+)
diff --git a/bin/install-plugin.sh b/bin/install-plugin.sh
index b8a1cca7149..f8dd59b3679 100755
--- a/bin/install-plugin.sh
+++ b/bin/install-plugin.sh
@@ -43,18 +43,11 @@ if [ ! -d ${SEATUNNEL_HOME}/connectors ];
echo "create connectors directory"
fi
-# create the seatunnel connectors directory (for v2)
-if [ ! -d ${SEATUNNEL_HOME}/connectors/seatunnel ];
- then
- mkdir ${SEATUNNEL_HOME}/connectors/seatunnel
- echo "create seatunnel connectors directory"
-fi
-
while read line; do
if [ ${line:0:1} != "-" ] && [ ${line:0:1} != "#" ]
then
echo "install connector : " $line
- ${SEATUNNEL_HOME}/mvnw dependency:get -DgroupId=org.apache.seatunnel -DartifactId=${line} -Dversion=${version} -Ddest=${SEATUNNEL_HOME}/connectors/seatunnel
+ ${SEATUNNEL_HOME}/mvnw dependency:get -DgroupId=org.apache.seatunnel -DartifactId=${line} -Dversion=${version} -Ddest=${SEATUNNEL_HOME}/connectors
fi
done < ${SEATUNNEL_HOME}/config/plugin_config
\ No newline at end of file
diff --git a/config/seatunnel-env.cmd b/config/seatunnel-env.cmd
new file mode 100644
index 00000000000..79c2d3c117c
--- /dev/null
+++ b/config/seatunnel-env.cmd
@@ -0,0 +1,21 @@
+@echo off
+REM Licensed to the Apache Software Foundation (ASF) under one or more
+REM contributor license agreements. See the NOTICE file distributed with
+REM this work for additional information regarding copyright ownership.
+REM The ASF licenses this file to You under the Apache License, Version 2.0
+REM (the "License"); you may not use this file except in compliance with
+REM the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+REM Home directory of spark distribution.
+if "%SPARK_HOME%" == "" set "SPARK_HOME=C:\Program Files\spark"
+
+REM Home directory of flink distribution.
+if "%FLINK_HOME%" == "" set "FLINK_HOME=C:\Program Files\flink"
\ No newline at end of file
diff --git a/docs/en/concept/speed-limit.md b/docs/en/concept/speed-limit.md
new file mode 100644
index 00000000000..0bb451b38b2
--- /dev/null
+++ b/docs/en/concept/speed-limit.md
@@ -0,0 +1,42 @@
+# Speed Control
+
+## Introduction
+
+The SeaTunnel provides a powerful speed control feature that allows you to manage the rate at which data is synchronized.
+This functionality is essential when you need to ensure efficient and controlled data transfer between systems.
+The speed control is primarily governed by two key parameters: `read_limit.rows_per_second` and `read_limit.bytes_per_second`.
+This document will guide you through the usage of these parameters and how to leverage them effectively.
+
+## Support Those Engines
+
+> SeaTunnel Zeta
+
+## Configuration
+
+To use the speed control feature, you need to configure the `read_limit.rows_per_second` or `read_limit.bytes_per_second` parameters in your job config.
+
+Example env config in your config file:
+
+```hocon
+env {
+ job.mode=STREAMING
+ job.name=SeaTunnel_Job
+ read_limit.bytes_per_second=7000000
+ read_limit.rows_per_second=400
+}
+source {
+ MySQL-CDC {
+ // ignore...
+ }
+}
+transform {
+}
+sink {
+ Console {
+ }
+}
+```
+
+We have placed `read_limit.bytes_per_second` and `read_limit.rows_per_second` in the `env` parameters, completing the speed control configuration.
+You can configure both of these parameters simultaneously or choose to configure only one of them. Each configured value represents the maximum rate limit applied to each individual thread.
+Therefore, when configuring the respective values, please take into account the parallelism of your tasks.
diff --git a/docs/en/connector-v2/sink/Jdbc.md b/docs/en/connector-v2/sink/Jdbc.md
index 755de8bb9a7..394fadde801 100644
--- a/docs/en/connector-v2/sink/Jdbc.md
+++ b/docs/en/connector-v2/sink/Jdbc.md
@@ -47,6 +47,7 @@ support `Xa transactions`. You can set `is_exactly_once=true` to enable it.
| max_commit_attempts | Int | No | 3 |
| transaction_timeout_sec | Int | No | -1 |
| auto_commit | Boolean | No | true |
+| field_ide | String | No | - |
| common-options | | no | - |
### driver [string]
@@ -136,6 +137,12 @@ exactly-once semantics
Automatic transaction commit is enabled by default
+### field_ide [String]
+
+The field "field_ide" is used to identify whether the field needs to be converted to uppercase or lowercase when
+synchronizing from the source to the sink. "ORIGINAL" indicates no conversion is needed, "UPPERCASE" indicates
+conversion to uppercase, and "LOWERCASE" indicates conversion to lowercase.
+
### common options
Sink plugin common parameters, please refer to [Sink Common Options](common-options.md) for details
@@ -169,6 +176,7 @@ there are some reference value for params above.
| Redshift | com.amazon.redshift.jdbc42.Driver | jdbc:redshift://localhost:5439/testdb | com.amazon.redshift.xa.RedshiftXADataSource | https://mvnrepository.com/artifact/com.amazon.redshift/redshift-jdbc42 |
| Snowflake | net.snowflake.client.jdbc.SnowflakeDriver | jdbc:snowflake://.snowflakecomputing.com | / | https://mvnrepository.com/artifact/net.snowflake/snowflake-jdbc |
| Vertica | com.vertica.jdbc.Driver | jdbc:vertica://localhost:5433 | / | https://repo1.maven.org/maven2/com/vertica/jdbc/vertica-jdbc/12.0.3-0/vertica-jdbc-12.0.3-0.jar |
+| Kingbase | com.kingbase8.Driver | jdbc:kingbase8://localhost:54321/db_test | / | https://repo1.maven.org/maven2/cn/com/kingbase/kingbase8/8.6.0/kingbase8-8.6.0.jar |
| OceanBase | com.oceanbase.jdbc.Driver | jdbc:oceanbase://localhost:2881 | / | https://repo1.maven.org/maven2/com/oceanbase/oceanbase-client/2.4.3/oceanbase-client-2.4.3.jar |
## Example
diff --git a/docs/en/connector-v2/sink/Kingbase.md b/docs/en/connector-v2/sink/Kingbase.md
new file mode 100644
index 00000000000..b92b12fc420
--- /dev/null
+++ b/docs/en/connector-v2/sink/Kingbase.md
@@ -0,0 +1,168 @@
+# Kingbase
+
+> JDBC Kingbase Sink Connector
+
+## Support Connector Version
+
+- 8.6
+
+## Support Those Engines
+
+> Spark
+> Flink
+> SeaTunnel Zeta
+
+## Key Features
+
+- [ ] [exactly-once](../../concept/connector-v2-features.md)
+- [ ] [cdc](../../concept/connector-v2-features.md)
+
+## Description
+
+> Use `Xa transactions` to ensure `exactly-once`. So `exactly-once` is only supported for databases that
+> support `Xa transactions`. You can set `is_exactly_once=true` to enable it. Kingbase currently does not support this feature.
+
+## Supported DataSource Info
+
+| Datasource | Supported versions | Driver | Url | Maven |
+|------------|--------------------|----------------------|------------------------------------------|------------------------------------------------------------------------------------------------|
+| Kingbase | 8.6 | com.kingbase8.Driver | jdbc:kingbase8://localhost:54321/db_test | [Download](https://repo1.maven.org/maven2/cn/com/kingbase/kingbase8/8.6.0/kingbase8-8.6.0.jar) |
+
+## Database Dependency
+
+> Please download the support list corresponding to 'Maven' and copy it to the '$SEATUNNEL_HOME/plugins/jdbc/lib/'
+> working directory
+> For example: cp kingbase8-8.6.0.jar $SEATUNNEL_HOME/plugins/jdbc/lib/
+
+## Data Type Mapping
+
+| Kingbase Data type | SeaTunnel Data type |
+|----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
+| BOOL | BOOLEAN |
+| INT2 | SHORT |
+| SMALLSERIAL
SERIAL
INT4 | INT |
+| INT8
BIGSERIAL | BIGINT |
+| FLOAT4 | FLOAT |
+| FLOAT8 | DOUBLE |
+| NUMERIC                                      | DECIMAL((Get the designated column's specified column size),
(Gets the designated column's number of digits to right of the decimal point.))                                                                      |
+| BPCHAR
CHARACTER
VARCHAR
TEXT | STRING |
+| TIMESTAMP | LOCALDATETIME |
+| TIME | LOCALTIME |
+| DATE | LOCALDATE |
+| Other data type | Not supported yet |
+
+## Sink Options
+
+| Name | Type | Required | Default | Description |
+|-------------------------------------------|---------|----------|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| url                                       | String  | Yes      | -       | The URL of the JDBC connection. Refer to a case: jdbc:kingbase8://127.0.0.1:54321/dbname                                                                                                                                                       |
+| driver                                    | String  | Yes      | -       | The jdbc class name used to connect to the remote data source,
 for Kingbase the value is `com.kingbase8.Driver`. |
+| user | String | No | - | Connection instance user name |
+| password | String | No | - | Connection instance password |
+| query | String | No | - | Use this sql write upstream input datas to database. e.g `INSERT ...`,`query` have the higher priority |
+| database | String | No | - | Use this `database` and `table-name` auto-generate sql and receive upstream input datas write to database.
This option is mutually exclusive with `query` and has a higher priority. |
+| table | String | No | - | Use database and this table-name auto-generate sql and receive upstream input datas write to database.
This option is mutually exclusive with `query` and has a higher priority. |
+| primary_keys | Array | No | - | This option is used to support operations such as `insert`, `delete`, and `update` when automatically generate sql. |
+| support_upsert_by_query_primary_key_exist | Boolean | No | false | Choose to use INSERT sql, UPDATE sql to process update events(INSERT, UPDATE_AFTER) based on query primary key exists. This configuration is only used when database unsupport upsert syntax. **Note**: that this method has low performance |
+| connection_check_timeout_sec | Int | No | 30 | The time in seconds to wait for the database operation used to validate the connection to complete. |
+| max_retries | Int | No | 0 | The number of retries to submit failed (executeBatch) |
+| batch_size | Int | No | 1000 | For batch writing, when the number of buffered records reaches the number of `batch_size` or the time reaches `checkpoint.interval`
, the data will be flushed into the database |
+| is_exactly_once                           | Boolean | No       | false   | Whether to enable exactly-once semantics, which will use Xa transactions. If on, you need to
 set `xa_data_source_class_name`. Kingbase currently does not support this feature. |
+| generate_sink_sql | Boolean | No | false | Generate sql statements based on the database table you want to write to |
+| xa_data_source_class_name                 | String  | No       | -       | The xa data source class name of the database Driver. Kingbase currently does not support this feature. |
+| max_commit_attempts | Int | No | 3 | The number of retries for transaction commit failures |
+| transaction_timeout_sec | Int | No | -1 | The timeout after the transaction is opened, the default is -1 (never timeout). Note that setting the timeout may affect
exactly-once semantics |
+| auto_commit | Boolean | No | true | Automatic transaction commit is enabled by default |
+| common-options | | no | - | Sink plugin common parameters, please refer to [Sink Common Options](common-options.md) for details |
+
+### Tips
+
+> If partition_column is not set, it will run in single concurrency, and if partition_column is set, it will be executed
+> in parallel according to the concurrency of tasks.
+
+## Task Example
+
+### Simple:
+
+> This example defines a SeaTunnel synchronization task that automatically generates data through FakeSource and sends
+> it to JDBC Sink. FakeSource generates a total of 16 rows of data (row.num=16), with each row having 12 fields. The final target table is test_table will also be 16 rows of data in the table.
+> Before
+> run this job, you need to create database test and table test_table in your Kingbase. And if you have not yet installed and
+> deployed SeaTunnel, you need to follow the instructions in [Install SeaTunnel](../../start-v2/locally/deployment.md)
+> to
+> install and deploy SeaTunnel. And then follow the instructions
+> in [Quick Start With SeaTunnel Engine](../../start-v2/locally/quick-start-seatunnel-engine.md) to run this job.
+
+```
+# Defining the runtime environment
+env {
+ # You can set flink configuration here
+ execution.parallelism = 1
+ job.mode = "BATCH"
+}
+
+source {
+ # This is a example source plugin **only for test and demonstrate the feature source plugin**
+ FakeSource {
+ parallelism = 1
+ result_table_name = "fake"
+ row.num = 16
+ schema = {
+ fields {
+ c_string = string
+ c_boolean = boolean
+ c_tinyint = tinyint
+ c_smallint = smallint
+ c_int = int
+ c_bigint = bigint
+ c_float = float
+ c_double = double
+ c_decimal = "decimal(30, 8)"
+ c_date = date
+ c_time = time
+ c_timestamp = timestamp
+ }
+ }
+ }
+ # If you would like to get more information about how to configure seatunnel and see full list of source plugins,
+ # please go to https://seatunnel.apache.org/docs/category/source-v2
+}
+
+transform {
+ # If you would like to get more information about how to configure seatunnel and see full list of transform plugins,
+ # please go to https://seatunnel.apache.org/docs/category/transform-v2
+}
+
+sink {
+ jdbc {
+ url = "jdbc:kingbase8://127.0.0.1:54321/dbname"
+ driver = "com.kingbase8.Driver"
+ user = "root"
+ password = "123456"
+ query = "insert into test_table(c_string,c_boolean,c_tinyint,c_smallint,c_int,c_bigint,c_float,c_double,c_decimal,c_date,c_time,c_timestamp) values(?,?,?,?,?,?,?,?,?,?,?,?)"
+ }
+ # If you would like to get more information about how to configure seatunnel and see full list of sink plugins,
+ # please go to https://seatunnel.apache.org/docs/category/sink-v2
+}
+```
+
+### Generate Sink SQL
+
+> In this example, you do not need to write complex sql statements; you can configure the database name and table name to automatically
+> generate insert statements for you
+
+```
+sink {
+ jdbc {
+ url = "jdbc:kingbase8://127.0.0.1:54321/dbname"
+ driver = "com.kingbase8.Driver"
+ user = "root"
+ password = "123456"
+ # Automatically generate sql statements based on database table names
+ generate_sink_sql = true
+ database = test
+ table = test_table
+ }
+}
+```
+
diff --git a/docs/en/connector-v2/sink/Mysql.md b/docs/en/connector-v2/sink/Mysql.md
index 6c01c35ee8c..860f071df0e 100644
--- a/docs/en/connector-v2/sink/Mysql.md
+++ b/docs/en/connector-v2/sink/Mysql.md
@@ -78,6 +78,7 @@ semantics (using XA transaction guarantee).
| max_commit_attempts | Int | No | 3 | The number of retries for transaction commit failures |
| transaction_timeout_sec | Int | No | -1 | The timeout after the transaction is opened, the default is -1 (never timeout). Note that setting the timeout may affect
exactly-once semantics |
| auto_commit | Boolean | No | true | Automatic transaction commit is enabled by default |
+| field_ide | String | No | - | Identify whether the field needs to be converted when synchronizing from the source to the sink. `ORIGINAL` indicates no conversion is needed;`UPPERCASE` indicates conversion to uppercase;`LOWERCASE` indicates conversion to lowercase. |
| common-options | | no | - | Sink plugin common parameters, please refer to [Sink Common Options](common-options.md) for details |
### Tips
@@ -122,7 +123,7 @@ transform {
sink {
jdbc {
- url = "jdbc:mysql://localhost:3306/test"
+ url = "jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
driver = "com.mysql.cj.jdbc.Driver"
user = "root"
password = "123456"
@@ -140,7 +141,7 @@ sink {
```
sink {
jdbc {
- url = "jdbc:mysql://localhost:3306/test"
+ url = "jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
driver = "com.mysql.cj.jdbc.Driver"
user = "root"
password = "123456"
@@ -159,7 +160,7 @@ sink {
```
sink {
jdbc {
- url = "jdbc:mysql://localhost:3306/test"
+ url = "jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
driver = "com.mysql.cj.jdbc.Driver"
max_retries = 0
@@ -181,7 +182,7 @@ sink {
```
sink {
jdbc {
- url = "jdbc:mysql://localhost:3306/test"
+ url = "jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
driver = "com.mysql.cj.jdbc.Driver"
user = "root"
password = "123456"
@@ -191,6 +192,7 @@ sink {
database = test
table = sink_table
primary_keys = ["id","name"]
+ field_ide = UPPERCASE
}
}
```
diff --git a/docs/en/connector-v2/sink/PostgreSql.md b/docs/en/connector-v2/sink/PostgreSql.md
index 67e2ed64d95..bcc5616f5ea 100644
--- a/docs/en/connector-v2/sink/PostgreSql.md
+++ b/docs/en/connector-v2/sink/PostgreSql.md
@@ -81,6 +81,7 @@ semantics (using XA transaction guarantee).
| max_commit_attempts | Int | No | 3 | The number of retries for transaction commit failures |
| transaction_timeout_sec | Int | No | -1 | The timeout after the transaction is opened, the default is -1 (never timeout). Note that setting the timeout may affect
exactly-once semantics |
| auto_commit | Boolean | No | true | Automatic transaction commit is enabled by default |
+| field_ide | String | No | - | Identify whether the field needs to be converted when synchronizing from the source to the sink. `ORIGINAL` indicates no conversion is needed;`UPPERCASE` indicates conversion to uppercase;`LOWERCASE` indicates conversion to lowercase. |
| common-options | | no | - | Sink plugin common parameters, please refer to [Sink Common Options](common-options.md) for details |
### Tips
@@ -197,6 +198,7 @@ sink {
database = test
table = sink_table
primary_keys = ["id","name"]
+ field_ide = UPPERCASE
}
}
```
diff --git a/docs/en/connector-v2/source/Clickhouse.md b/docs/en/connector-v2/source/Clickhouse.md
index 7596bf72a8f..d70a8f0e33f 100644
--- a/docs/en/connector-v2/source/Clickhouse.md
+++ b/docs/en/connector-v2/source/Clickhouse.md
@@ -66,7 +66,7 @@ The following example demonstrates how to create a data synchronization job that
```bash
# Set the basic configuration of the task to be performed
env {
- execution.parallelism = 1
+ execution.parallelism = 10
job.mode = "BATCH"
}
diff --git a/docs/en/connector-v2/source/Github.md b/docs/en/connector-v2/source/Github.md
index 5cc6beea76b..900a207e697 100644
--- a/docs/en/connector-v2/source/Github.md
+++ b/docs/en/connector-v2/source/Github.md
@@ -28,7 +28,7 @@ Used to read data from Github.
| body | String | No | - |
| json_field | Config | No | - |
| content_json | String | No | - |
-| poll_interval_ms | int | No | - |
+| poll_interval_millis | int | No | - |
| retry | int | No | - |
| retry_backoff_multiplier_ms | int | No | 100 |
| retry_backoff_max_ms | int | No | 10000 |
@@ -55,7 +55,7 @@ http params
http body
-### poll_interval_ms [int]
+### poll_interval_millis [int]
request http api interval(millis) in stream mode
diff --git a/docs/en/connector-v2/source/Gitlab.md b/docs/en/connector-v2/source/Gitlab.md
index b2c17c9f246..ff3b6bc6423 100644
--- a/docs/en/connector-v2/source/Gitlab.md
+++ b/docs/en/connector-v2/source/Gitlab.md
@@ -28,7 +28,7 @@ Used to read data from Gitlab.
| body | String | No | - |
| json_field | Config | No | - |
| content_json | String | No | - |
-| poll_interval_ms | int | No | - |
+| poll_interval_millis | int | No | - |
| retry | int | No | - |
| retry_backoff_multiplier_ms | int | No | 100 |
| retry_backoff_max_ms | int | No | 10000 |
@@ -55,7 +55,7 @@ http params
http body
-### poll_interval_ms [int]
+### poll_interval_millis [int]
request http api interval(millis) in stream mode
diff --git a/docs/en/connector-v2/source/Http.md b/docs/en/connector-v2/source/Http.md
index aa4067fe438..f3e6a221bb0 100644
--- a/docs/en/connector-v2/source/Http.md
+++ b/docs/en/connector-v2/source/Http.md
@@ -54,7 +54,7 @@ They can be downloaded via install-plugin.sh or from the Maven central repositor
| headers | Map | No | - | Http headers. |
| params | Map | No | - | Http params,the program will automatically add http header application/x-www-form-urlencoded. |
| body | String | No | - | Http body,the program will automatically add http header application/json,body is jsonbody. |
-| poll_interval_ms | Int | No | - | Request http api interval(millis) in stream mode. |
+| poll_interval_millis | Int | No | - | Request http api interval(millis) in stream mode. |
| retry | Int | No | - | The max retry times if request http return to `IOException`. |
| retry_backoff_multiplier_ms | Int | No | 100 | The retry-backoff times(millis) multiplier if request http failed. |
| retry_backoff_max_ms | Int | No | 10000 | The maximum retry-backoff times(millis) if request http failed |
diff --git a/docs/en/connector-v2/source/Jdbc.md b/docs/en/connector-v2/source/Jdbc.md
index d82df87a02e..b86a7b33854 100644
--- a/docs/en/connector-v2/source/Jdbc.md
+++ b/docs/en/connector-v2/source/Jdbc.md
@@ -125,6 +125,7 @@ there are some reference value for params above.
| Snowflake | net.snowflake.client.jdbc.SnowflakeDriver | jdbc:snowflake://.snowflakecomputing.com | https://mvnrepository.com/artifact/net.snowflake/snowflake-jdbc |
| Redshift | com.amazon.redshift.jdbc42.Driver | jdbc:redshift://localhost:5439/testdb?defaultRowFetchSize=1000 | https://mvnrepository.com/artifact/com.amazon.redshift/redshift-jdbc42 |
| Vertica | com.vertica.jdbc.Driver | jdbc:vertica://localhost:5433 | https://repo1.maven.org/maven2/com/vertica/jdbc/vertica-jdbc/12.0.3-0/vertica-jdbc-12.0.3-0.jar |
+| Kingbase | com.kingbase8.Driver | jdbc:kingbase8://localhost:54321/db_test | https://repo1.maven.org/maven2/cn/com/kingbase/kingbase8/8.6.0/kingbase8-8.6.0.jar |
| OceanBase | com.oceanbase.jdbc.Driver | jdbc:oceanbase://localhost:2881 | https://repo1.maven.org/maven2/com/oceanbase/oceanbase-client/2.4.3/oceanbase-client-2.4.3.jar |
## Example
@@ -145,15 +146,25 @@ Jdbc {
parallel:
```
-Jdbc {
- url = "jdbc:mysql://localhost/test?serverTimezone=GMT%2b8"
- driver = "com.mysql.cj.jdbc.Driver"
- connection_check_timeout_sec = 100
- user = "root"
- password = "123456"
- query = "select * from type_bin"
- partition_column = "id"
- partition_num = 10
+env {
+ execution.parallelism = 10
+ job.mode = "BATCH"
+}
+source {
+ Jdbc {
+ url = "jdbc:mysql://localhost/test?serverTimezone=GMT%2b8"
+ driver = "com.mysql.cj.jdbc.Driver"
+ connection_check_timeout_sec = 100
+ user = "root"
+ password = "123456"
+ query = "select * from type_bin"
+ partition_column = "id"
+ partition_num = 10
+ }
+}
+
+sink {
+ Console {}
}
```
diff --git a/docs/en/connector-v2/source/Jira.md b/docs/en/connector-v2/source/Jira.md
index 6452b66c931..dcfe6cc11d3 100644
--- a/docs/en/connector-v2/source/Jira.md
+++ b/docs/en/connector-v2/source/Jira.md
@@ -29,7 +29,7 @@ Used to read data from Jira.
| body | String | No | - |
| json_field | Config | No | - |
| content_json | String | No | - |
-| poll_interval_ms | int | No | - |
+| poll_interval_millis | int | No | - |
| retry | int | No | - |
| retry_backoff_multiplier_ms | int | No | 100 |
| retry_backoff_max_ms | int | No | 10000 |
@@ -62,7 +62,7 @@ http params
http body
-### poll_interval_ms [int]
+### poll_interval_millis [int]
request http api interval(millis) in stream mode
diff --git a/docs/en/connector-v2/source/Kingbase.md b/docs/en/connector-v2/source/Kingbase.md
new file mode 100644
index 00000000000..62e280675dd
--- /dev/null
+++ b/docs/en/connector-v2/source/Kingbase.md
@@ -0,0 +1,148 @@
+# Kingbase
+
+> JDBC Kingbase Source Connector
+
+## Support Connector Version
+
+- 8.6
+
+## Support Those Engines
+
+> Spark
+> Flink
+> SeaTunnel Zeta
+
+## Key Features
+
+- [x] [batch](../../concept/connector-v2-features.md)
+- [ ] [stream](../../concept/connector-v2-features.md)
+- [ ] [exactly-once](../../concept/connector-v2-features.md)
+- [x] [column projection](../../concept/connector-v2-features.md)
+- [x] [parallelism](../../concept/connector-v2-features.md)
+- [x] [support user-defined split](../../concept/connector-v2-features.md)
+
+## Description
+
+Read external data source data through JDBC.
+
+## Supported DataSource Info
+
+| Datasource | Supported versions | Driver | Url | Maven |
+|------------|--------------------|----------------------|------------------------------------------|------------------------------------------------------------------------------------------------|
+| Kingbase | 8.6 | com.kingbase8.Driver | jdbc:kingbase8://localhost:54321/db_test | [Download](https://repo1.maven.org/maven2/cn/com/kingbase/kingbase8/8.6.0/kingbase8-8.6.0.jar) |
+
+## Database Dependency
+
+> Please download the support list corresponding to 'Maven' and copy it to the '$SEATUNNEL_HOME/plugins/jdbc/lib/' working directory
+> For example: cp kingbase8-8.6.0.jar $SEATUNNEL_HOME/plugins/jdbc/lib/
+
+## Data Type Mapping
+
+| Kingbase Data type | SeaTunnel Data type |
+|-------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|
+| BOOL | BOOLEAN |
+| INT2 | SHORT |
+| SMALLSERIAL
SERIAL
INT4 | INT |
+| INT8
BIGSERIAL | BIGINT |
+| FLOAT4 | FLOAT |
+| FLOAT8 | DOUBLE |
+| NUMERIC                                   | DECIMAL((Get the designated column's specified column size),
(Gets the designated column's number of digits to right of the decimal point.))                                                                      |
+| BPCHAR
CHARACTER
VARCHAR
TEXT | STRING |
+| TIMESTAMP | LOCALDATETIME |
+| TIME | LOCALTIME |
+| DATE | LOCALDATE |
+| Other data type | Not supported yet |
+
+## Source Options
+
+| Name | Type | Required | Default | Description |
+|------------------------------|------------|----------|-----------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| url | String | Yes | - | The URL of the JDBC connection. Refer to a case: jdbc:kingbase8://localhost:54321/test |
+| driver | String | Yes | - | The jdbc class name used to connect to the remote data source, should be `com.kingbase8.Driver`. |
+| user | String | No | - | Connection instance user name |
+| password | String | No | - | Connection instance password |
+| query | String | Yes | - | Query statement |
+| connection_check_timeout_sec | Int | No | 30 | The time in seconds to wait for the database operation used to validate the connection to complete |
+| partition_column | String | No | - | The column name for parallelism's partition, only support numeric type column and string type column. |
+| partition_lower_bound | BigDecimal | No | - | The partition_column min value for scan, if not set SeaTunnel will query database get min value. |
+| partition_upper_bound | BigDecimal | No | - | The partition_column max value for scan, if not set SeaTunnel will query database get max value. |
+| partition_num | Int | No | job parallelism | The number of partition count, only support positive integer. Default value is job parallelism. |
+| fetch_size | Int | No | 0 | For queries that return a large number of objects, you can configure
the row fetch size used in the query to improve performance by
reducing the number database hits required to satisfy the selection criteria.
Zero means use jdbc default value. |
+| common-options | | No | - | Source plugin common parameters, please refer to [Source Common Options](common-options.md) for details |
+
+### Tips
+
+> If partition_column is not set, it will run in single concurrency, and if partition_column is set, it will be executed in parallel according to the concurrency of tasks.
+
+## Task Example
+
+### Simple:
+
+```
+env {
+ execution.parallelism = 2
+ job.mode = "BATCH"
+}
+
+source {
+ Jdbc {
+ driver = "com.kingbase8.Driver"
+ url = "jdbc:kingbase8://localhost:54321/db_test"
+ user = "root"
+ password = ""
+ query = "select * from source"
+ }
+}
+
+transform {
+ # If you would like to get more information about how to configure seatunnel and see full list of transform plugins,
+ # please go to https://seatunnel.apache.org/docs/transform/sql
+}
+
+sink {
+ Console {}
+}
+```
+
+### Parallel:
+
+> Read your query table in parallel with the shard field you configured and the shard data. You can do this if you want to read the whole table
+
+```
+source {
+ Jdbc {
+ driver = "com.kingbase8.Driver"
+ url = "jdbc:kingbase8://localhost:54321/db_test"
+ user = "root"
+ password = ""
+ query = "select * from source"
+ # Parallel sharding reads fields
+ partition_column = "id"
+ # Number of fragments
+ partition_num = 10
+ }
+}
+```
+
+### Parallel Boundary:
+
+> It is more efficient to read your data source according to the upper and lower boundaries you configured
+
+```
+source {
+ Jdbc {
+ driver = "com.kingbase8.Driver"
+ url = "jdbc:kingbase8://localhost:54321/db_test"
+ user = "root"
+ password = ""
+ query = "select * from source"
+ partition_column = "id"
+ partition_num = 10
+ # Read start boundary
+ partition_lower_bound = 1
+ # Read end boundary
+ partition_upper_bound = 500
+ }
+}
+```
+
diff --git a/docs/en/connector-v2/source/Klaviyo.md b/docs/en/connector-v2/source/Klaviyo.md
index 20ed8ded501..e80a2434fdf 100644
--- a/docs/en/connector-v2/source/Klaviyo.md
+++ b/docs/en/connector-v2/source/Klaviyo.md
@@ -30,7 +30,7 @@ Used to read data from Klaviyo.
| body | String | No | - |
| json_field | Config | No | - |
| content_json | String | No | - |
-| poll_interval_ms | int | No | - |
+| poll_interval_millis | int | No | - |
| retry | int | No | - |
| retry_backoff_multiplier_ms | int | No | 100 |
| retry_backoff_max_ms | int | No | 10000 |
@@ -63,7 +63,7 @@ http params
http body
-### poll_interval_ms [int]
+### poll_interval_millis [int]
request http api interval(millis) in stream mode
diff --git a/docs/en/connector-v2/source/Lemlist.md b/docs/en/connector-v2/source/Lemlist.md
index 5e7c4138c58..76cac3b9bf8 100644
--- a/docs/en/connector-v2/source/Lemlist.md
+++ b/docs/en/connector-v2/source/Lemlist.md
@@ -28,7 +28,7 @@ Used to read data from Lemlist.
| body | String | No | - |
| json_field | Config | No | - |
| content_json | String | No | - |
-| poll_interval_ms | int | No | - |
+| poll_interval_millis | int | No | - |
| retry | int | No | - |
| retry_backoff_multiplier_ms | int | No | 100 |
| retry_backoff_max_ms | int | No | 10000 |
@@ -57,7 +57,7 @@ http params
http body
-### poll_interval_ms [int]
+### poll_interval_millis [int]
request http api interval(millis) in stream mode
diff --git a/docs/en/connector-v2/source/MongoDB.md b/docs/en/connector-v2/source/MongoDB.md
index 137fb205b8c..d63d303fa24 100644
--- a/docs/en/connector-v2/source/MongoDB.md
+++ b/docs/en/connector-v2/source/MongoDB.md
@@ -283,6 +283,10 @@ By utilizing `flat.sync-string`, only one field attribute value can be set, and
This operation will perform a string mapping on a single MongoDB data entry.
```bash
+env {
+ execution.parallelism = 10
+ job.mode = "BATCH"
+}
source {
MongoDB {
uri = "mongodb://user:password@127.0.0.1:27017"
@@ -296,6 +300,9 @@ source {
}
}
}
+sink {
+ Console {}
+}
```
Use the data samples synchronized with modified parameters, such as the following:
diff --git a/docs/en/connector-v2/source/MyHours.md b/docs/en/connector-v2/source/MyHours.md
index f90d42ab1cb..91321990ab2 100644
--- a/docs/en/connector-v2/source/MyHours.md
+++ b/docs/en/connector-v2/source/MyHours.md
@@ -55,7 +55,7 @@ They can be downloaded via install-plugin.sh or from the Maven central repositor
| headers | Map | No | - | Http headers. |
| params | Map | No | - | Http params. |
| body | String | No | - | Http body. |
-| poll_interval_ms | Int | No | - | Request http api interval(millis) in stream mode. |
+| poll_interval_millis | Int | No | - | Request http api interval(millis) in stream mode. |
| retry | Int | No | - | The max retry times if request http return to `IOException`. |
| retry_backoff_multiplier_ms | Int | No | 100 | The retry-backoff times(millis) multiplier if request http failed. |
| retry_backoff_max_ms | Int | No | 10000 | The maximum retry-backoff times(millis) if request http failed |
diff --git a/docs/en/connector-v2/source/Mysql.md b/docs/en/connector-v2/source/Mysql.md
index 001ef1463da..bdac5c0aec6 100644
--- a/docs/en/connector-v2/source/Mysql.md
+++ b/docs/en/connector-v2/source/Mysql.md
@@ -94,7 +94,7 @@ env {
}
source{
Jdbc {
- url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8"
+ url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8&useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
driver = "com.mysql.cj.jdbc.Driver"
connection_check_timeout_sec = 100
user = "root"
@@ -118,9 +118,13 @@ sink {
> Read your query table in parallel with the shard field you configured and the shard data You can do this if you want to read the whole table
```
+env {
+ execution.parallelism = 10
+ job.mode = "BATCH"
+}
source {
Jdbc {
- url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8"
+ url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8&useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
driver = "com.mysql.cj.jdbc.Driver"
connection_check_timeout_sec = 100
user = "root"
@@ -133,6 +137,9 @@ source {
partition_num = 10
}
}
+sink {
+ Console {}
+}
```
### Parallel Boundary:
@@ -142,7 +149,7 @@ source {
```
source {
Jdbc {
- url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8"
+ url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8&useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
driver = "com.mysql.cj.jdbc.Driver"
connection_check_timeout_sec = 100
user = "root"
diff --git a/docs/en/connector-v2/source/Notion.md b/docs/en/connector-v2/source/Notion.md
index 186294c6874..d138c21c1d6 100644
--- a/docs/en/connector-v2/source/Notion.md
+++ b/docs/en/connector-v2/source/Notion.md
@@ -29,7 +29,7 @@ Used to read data from Notion.
| body | String | No | - |
| json_field | Config | No | - |
| content_json | String | No | - |
-| poll_interval_ms | int | No | - |
+| poll_interval_millis | int | No | - |
| retry | int | No | - |
| retry_backoff_multiplier_ms | int | No | 100 |
| retry_backoff_max_ms | int | No | 10000 |
@@ -62,7 +62,7 @@ http params
http body
-### poll_interval_ms [int]
+### poll_interval_millis [int]
request http api interval(millis) in stream mode
diff --git a/docs/en/connector-v2/source/OceanBase.md b/docs/en/connector-v2/source/OceanBase.md
index bd035793eee..434e25284dd 100644
--- a/docs/en/connector-v2/source/OceanBase.md
+++ b/docs/en/connector-v2/source/OceanBase.md
@@ -127,6 +127,10 @@ sink {
> Read your query table in parallel with the shard field you configured and the shard data. You can do this if you want to read the whole table
```
+env {
+ execution.parallelism = 10
+ job.mode = "BATCH"
+}
source {
Jdbc {
driver = "com.oceanbase.jdbc.Driver"
@@ -141,6 +145,9 @@ source {
partition_num = 10
}
}
+sink {
+ Console {}
+}
```
### Parallel Boundary:
diff --git a/docs/en/connector-v2/source/OneSignal.md b/docs/en/connector-v2/source/OneSignal.md
index 52636cf5bda..9fb6d65379b 100644
--- a/docs/en/connector-v2/source/OneSignal.md
+++ b/docs/en/connector-v2/source/OneSignal.md
@@ -29,7 +29,7 @@ Used to read data from OneSignal.
| body | String | No | - |
| json_field | Config | No | - |
| content_json | String | No | - |
-| poll_interval_ms | int | No | - |
+| poll_interval_millis | int | No | - |
| retry | int | No | - |
| retry_backoff_multiplier_ms | int | No | 100 |
| retry_backoff_max_ms | int | No | 10000 |
@@ -58,7 +58,7 @@ http params
http body
-### poll_interval_ms [int]
+### poll_interval_millis [int]
request http api interval(millis) in stream mode
diff --git a/docs/en/connector-v2/source/Oracle.md b/docs/en/connector-v2/source/Oracle.md
index 385d55ca9e5..f191cda9d99 100644
--- a/docs/en/connector-v2/source/Oracle.md
+++ b/docs/en/connector-v2/source/Oracle.md
@@ -111,6 +111,10 @@ sink {
> Read your query table in parallel with the shard field you configured and the shard data You can do this if you want to read the whole table
```
+env {
+ execution.parallelism = 10
+ job.mode = "BATCH"
+}
source {
Jdbc {
url = "jdbc:oracle:thin:@datasource01:1523:xe"
@@ -126,6 +130,9 @@ source {
partition_num = 10
}
}
+sink {
+ Console {}
+}
```
### Parallel Boundary:
diff --git a/docs/en/connector-v2/source/Persistiq.md b/docs/en/connector-v2/source/Persistiq.md
index e102b8b3edd..c308efbb389 100644
--- a/docs/en/connector-v2/source/Persistiq.md
+++ b/docs/en/connector-v2/source/Persistiq.md
@@ -29,7 +29,7 @@ Used to read data from Persistiq.
| body | String | No | - |
| json_field | Config | No | - |
| content_json | String | No | - |
-| poll_interval_ms | int | No | - |
+| poll_interval_millis | int | No | - |
| retry | int | No | - |
| retry_backoff_multiplier_ms | int | No | 100 |
| retry_backoff_max_ms | int | No | 10000 |
@@ -56,7 +56,7 @@ http params
http body
-### poll_interval_ms [int]
+### poll_interval_millis [int]
request http api interval(millis) in stream mode
diff --git a/docs/en/connector-v2/source/PostgreSQL.md b/docs/en/connector-v2/source/PostgreSQL.md
index 50839780726..63ddbc25ecf 100644
--- a/docs/en/connector-v2/source/PostgreSQL.md
+++ b/docs/en/connector-v2/source/PostgreSQL.md
@@ -120,6 +120,10 @@ sink {
> Read your query table in parallel with the shard field you configured and the shard data You can do this if you want to read the whole table
```
+env {
+ execution.parallelism = 10
+ job.mode = "BATCH"
+}
source{
jdbc{
url = "jdbc:postgresql://localhost:5432/test"
@@ -131,6 +135,9 @@ source{
partition_num = 5
}
}
+sink {
+ Console {}
+}
```
### Parallel Boundary:
diff --git a/docs/sidebars.js b/docs/sidebars.js
index a8f2527413b..764cb34f57a 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -89,7 +89,8 @@ const sidebars = {
"concept/config",
"concept/connector-v2-features",
'concept/schema-feature',
- 'concept/JobEnvConfig'
+ 'concept/JobEnvConfig',
+ 'concept/speed-limit'
]
},
"Connector-v2-release-state",
diff --git a/release-note.md b/release-note.md
index ee6661caa88..fee605dafe9 100644
--- a/release-note.md
+++ b/release-note.md
@@ -77,6 +77,7 @@
- [zeta] Fix the deadlock issue with JDBC driver loading (#4878)
- [zeta] dynamically replace the value of the variable at runtime (#4950)
- [Zeta] Add from_unixtime function (#5462)
+- [zeta] Fix CDC task restore throw NPE (#5507)
### E2E
@@ -160,6 +161,7 @@
- [Connector-V2] [Paimon] Introduce paimon connector (#4178)
- [Connector V2] [Cassandra] Expose configurable options in Cassandra (#3681)
- [Connector V2] [Jdbc] Supports GEOMETRY data type for PostgreSQL (#4673)
+- [Connector V2] [Jdbc] Supports Kingbase database (#4803)
- [Transform-V2] Add UDF SPI and an example implement for SQL Transform plugin (#4392)
- [Transform-V2] Support copy field list (#4404)
- [Transform-V2] Add support CatalogTable for FieldMapperTransform (#4423)
diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/common/metrics/MetricNames.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/common/metrics/MetricNames.java
index a3511d92b42..b1fc60e0f16 100644
--- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/common/metrics/MetricNames.java
+++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/common/metrics/MetricNames.java
@@ -26,8 +26,11 @@ private MetricNames() {}
public static final String RECEIVED_BATCHES = "receivedBatches";
public static final String SOURCE_RECEIVED_COUNT = "SourceReceivedCount";
-
+ public static final String SOURCE_RECEIVED_BYTES = "SourceReceivedBytes";
public static final String SOURCE_RECEIVED_QPS = "SourceReceivedQPS";
+ public static final String SOURCE_RECEIVED_BYTES_PER_SECONDS = "SourceReceivedBytesPerSeconds";
public static final String SINK_WRITE_COUNT = "SinkWriteCount";
+ public static final String SINK_WRITE_BYTES = "SinkWriteBytes";
public static final String SINK_WRITE_QPS = "SinkWriteQPS";
+ public static final String SINK_WRITE_BYTES_PER_SECONDS = "SinkWriteBytesPerSeconds";
}
diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/env/EnvCommonOptions.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/env/EnvCommonOptions.java
index d076cd5367b..0c010bfb846 100644
--- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/env/EnvCommonOptions.java
+++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/env/EnvCommonOptions.java
@@ -51,6 +51,19 @@ public interface EnvCommonOptions {
.withDescription(
"The interval (in milliseconds) between two consecutive checkpoints.");
+ Option READ_LIMIT_ROW_PER_SECOND =
+ Options.key("read_limit.rows_per_second")
+ .intType()
+ .noDefaultValue()
+ .withDescription(
+ "The row limit per second for reading data from the source, applied to each parallelism.");
+
+ Option READ_LIMIT_BYTES_PER_SECOND =
+ Options.key("read_limit.bytes_per_second")
+ .intType()
+ .noDefaultValue()
+ .withDescription(
+ "The bytes limit per second for reading data from the source, applied to each parallelism.");
Option CHECKPOINT_TIMEOUT =
Options.key("checkpoint.timeout")
.longType()
diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/env/EnvOptionRule.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/env/EnvOptionRule.java
index 09310f080c5..d4caa710d89 100644
--- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/env/EnvOptionRule.java
+++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/env/EnvOptionRule.java
@@ -31,6 +31,8 @@ public static OptionRule getEnvOptionRules() {
EnvCommonOptions.JARS,
EnvCommonOptions.CHECKPOINT_INTERVAL,
EnvCommonOptions.CHECKPOINT_TIMEOUT,
+ EnvCommonOptions.READ_LIMIT_ROW_PER_SECOND,
+ EnvCommonOptions.READ_LIMIT_BYTES_PER_SECOND,
EnvCommonOptions.CUSTOM_PARAMETERS)
.build();
}
diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/TablePath.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/TablePath.java
index 7b2dd6d5533..358e873b991 100644
--- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/TablePath.java
+++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/catalog/TablePath.java
@@ -22,6 +22,8 @@
import lombok.RequiredArgsConstructor;
import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
@Getter
@EqualsAndHashCode
@@ -54,14 +56,15 @@ public static TablePath of(String databaseName, String schemaName, String tableN
}
public String getSchemaAndTableName() {
- return String.format("%s.%s", schemaName, tableName);
+ return getNameCommon(null, schemaName, tableName, null, null);
+ }
+
+ public String getSchemaAndTableName(String quote) {
+ return getNameCommon(null, schemaName, tableName, quote, quote);
}
public String getFullName() {
- if (schemaName == null) {
- return String.format("%s.%s", databaseName, tableName);
- }
- return String.format("%s.%s.%s", databaseName, schemaName, tableName);
+ return getNameCommon(databaseName, schemaName, tableName, null, null);
}
public String getFullNameWithQuoted() {
@@ -69,13 +72,36 @@ public String getFullNameWithQuoted() {
}
public String getFullNameWithQuoted(String quote) {
- if (schemaName == null) {
- return String.format(
- "%s%s%s.%s%s%s", quote, databaseName, quote, quote, tableName, quote);
+ return getNameCommon(databaseName, schemaName, tableName, quote, quote);
+ }
+
+ public String getFullNameWithQuoted(String quoteLeft, String quoteRight) {
+ return getNameCommon(databaseName, schemaName, tableName, quoteLeft, quoteRight);
+ }
+
+ private String getNameCommon(
+ String databaseName,
+ String schemaName,
+ String tableName,
+ String quoteLeft,
+ String quoteRight) {
+ List joinList = new ArrayList<>();
+ quoteLeft = quoteLeft == null ? "" : quoteLeft;
+ quoteRight = quoteRight == null ? "" : quoteRight;
+
+ if (databaseName != null) {
+ joinList.add(quoteLeft + databaseName + quoteRight);
+ }
+
+ if (schemaName != null) {
+ joinList.add(quoteLeft + schemaName + quoteRight);
}
- return String.format(
- "%s%s%s.%s%s%s.%s%s%s",
- quote, databaseName, quote, quote, schemaName, quote, quote, tableName, quote);
+
+ if (tableName != null) {
+ joinList.add(quoteLeft + tableName + quoteRight);
+ }
+
+ return String.join(".", joinList);
}
@Override
diff --git a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/SeaTunnelRow.java b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/SeaTunnelRow.java
index 1966d3142c5..bd05e0808d8 100644
--- a/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/SeaTunnelRow.java
+++ b/seatunnel-api/src/main/java/org/apache/seatunnel/api/table/type/SeaTunnelRow.java
@@ -21,6 +21,7 @@
import java.io.Serializable;
import java.util.Arrays;
+import java.util.Map;
import java.util.Objects;
/** SeaTunnel row type. */
@@ -33,6 +34,8 @@ public final class SeaTunnelRow implements Serializable {
/** The array to store the actual internal format values. */
private final Object[] fields;
+ private volatile int size;
+
public SeaTunnelRow(int arity) {
this.fields = new Object[arity];
}
@@ -97,6 +100,180 @@ public boolean isNullAt(int pos) {
return this.fields[pos] == null;
}
+ public int getBytesSize(SeaTunnelRowType rowType) {
+ if (size == 0) {
+ int s = 0;
+ for (int i = 0; i < fields.length; i++) {
+ s += getBytesForValue(fields[i], rowType.getFieldType(i));
+ }
+ size = s;
+ }
+ return size;
+ }
+
+ /** Estimates the byte size of a value from its declared type; faster than the reflective {@link #getBytesSize()} path. */
+ private int getBytesForValue(Object v, SeaTunnelDataType> dataType) {
+ if (v == null) {
+ return 0;
+ }
+ SqlType sqlType = dataType.getSqlType();
+ switch (sqlType) {
+ case STRING:
+ return ((String) v).length();
+ case BOOLEAN:
+ case TINYINT:
+ return 1;
+ case SMALLINT:
+ return 2;
+ case INT:
+ case FLOAT:
+ return 4;
+ case BIGINT:
+ case DOUBLE:
+ return 8;
+ case DECIMAL:
+ return 36;
+ case NULL:
+ return 0;
+ case BYTES:
+ return ((byte[]) v).length;
+ case DATE:
+ return 24;
+ case TIME:
+ return 12;
+ case TIMESTAMP:
+ return 48;
+ case ARRAY:
+ return getBytesForArray(v, ((ArrayType) dataType).getElementType());
+ case MAP:
+ int size = 0;
+ MapType, ?> mapType = ((MapType, ?>) dataType);
+ for (Map.Entry, ?> entry : ((Map, ?>) v).entrySet()) {
+ size +=
+ getBytesForValue(entry.getKey(), mapType.getKeyType())
+ + getBytesForValue(entry.getValue(), mapType.getValueType());
+ }
+ return size;
+ case ROW:
+ int rowSize = 0;
+ SeaTunnelRowType rowType = ((SeaTunnelRowType) dataType);
+ SeaTunnelDataType>[] types = rowType.getFieldTypes();
+ SeaTunnelRow row = (SeaTunnelRow) v;
+ for (int i = 0; i < types.length; i++) {
+ rowSize += getBytesForValue(row.fields[i], types[i]);
+ }
+ return rowSize;
+ default:
+ throw new UnsupportedOperationException("Unsupported type: " + sqlType);
+ }
+ }
+
+ private int getBytesForArray(Object v, BasicType> dataType) {
+ switch (dataType.getSqlType()) {
+ case STRING:
+ int s = 0;
+ for (String i : ((String[]) v)) {
+ s += i.length();
+ }
+ return s;
+ case BOOLEAN:
+ return ((Boolean[]) v).length;
+ case TINYINT:
+ return ((Byte[]) v).length;
+ case SMALLINT:
+ return ((Short[]) v).length * 2;
+ case INT:
+ return ((Integer[]) v).length * 4;
+ case FLOAT:
+ return ((Float[]) v).length * 4;
+ case BIGINT:
+ return ((Long[]) v).length * 8;
+ case DOUBLE:
+ return ((Double[]) v).length * 8;
+ case NULL:
+ default:
+ return 0;
+ }
+ }
+
+ public int getBytesSize() {
+ if (size == 0) {
+ int s = 0;
+ for (Object field : fields) {
+ s += getBytesForValue(field);
+ }
+ size = s;
+ }
+ return size;
+ }
+
+ private int getBytesForValue(Object v) {
+ if (v == null) {
+ return 0;
+ }
+ String clazz = v.getClass().getSimpleName();
+ switch (clazz) {
+ case "String":
+ return ((String) v).length();
+ case "Boolean":
+ case "Byte":
+ return 1;
+ case "Short":
+ return 2;
+ case "Integer":
+ case "Float":
+ return 4;
+ case "Long":
+ case "Double":
+ return 8;
+ case "BigDecimal":
+ return 36;
+ case "byte[]":
+ return ((byte[]) v).length;
+ case "LocalDate":
+ return 24;
+ case "LocalTime":
+ return 12;
+ case "LocalDateTime":
+ return 48;
+ case "String[]":
+ int s = 0;
+ for (String i : ((String[]) v)) {
+ s += i.length();
+ }
+ return s;
+ case "Boolean[]":
+ return ((Boolean[]) v).length;
+ case "Byte[]":
+ return ((Byte[]) v).length;
+ case "Short[]":
+ return ((Short[]) v).length * 2;
+ case "Integer[]":
+ return ((Integer[]) v).length * 4;
+ case "Long[]":
+ return ((Long[]) v).length * 8;
+ case "Float[]":
+ return ((Float[]) v).length * 4;
+ case "Double[]":
+ return ((Double[]) v).length * 8;
+ case "HashMap":
+ int size = 0;
+ for (Map.Entry, ?> entry : ((Map, ?>) v).entrySet()) {
+ size += getBytesForValue(entry.getKey()) + getBytesForValue(entry.getValue());
+ }
+ return size;
+ case "SeaTunnelRow":
+ int rowSize = 0;
+ SeaTunnelRow row = (SeaTunnelRow) v;
+ for (int i = 0; i < row.fields.length; i++) {
+ rowSize += getBytesForValue(row.fields[i]);
+ }
+ return rowSize;
+ default:
+ throw new UnsupportedOperationException("Unsupported type: " + clazz);
+ }
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) {
@@ -106,7 +283,7 @@ public boolean equals(Object o) {
return false;
}
SeaTunnelRow that = (SeaTunnelRow) o;
- return tableId == that.tableId
+ return Objects.equals(tableId, that.tableId)
&& kind == that.kind
&& Arrays.deepEquals(fields, that.fields);
}
diff --git a/seatunnel-api/src/test/java/org/apache/seatunnel/api/table/type/SeaTunnelRowTest.java b/seatunnel-api/src/test/java/org/apache/seatunnel/api/table/type/SeaTunnelRowTest.java
new file mode 100644
index 00000000000..eaad7f95767
--- /dev/null
+++ b/seatunnel-api/src/test/java/org/apache/seatunnel/api/table/type/SeaTunnelRowTest.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.api.table.type;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.math.BigDecimal;
+import java.util.HashMap;
+import java.util.Map;
+
+public class SeaTunnelRowTest {
+
+ @Test
+ void testForRowSize() {
+ Map map = new HashMap<>();
+ map.put(
+ "key1",
+ new SeaTunnelRow(
+ new Object[] {
+ 1, "test", 1L, new BigDecimal("3333.333"),
+ }));
+ map.put(
+ "key2",
+ new SeaTunnelRow(
+ new Object[] {
+ 1, "test", 1L, new BigDecimal("3333.333"),
+ }));
+ SeaTunnelRow row =
+ new SeaTunnelRow(
+ new Object[] {
+ 1,
+ "test",
+ 1L,
+ map,
+ new BigDecimal("3333.333"),
+ new String[] {"test2", "test", "3333.333"}
+ });
+
+ SeaTunnelRowType rowType =
+ new SeaTunnelRowType(
+ new String[] {"f0", "f1", "f2", "f3", "f4", "f5"},
+ new SeaTunnelDataType>[] {
+ BasicType.INT_TYPE,
+ BasicType.STRING_TYPE,
+ BasicType.LONG_TYPE,
+ new MapType<>(
+ BasicType.STRING_TYPE,
+ new SeaTunnelRowType(
+ new String[] {"f0", "f1", "f2", "f3"},
+ new SeaTunnelDataType>[] {
+ BasicType.INT_TYPE,
+ BasicType.STRING_TYPE,
+ BasicType.LONG_TYPE,
+ new DecimalType(10, 3)
+ })),
+ new DecimalType(10, 3),
+ ArrayType.STRING_ARRAY_TYPE
+ });
+
+ Assertions.assertEquals(181, row.getBytesSize(rowType));
+
+ SeaTunnelRow row2 =
+ new SeaTunnelRow(
+ new Object[] {
+ 1,
+ "test",
+ 1L,
+ map,
+ new BigDecimal("3333.333"),
+ new String[] {"test2", "test", "3333.333"}
+ });
+ Assertions.assertEquals(181, row2.getBytesSize());
+ }
+}
diff --git a/seatunnel-common/src/main/java/org/apache/seatunnel/common/config/Common.java b/seatunnel-common/src/main/java/org/apache/seatunnel/common/config/Common.java
index e3e9f374c8c..88a13fe781b 100644
--- a/seatunnel-common/src/main/java/org/apache/seatunnel/common/config/Common.java
+++ b/seatunnel-common/src/main/java/org/apache/seatunnel/common/config/Common.java
@@ -122,11 +122,6 @@ public static Path pluginRootDir() {
return Paths.get(getSeaTunnelHome(), "plugins");
}
- /** Plugin Connector Jar Dir */
- public static Path connectorJarDir(String engine) {
- return Paths.get(getSeaTunnelHome(), "connectors", engine.toLowerCase());
- }
-
/** Plugin Connector Dir */
public static Path connectorDir() {
return Paths.get(getSeaTunnelHome(), "connectors");
diff --git a/seatunnel-connectors-v2/connector-assert/src/main/java/org/apache/seatunnel/connectors/seatunnel/assertion/excecutor/AssertExecutor.java b/seatunnel-connectors-v2/connector-assert/src/main/java/org/apache/seatunnel/connectors/seatunnel/assertion/excecutor/AssertExecutor.java
index c8666cd9a55..5868fba9127 100644
--- a/seatunnel-connectors-v2/connector-assert/src/main/java/org/apache/seatunnel/connectors/seatunnel/assertion/excecutor/AssertExecutor.java
+++ b/seatunnel-connectors-v2/connector-assert/src/main/java/org/apache/seatunnel/connectors/seatunnel/assertion/excecutor/AssertExecutor.java
@@ -20,6 +20,8 @@
import org.apache.seatunnel.api.table.type.SeaTunnelDataType;
import org.apache.seatunnel.api.table.type.SeaTunnelRow;
import org.apache.seatunnel.api.table.type.SeaTunnelRowType;
+import org.apache.seatunnel.connectors.seatunnel.assertion.exception.AssertConnectorErrorCode;
+import org.apache.seatunnel.connectors.seatunnel.assertion.exception.AssertConnectorException;
import org.apache.seatunnel.connectors.seatunnel.assertion.rule.AssertFieldRule;
import org.apache.commons.lang3.StringUtils;
@@ -27,6 +29,7 @@
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
+import java.math.BigDecimal;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
@@ -108,33 +111,8 @@ private boolean pass(Object value, AssertFieldRule.AssertRule valueRule) {
return ((Number) value).doubleValue() >= valueRule.getRuleValue();
}
if (valueRule.getEqualTo() != null) {
- if (value instanceof String) {
- return value.equals(valueRule.getEqualTo());
- }
- if (value instanceof Number) {
- return ((Number) value).doubleValue() == Double.parseDouble(valueRule.getEqualTo());
- }
- if (value instanceof Boolean) {
- return value.equals(Boolean.parseBoolean(valueRule.getEqualTo()));
- }
- if (value instanceof LocalDateTime) {
- TemporalAccessor parsedTimestamp =
- DateTimeFormatter.ISO_LOCAL_DATE_TIME.parse(valueRule.getEqualTo());
- LocalTime localTime = parsedTimestamp.query(TemporalQueries.localTime());
- LocalDate localDate = parsedTimestamp.query(TemporalQueries.localDate());
- return ((LocalDateTime) value).isEqual(LocalDateTime.of(localDate, localTime));
- }
- if (value instanceof LocalDate) {
- DateTimeFormatter fmt = DateTimeFormatter.ofPattern("yyyy-MM-dd");
- return ((LocalDate) value).isEqual(LocalDate.parse(valueRule.getEqualTo(), fmt));
- }
- if (value instanceof LocalTime) {
- DateTimeFormatter fmt = DateTimeFormatter.ofPattern("HH:mm:ss");
- return value.equals(LocalTime.parse(valueRule.getEqualTo(), fmt));
- }
- return false;
+ return compareValue(value, valueRule);
}
-
String valueStr = Objects.isNull(value) ? StringUtils.EMPTY : String.valueOf(value);
if (AssertFieldRule.AssertRuleType.MAX_LENGTH.equals(valueRule.getRuleType())) {
return valueStr.length() <= valueRule.getRuleValue();
@@ -146,6 +124,44 @@ private boolean pass(Object value, AssertFieldRule.AssertRule valueRule) {
return Boolean.TRUE;
}
+ private boolean compareValue(Object value, AssertFieldRule.AssertRule valueRule) {
+ if (value instanceof String) {
+ return value.equals(valueRule.getEqualTo());
+ } else if (value instanceof Integer) {
+ return value.equals(Integer.parseInt(valueRule.getEqualTo()));
+ } else if (value instanceof Long) {
+ return value.equals(Long.parseLong(valueRule.getEqualTo()));
+ } else if (value instanceof Short) {
+ return value.equals(Short.parseShort(valueRule.getEqualTo()));
+ } else if (value instanceof Float) {
+ return value.equals((Float.parseFloat(valueRule.getEqualTo())));
+ } else if (value instanceof Byte) {
+ return value.equals((Byte.parseByte(valueRule.getEqualTo())));
+ } else if (value instanceof Double) {
+ return value.equals(Double.parseDouble(valueRule.getEqualTo()));
+ } else if (value instanceof BigDecimal) {
+ return value.equals(new BigDecimal(valueRule.getEqualTo()));
+ } else if (value instanceof Boolean) {
+ return value.equals(Boolean.parseBoolean(valueRule.getEqualTo()));
+ } else if (value instanceof LocalDateTime) {
+ TemporalAccessor parsedTimestamp =
+ DateTimeFormatter.ISO_LOCAL_DATE_TIME.parse(valueRule.getEqualTo());
+ LocalTime localTime = parsedTimestamp.query(TemporalQueries.localTime());
+ LocalDate localDate = parsedTimestamp.query(TemporalQueries.localDate());
+ return ((LocalDateTime) value).isEqual(LocalDateTime.of(localDate, localTime));
+ } else if (value instanceof LocalDate) {
+ DateTimeFormatter fmt = DateTimeFormatter.ofPattern("yyyy-MM-dd");
+ return ((LocalDate) value).isEqual(LocalDate.parse(valueRule.getEqualTo(), fmt));
+ } else if (value instanceof LocalTime) {
+ DateTimeFormatter fmt = DateTimeFormatter.ofPattern("HH:mm:ss");
+ return value.equals(LocalTime.parse(valueRule.getEqualTo(), fmt));
+ } else {
+ throw new AssertConnectorException(
+ AssertConnectorErrorCode.TYPES_NOT_SUPPORTED_FAILED,
+ String.format(" %s types not supported yet", value.getClass().getSimpleName()));
+ }
+ }
+
private Boolean checkType(Object value, SeaTunnelDataType> fieldType) {
return value.getClass().equals(fieldType.getTypeClass());
}
diff --git a/seatunnel-connectors-v2/connector-assert/src/main/java/org/apache/seatunnel/connectors/seatunnel/assertion/exception/AssertConnectorErrorCode.java b/seatunnel-connectors-v2/connector-assert/src/main/java/org/apache/seatunnel/connectors/seatunnel/assertion/exception/AssertConnectorErrorCode.java
index abb085e2837..16ae8aed1c7 100644
--- a/seatunnel-connectors-v2/connector-assert/src/main/java/org/apache/seatunnel/connectors/seatunnel/assertion/exception/AssertConnectorErrorCode.java
+++ b/seatunnel-connectors-v2/connector-assert/src/main/java/org/apache/seatunnel/connectors/seatunnel/assertion/exception/AssertConnectorErrorCode.java
@@ -20,7 +20,8 @@
import org.apache.seatunnel.common.exception.SeaTunnelErrorCode;
public enum AssertConnectorErrorCode implements SeaTunnelErrorCode {
- RULE_VALIDATION_FAILED("ASSERT-01", "Rule validate failed");
+ RULE_VALIDATION_FAILED("ASSERT-01", "Rule validate failed"),
+ TYPES_NOT_SUPPORTED_FAILED("ASSERT-02", "Types not supported");
private final String code;
private final String description;
diff --git a/seatunnel-connectors-v2/connector-assert/src/main/java/org/apache/seatunnel/connectors/seatunnel/assertion/rule/AssertRuleParser.java b/seatunnel-connectors-v2/connector-assert/src/main/java/org/apache/seatunnel/connectors/seatunnel/assertion/rule/AssertRuleParser.java
index f479dfa5c99..eccf2c68450 100644
--- a/seatunnel-connectors-v2/connector-assert/src/main/java/org/apache/seatunnel/connectors/seatunnel/assertion/rule/AssertRuleParser.java
+++ b/seatunnel-connectors-v2/connector-assert/src/main/java/org/apache/seatunnel/connectors/seatunnel/assertion/rule/AssertRuleParser.java
@@ -20,6 +20,7 @@
import org.apache.seatunnel.shade.com.typesafe.config.Config;
import org.apache.seatunnel.api.table.type.BasicType;
+import org.apache.seatunnel.api.table.type.DecimalType;
import org.apache.seatunnel.api.table.type.LocalTimeType;
import org.apache.seatunnel.api.table.type.SeaTunnelDataType;
@@ -105,5 +106,6 @@ private SeaTunnelDataType> getFieldType(String fieldTypeStr) {
TYPES.put("datetime", LocalTimeType.LOCAL_DATE_TIME_TYPE);
TYPES.put("date", LocalTimeType.LOCAL_DATE_TYPE);
TYPES.put("time", LocalTimeType.LOCAL_TIME_TYPE);
+ TYPES.put("decimal", new DecimalType(38, 18));
}
}
diff --git a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/IncrementalSource.java b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/IncrementalSource.java
index c10ab3e0613..ed04fb0f5d7 100644
--- a/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/IncrementalSource.java
+++ b/seatunnel-connectors-v2/connector-cdc/connector-cdc-base/src/main/java/org/apache/seatunnel/connectors/cdc/base/source/IncrementalSource.java
@@ -27,6 +27,7 @@
import org.apache.seatunnel.api.source.SeaTunnelSource;
import org.apache.seatunnel.api.source.SourceReader;
import org.apache.seatunnel.api.source.SourceSplitEnumerator;
+import org.apache.seatunnel.api.source.SupportCoordinate;
import org.apache.seatunnel.api.table.type.SeaTunnelDataType;
import org.apache.seatunnel.api.table.type.SeaTunnelRow;
import org.apache.seatunnel.connectors.cdc.base.config.SourceConfig;
@@ -76,7 +77,7 @@
@NoArgsConstructor
public abstract class IncrementalSource
- implements SeaTunnelSource {
+ implements SeaTunnelSource, SupportCoordinate {
protected ReadonlyConfig readonlyConfig;
protected SourceConfig.Factory configFactory;
diff --git a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSinkWriter.java b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSinkWriter.java
index de29c6cf8b4..6220e4b8071 100644
--- a/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSinkWriter.java
+++ b/seatunnel-connectors-v2/connector-clickhouse/src/main/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/sink/client/ClickhouseSinkWriter.java
@@ -90,7 +90,14 @@ public void write(SeaTunnelRow element) throws IOException {
@Override
public Optional prepareCommit() throws IOException {
- flush();
+ for (ClickhouseBatchStatement batchStatement : statementMap.values()) {
+ JdbcBatchStatementExecutor statement = batchStatement.getJdbcBatchStatementExecutor();
+ IntHolder intHolder = batchStatement.getIntHolder();
+ if (intHolder.getValue() > 0) {
+ flush(statement);
+ intHolder.setValue(0);
+ }
+ }
return Optional.empty();
}
diff --git a/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/utils/HiveMetaStoreProxy.java b/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/utils/HiveMetaStoreProxy.java
index f6ba5cfb12c..788fe38dc1c 100644
--- a/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/utils/HiveMetaStoreProxy.java
+++ b/seatunnel-connectors-v2/connector-hive/src/main/java/org/apache/seatunnel/connectors/seatunnel/hive/utils/HiveMetaStoreProxy.java
@@ -35,6 +35,8 @@
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
+import java.io.File;
+import java.net.MalformedURLException;
import java.util.List;
import java.util.Objects;
@@ -54,10 +56,11 @@ private HiveMetaStoreProxy(Config config) {
Configuration configuration = new Configuration();
FileSystemUtils.doKerberosAuthentication(configuration, principal, keytabPath);
}
- if (config.hasPath(HiveConfig.HIVE_SITE_PATH.key())) {
- hiveConf.addResource(config.getString(HiveConfig.HIVE_SITE_PATH.key()));
- }
try {
+ if (config.hasPath(HiveConfig.HIVE_SITE_PATH.key())) {
+ String hiveSitePath = config.getString(HiveConfig.HIVE_SITE_PATH.key());
+ hiveConf.addResource(new File(hiveSitePath).toURI().toURL());
+ }
hiveMetaStoreClient = new HiveMetaStoreClient(hiveConf);
} catch (MetaException e) {
String errorMsg =
@@ -67,6 +70,14 @@ private HiveMetaStoreProxy(Config config) {
metastoreUri);
throw new HiveConnectorException(
HiveConnectorErrorCode.INITIALIZE_HIVE_METASTORE_CLIENT_FAILED, errorMsg, e);
+ } catch (MalformedURLException e) {
+ String errorMsg =
+ String.format(
+ "Using this hive uris [%s], hive conf [%s] to initialize "
+ + "hive metastore client instance failed",
+ metastoreUri, config.getString(HiveConfig.HIVE_SITE_PATH.key()));
+ throw new HiveConnectorException(
+ HiveConnectorErrorCode.INITIALIZE_HIVE_METASTORE_CLIENT_FAILED, errorMsg, e);
}
}
diff --git a/seatunnel-connectors-v2/connector-http/connector-http-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/http/client/HttpClientProvider.java b/seatunnel-connectors-v2/connector-http/connector-http-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/http/client/HttpClientProvider.java
index ef5b1f77d6b..2c6fe67b797 100644
--- a/seatunnel-connectors-v2/connector-http/connector-http-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/http/client/HttpClientProvider.java
+++ b/seatunnel-connectors-v2/connector-http/connector-http-base/src/main/java/org/apache/seatunnel/connectors/seatunnel/http/client/HttpClientProvider.java
@@ -404,7 +404,17 @@ private void addHeaders(HttpRequestBase request, Map headers) {
headers.forEach(request::addHeader);
}
+ private boolean checkAlreadyHaveContentType(HttpEntityEnclosingRequestBase request) {
+ if (request.getEntity() != null && request.getEntity().getContentType() != null) {
+ return HTTP.CONTENT_TYPE.equals(request.getEntity().getContentType().getName());
+ }
+ return false;
+ }
+
private void addBody(HttpEntityEnclosingRequestBase request, String body) {
+ if (checkAlreadyHaveContentType(request)) {
+ return;
+ }
request.addHeader(HTTP.CONTENT_TYPE, APPLICATION_JSON);
if (StringUtils.isBlank(body)) {
diff --git a/seatunnel-connectors-v2/connector-jdbc/pom.xml b/seatunnel-connectors-v2/connector-jdbc/pom.xml
index e76237e7e07..62d541d19f0 100644
--- a/seatunnel-connectors-v2/connector-jdbc/pom.xml
+++ b/seatunnel-connectors-v2/connector-jdbc/pom.xml
@@ -46,6 +46,7 @@
3.13.29
12.0.3-0
2.5.1
+ 8.6.0
@@ -143,6 +144,12 @@
${vertica.version}
provided
+
+ cn.com.kingbase
+ kingbase8
+ ${kingbase8.version}
+ provided
+
@@ -218,5 +225,11 @@
com.vertica.jdbc
vertica-jdbc
+
+
+ cn.com.kingbase
+ kingbase8
+
+
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/mysql/MysqlCreateTableSqlBuilder.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/mysql/MysqlCreateTableSqlBuilder.java
index 490ecd30ff8..3430de04b5a 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/mysql/MysqlCreateTableSqlBuilder.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/mysql/MysqlCreateTableSqlBuilder.java
@@ -25,6 +25,7 @@
import org.apache.seatunnel.api.table.catalog.TableSchema;
import org.apache.seatunnel.api.table.type.DecimalType;
import org.apache.seatunnel.api.table.type.SqlType;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.utils.CatalogUtils;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
@@ -55,6 +56,8 @@ public class MysqlCreateTableSqlBuilder {
private MysqlDataTypeConvertor mysqlDataTypeConvertor;
+ private String fieldIde;
+
private MysqlCreateTableSqlBuilder(String tableName) {
checkNotNull(tableName, "tableName must not be null");
this.tableName = tableName;
@@ -76,7 +79,8 @@ public static MysqlCreateTableSqlBuilder builder(
.charset(null)
.primaryKey(tableSchema.getPrimaryKey())
.constraintKeys(tableSchema.getConstraintKeys())
- .addColumn(tableSchema.getColumns());
+ .addColumn(tableSchema.getColumns())
+ .fieldIde(catalogTable.getOptions().get("fieldIde"));
}
public MysqlCreateTableSqlBuilder addColumn(List columns) {
@@ -90,6 +94,11 @@ public MysqlCreateTableSqlBuilder primaryKey(PrimaryKey primaryKey) {
return this;
}
+ public MysqlCreateTableSqlBuilder fieldIde(String fieldIde) {
+ this.fieldIde = fieldIde;
+ return this;
+ }
+
public MysqlCreateTableSqlBuilder constraintKeys(List constraintKeys) {
this.constraintKeys = constraintKeys;
return this;
@@ -120,7 +129,8 @@ public String build(String catalogName) {
sqls.add(
String.format(
"CREATE TABLE %s (\n%s\n)",
- tableName, buildColumnsIdentifySql(catalogName)));
+ CatalogUtils.quoteIdentifier(tableName, fieldIde, "`"),
+ buildColumnsIdentifySql(catalogName)));
if (engine != null) {
sqls.add("ENGINE = " + engine);
}
@@ -157,7 +167,7 @@ private String buildColumnsIdentifySql(String catalogName) {
private String buildColumnIdentifySql(Column column, String catalogName) {
final List columnSqls = new ArrayList<>();
- columnSqls.add(column.getName());
+ columnSqls.add(CatalogUtils.quoteIdentifier(column.getName(), fieldIde, "`"));
if (StringUtils.equals(catalogName, "mysql")) {
columnSqls.add(column.getSourceType());
} else {
@@ -243,7 +253,7 @@ private String buildPrimaryKeySql() {
.map(columnName -> "`" + columnName + "`")
.collect(Collectors.joining(", "));
// add sort type
- return String.format("PRIMARY KEY (%s)", key);
+ return String.format("PRIMARY KEY (%s)", CatalogUtils.quoteIdentifier(key, fieldIde));
}
private String buildConstraintKeySql(ConstraintKey constraintKey) {
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/oracle/OracleCreateTableSqlBuilder.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/oracle/OracleCreateTableSqlBuilder.java
index 984dd93e6a6..4b780131d54 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/oracle/OracleCreateTableSqlBuilder.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/oracle/OracleCreateTableSqlBuilder.java
@@ -23,6 +23,7 @@
import org.apache.seatunnel.api.table.catalog.TablePath;
import org.apache.seatunnel.api.table.type.DecimalType;
import org.apache.seatunnel.api.table.type.SqlType;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.utils.CatalogUtils;
import org.apache.commons.lang3.StringUtils;
@@ -36,23 +37,27 @@ public class OracleCreateTableSqlBuilder {
private PrimaryKey primaryKey;
private OracleDataTypeConvertor oracleDataTypeConvertor;
private String sourceCatalogName;
+ private String fieldIde;
public OracleCreateTableSqlBuilder(CatalogTable catalogTable) {
this.columns = catalogTable.getTableSchema().getColumns();
this.primaryKey = catalogTable.getTableSchema().getPrimaryKey();
this.oracleDataTypeConvertor = new OracleDataTypeConvertor();
this.sourceCatalogName = catalogTable.getCatalogName();
+ this.fieldIde = catalogTable.getOptions().get("fieldIde");
}
public String build(TablePath tablePath) {
StringBuilder createTableSql = new StringBuilder();
createTableSql
.append("CREATE TABLE ")
- .append(tablePath.getSchemaAndTableName())
+ .append(tablePath.getSchemaAndTableName("\""))
.append(" (\n");
List columnSqls =
- columns.stream().map(this::buildColumnSql).collect(Collectors.toList());
+ columns.stream()
+ .map(column -> CatalogUtils.getFieldIde(buildColumnSql(column), fieldIde))
+ .collect(Collectors.toList());
// Add primary key directly in the create table statement
if (primaryKey != null
@@ -70,7 +75,7 @@ public String build(TablePath tablePath) {
.map(
column ->
buildColumnCommentSql(
- column, tablePath.getSchemaAndTableName()))
+ column, tablePath.getSchemaAndTableName("\"")))
.collect(Collectors.toList());
if (!commentSqls.isEmpty()) {
@@ -83,7 +88,7 @@ public String build(TablePath tablePath) {
private String buildColumnSql(Column column) {
StringBuilder columnSql = new StringBuilder();
- columnSql.append(column.getName()).append(" ");
+ columnSql.append("\"").append(column.getName()).append("\" ");
String columnType =
sourceCatalogName.equals("oracle")
@@ -95,11 +100,6 @@ private String buildColumnSql(Column column) {
columnSql.append(" NOT NULL");
}
- // if (column.getDefaultValue() != null) {
- // columnSql.append(" DEFAULT
- // '").append(column.getDefaultValue().toString()).append("'");
- // }
-
return columnSql.toString();
}
@@ -140,7 +140,10 @@ private String buildColumnType(Column column) {
private String buildPrimaryKeySql(PrimaryKey primaryKey) {
String randomSuffix = UUID.randomUUID().toString().replace("-", "").substring(0, 4);
- String columnNamesString = String.join(", ", primaryKey.getColumnNames());
+ String columnNamesString =
+ primaryKey.getColumnNames().stream()
+ .map(columnName -> "\"" + columnName + "\"")
+ .collect(Collectors.joining(", "));
// In Oracle database, the maximum length for an identifier is 30 characters.
String primaryKeyStr = primaryKey.getPrimaryKey();
@@ -148,21 +151,26 @@ private String buildPrimaryKeySql(PrimaryKey primaryKey) {
primaryKeyStr = primaryKeyStr.substring(0, 25);
}
- return "CONSTRAINT "
- + primaryKeyStr
- + "_"
- + randomSuffix
- + " PRIMARY KEY ("
- + columnNamesString
- + ")";
+ return CatalogUtils.getFieldIde(
+ "CONSTRAINT "
+ + primaryKeyStr
+ + "_"
+ + randomSuffix
+ + " PRIMARY KEY ("
+ + columnNamesString
+ + ")",
+ fieldIde);
}
private String buildColumnCommentSql(Column column, String tableName) {
StringBuilder columnCommentSql = new StringBuilder();
- columnCommentSql.append("COMMENT ON COLUMN ").append(tableName).append(".");
columnCommentSql
- .append(column.getName())
- .append(" IS '")
+ .append(CatalogUtils.quoteIdentifier("COMMENT ON COLUMN ", fieldIde))
+ .append(tableName)
+ .append(".");
+ columnCommentSql
+ .append(CatalogUtils.quoteIdentifier(column.getName(), fieldIde, "\""))
+ .append(CatalogUtils.quoteIdentifier(" IS '", fieldIde))
.append(column.getComment())
.append("'");
return columnCommentSql.toString();
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCreateTableSqlBuilder.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCreateTableSqlBuilder.java
index d423f183010..74b684c0e39 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCreateTableSqlBuilder.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/psql/PostgresCreateTableSqlBuilder.java
@@ -23,6 +23,7 @@
import org.apache.seatunnel.api.table.catalog.TablePath;
import org.apache.seatunnel.api.table.type.DecimalType;
import org.apache.seatunnel.api.table.type.SqlType;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.utils.CatalogUtils;
import org.apache.commons.lang3.StringUtils;
@@ -37,23 +38,30 @@ public class PostgresCreateTableSqlBuilder {
private PrimaryKey primaryKey;
private PostgresDataTypeConvertor postgresDataTypeConvertor;
private String sourceCatalogName;
+ private String fieldIde;
public PostgresCreateTableSqlBuilder(CatalogTable catalogTable) {
this.columns = catalogTable.getTableSchema().getColumns();
this.primaryKey = catalogTable.getTableSchema().getPrimaryKey();
this.postgresDataTypeConvertor = new PostgresDataTypeConvertor();
this.sourceCatalogName = catalogTable.getCatalogName();
+ this.fieldIde = catalogTable.getOptions().get("fieldIde");
}
public String build(TablePath tablePath) {
StringBuilder createTableSql = new StringBuilder();
createTableSql
- .append("CREATE TABLE ")
- .append(tablePath.getSchemaAndTableName())
+ .append(CatalogUtils.quoteIdentifier("CREATE TABLE ", fieldIde))
+ .append(tablePath.getSchemaAndTableName("\""))
.append(" (\n");
List columnSqls =
- columns.stream().map(this::buildColumnSql).collect(Collectors.toList());
+ columns.stream()
+ .map(
+ column ->
+ CatalogUtils.quoteIdentifier(
+ buildColumnSql(column), fieldIde))
+ .collect(Collectors.toList());
createTableSql.append(String.join(",\n", columnSqls));
createTableSql.append("\n);");
@@ -64,7 +72,7 @@ public String build(TablePath tablePath) {
.map(
columns ->
buildColumnCommentSql(
- columns, tablePath.getSchemaAndTableName()))
+ columns, tablePath.getSchemaAndTableName("\"")))
.collect(Collectors.toList());
if (!commentSqls.isEmpty()) {
@@ -77,7 +85,7 @@ public String build(TablePath tablePath) {
private String buildColumnSql(Column column) {
StringBuilder columnSql = new StringBuilder();
- columnSql.append(column.getName()).append(" ");
+ columnSql.append("\"").append(column.getName()).append("\" ");
// For simplicity, assume the column type in SeaTunnelDataType is the same as in PostgreSQL
String columnType =
@@ -96,12 +104,6 @@ private String buildColumnSql(Column column) {
columnSql.append(" PRIMARY KEY");
}
- // Add default value if exists
- // if (column.getDefaultValue() != null) {
- // columnSql.append(" DEFAULT
- // '").append(column.getDefaultValue().toString()).append("'");
- // }
-
return columnSql.toString();
}
@@ -133,10 +135,13 @@ private String buildColumnType(Column column) {
private String buildColumnCommentSql(Column column, String tableName) {
StringBuilder columnCommentSql = new StringBuilder();
- columnCommentSql.append("COMMENT ON COLUMN ").append(tableName).append(".");
columnCommentSql
- .append(column.getName())
- .append(" IS '")
+ .append(CatalogUtils.quoteIdentifier("COMMENT ON COLUMN ", fieldIde))
+ .append(tableName)
+ .append(".");
+ columnCommentSql
+ .append(CatalogUtils.quoteIdentifier(column.getName(), fieldIde, "\""))
+ .append(CatalogUtils.quoteIdentifier(" IS '", fieldIde))
.append(column.getComment())
.append("'");
return columnCommentSql.toString();
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/sqlserver/SqlServerCreateTableSqlBuilder.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/sqlserver/SqlServerCreateTableSqlBuilder.java
index 0bec148b372..86afa6e41e1 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/sqlserver/SqlServerCreateTableSqlBuilder.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/sqlserver/SqlServerCreateTableSqlBuilder.java
@@ -25,6 +25,7 @@
import org.apache.seatunnel.api.table.catalog.TableSchema;
import org.apache.seatunnel.api.table.type.DecimalType;
import org.apache.seatunnel.api.table.type.SqlType;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.utils.CatalogUtils;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
@@ -55,6 +56,8 @@ public class SqlServerCreateTableSqlBuilder {
private SqlServerDataTypeConvertor sqlServerDataTypeConvertor;
+ private String fieldIde;
+
private SqlServerCreateTableSqlBuilder(String tableName) {
checkNotNull(tableName, "tableName must not be null");
this.tableName = tableName;
@@ -76,7 +79,8 @@ public static SqlServerCreateTableSqlBuilder builder(
.charset(null)
.primaryKey(tableSchema.getPrimaryKey())
.constraintKeys(tableSchema.getConstraintKeys())
- .addColumn(tableSchema.getColumns());
+ .addColumn(tableSchema.getColumns())
+ .fieldIde(catalogTable.getOptions().get("fieldIde"));
}
public SqlServerCreateTableSqlBuilder addColumn(List columns) {
@@ -90,6 +94,11 @@ public SqlServerCreateTableSqlBuilder primaryKey(PrimaryKey primaryKey) {
return this;
}
+ public SqlServerCreateTableSqlBuilder fieldIde(String fieldIde) {
+ this.fieldIde = fieldIde;
+ return this;
+ }
+
public SqlServerCreateTableSqlBuilder constraintKeys(List constraintKeys) {
this.constraintKeys = constraintKeys;
return this;
@@ -117,7 +126,7 @@ public SqlServerCreateTableSqlBuilder comment(String comment) {
public String build(TablePath tablePath, CatalogTable catalogTable) {
List sqls = new ArrayList<>();
- String sqlTableName = tablePath.getFullName();
+ String sqlTableName = tablePath.getFullNameWithQuoted("[", "]");
Map columnComments = new HashMap<>();
sqls.add(
String.format(
@@ -137,6 +146,7 @@ public String build(TablePath tablePath, CatalogTable catalogTable) {
sqls.add("COLLATE = " + collate);
}
String sqlTableSql = String.join(" ", sqls) + ";";
+ sqlTableSql = CatalogUtils.quoteIdentifier(sqlTableSql, fieldIde);
StringBuilder tableAndColumnComment = new StringBuilder();
if (comment != null) {
sqls.add("COMMENT = '" + comment + "'");
@@ -185,7 +195,7 @@ private String buildColumnsIdentifySql(String catalogName, Map c
private String buildColumnIdentifySql(
Column column, String catalogName, Map columnComments) {
final List columnSqls = new ArrayList<>();
- columnSqls.add(column.getName());
+ columnSqls.add("[" + column.getName() + "]");
String tyNameDef = "";
if (StringUtils.equals(catalogName, "sqlserver")) {
columnSqls.add(column.getSourceType());
@@ -244,19 +254,7 @@ private String buildColumnIdentifySql(
} else {
columnSqls.add("NOT NULL");
}
- // default value
- // if (column.getDefaultValue() != null) {
- // String defaultValue = "'" + column.getDefaultValue().toString() + "'";
- // if (StringUtils.equals(SqlServerType.BINARY.getName(), tyNameDef)
- // && defaultValue.contains("b'")) {
- // String rep = defaultValue.replace("b", "").replace("'", "");
- // defaultValue = "0x" + Integer.toHexString(Integer.parseInt(rep));
- // } else if (StringUtils.equals(SqlServerType.BIT.getName(), tyNameDef)
- // && defaultValue.contains("b'")) {
- // defaultValue = defaultValue.replace("b", "").replace("'", "");
- // }
- // columnSqls.add("DEFAULT " + defaultValue);
- // }
+
// comment
if (column.getComment() != null) {
columnComments.put(column.getName(), column.getComment());
@@ -267,7 +265,10 @@ private String buildColumnIdentifySql(
private String buildPrimaryKeySql() {
// .map(columnName -> "`" + columnName + "`")
- String key = String.join(", ", primaryKey.getColumnNames());
+ String key =
+ primaryKey.getColumnNames().stream()
+ .map(columnName -> "[" + columnName + "]")
+ .collect(Collectors.joining(", "));
// add sort type
return String.format("PRIMARY KEY (%s)", key);
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/tidb/TiDBDataTypeConvertor.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/tidb/TiDBDataTypeConvertor.java
new file mode 100644
index 00000000000..d16f06177ed
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/tidb/TiDBDataTypeConvertor.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.tidb;
+
+import org.apache.seatunnel.api.table.catalog.DataTypeConvertException;
+import org.apache.seatunnel.api.table.catalog.DataTypeConvertor;
+import org.apache.seatunnel.api.table.type.BasicType;
+import org.apache.seatunnel.api.table.type.DecimalType;
+import org.apache.seatunnel.api.table.type.LocalTimeType;
+import org.apache.seatunnel.api.table.type.PrimitiveByteArrayType;
+import org.apache.seatunnel.api.table.type.SeaTunnelDataType;
+import org.apache.seatunnel.api.table.type.SqlType;
+import org.apache.seatunnel.common.exception.CommonErrorCode;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.exception.JdbcConnectorException;
+
+import org.apache.commons.collections4.MapUtils;
+
+import com.google.auto.service.AutoService;
+import com.google.common.collect.ImmutableMap;
+import com.mysql.cj.MysqlType;
+
+import java.util.Collections;
+import java.util.Map;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+@AutoService(DataTypeConvertor.class)
+public class TiDBDataTypeConvertor implements DataTypeConvertor {
+
+ public static final String PRECISION = "precision";
+ public static final String SCALE = "scale";
+
+ public static final Integer DEFAULT_PRECISION = 10;
+
+ public static final Integer DEFAULT_SCALE = 0;
+
+ @Override
+ public SeaTunnelDataType> toSeaTunnelType(String connectorDataType) {
+ checkNotNull(connectorDataType, "connectorDataType can not be null");
+ MysqlType mysqlType = MysqlType.getByName(connectorDataType);
+ Map dataTypeProperties;
+ switch (mysqlType) {
+ case BIGINT_UNSIGNED:
+ case DECIMAL:
+ case DECIMAL_UNSIGNED:
+ case BIT:
+ int left = connectorDataType.indexOf("(");
+ int right = connectorDataType.indexOf(")");
+ int precision = DEFAULT_PRECISION;
+ int scale = DEFAULT_SCALE;
+ if (left != -1 && right != -1) {
+ String[] precisionAndScale =
+ connectorDataType.substring(left + 1, right).split(",");
+ if (precisionAndScale.length == 2) {
+ precision = Integer.parseInt(precisionAndScale[0]);
+ scale = Integer.parseInt(precisionAndScale[1]);
+ } else if (precisionAndScale.length == 1) {
+ precision = Integer.parseInt(precisionAndScale[0]);
+ }
+ }
+ dataTypeProperties = ImmutableMap.of(PRECISION, precision, SCALE, scale);
+ break;
+ default:
+ dataTypeProperties = Collections.emptyMap();
+ break;
+ }
+ return toSeaTunnelType(mysqlType, dataTypeProperties);
+ }
+
+ // todo: It's better to wrapper MysqlType to a pojo in ST, since MysqlType doesn't contains
+ // properties.
+ @Override
+ public SeaTunnelDataType> toSeaTunnelType(
+ MysqlType mysqlType, Map dataTypeProperties)
+ throws DataTypeConvertException {
+ checkNotNull(mysqlType, "mysqlType can not be null");
+ int precision;
+ int scale;
+ switch (mysqlType) {
+ case NULL:
+ return BasicType.VOID_TYPE;
+ case BOOLEAN:
+ return BasicType.BOOLEAN_TYPE;
+ case BIT:
+ precision = (Integer) dataTypeProperties.get(TiDBDataTypeConvertor.PRECISION);
+ if (precision == 1) {
+ return BasicType.BOOLEAN_TYPE;
+ } else {
+ return PrimitiveByteArrayType.INSTANCE;
+ }
+ case TINYINT:
+ return BasicType.BYTE_TYPE;
+ case TINYINT_UNSIGNED:
+ case SMALLINT:
+ return BasicType.SHORT_TYPE;
+ case SMALLINT_UNSIGNED:
+ case INT:
+ case MEDIUMINT:
+ case MEDIUMINT_UNSIGNED:
+ case YEAR:
+ return BasicType.INT_TYPE;
+ case INT_UNSIGNED:
+ case BIGINT:
+ return BasicType.LONG_TYPE;
+ case FLOAT:
+ case FLOAT_UNSIGNED:
+ return BasicType.FLOAT_TYPE;
+ case DOUBLE:
+ case DOUBLE_UNSIGNED:
+ return BasicType.DOUBLE_TYPE;
+ case TIME:
+ return LocalTimeType.LOCAL_TIME_TYPE;
+ case DATE:
+ return LocalTimeType.LOCAL_DATE_TYPE;
+ case TIMESTAMP:
+ case DATETIME:
+ return LocalTimeType.LOCAL_DATE_TIME_TYPE;
+ // TODO: to confirm
+ case CHAR:
+ case VARCHAR:
+ case TINYTEXT:
+ case TEXT:
+ case MEDIUMTEXT:
+ case LONGTEXT:
+ case JSON:
+ case ENUM:
+ return BasicType.STRING_TYPE;
+ case BINARY:
+ case VARBINARY:
+ case TINYBLOB:
+ case BLOB:
+ case MEDIUMBLOB:
+ case LONGBLOB:
+ case GEOMETRY:
+ return PrimitiveByteArrayType.INSTANCE;
+ case BIGINT_UNSIGNED:
+ case DECIMAL:
+ case DECIMAL_UNSIGNED:
+ precision = MapUtils.getInteger(dataTypeProperties, PRECISION, DEFAULT_PRECISION);
+ scale = MapUtils.getInteger(dataTypeProperties, SCALE, DEFAULT_SCALE);
+ return new DecimalType(precision, scale);
+ // TODO: support 'SET' & 'YEAR' type
+ default:
+ throw DataTypeConvertException.convertToSeaTunnelDataTypeException(mysqlType);
+ }
+ }
+
+ @Override
+ public MysqlType toConnectorType(
+ SeaTunnelDataType> seaTunnelDataType, Map dataTypeProperties)
+ throws DataTypeConvertException {
+ SqlType sqlType = seaTunnelDataType.getSqlType();
+ // todo: verify
+ switch (sqlType) {
+ case MAP:
+ case ROW:
+ case STRING:
+ return MysqlType.VARCHAR;
+ case BOOLEAN:
+ return MysqlType.BOOLEAN;
+ case TINYINT:
+ return MysqlType.TINYINT;
+ case SMALLINT:
+ return MysqlType.SMALLINT;
+ case INT:
+ return MysqlType.INT;
+ case BIGINT:
+ return MysqlType.BIGINT;
+ case FLOAT:
+ return MysqlType.FLOAT;
+ case DOUBLE:
+ return MysqlType.DOUBLE;
+ case DECIMAL:
+ return MysqlType.DECIMAL;
+ case NULL:
+ return MysqlType.NULL;
+ case BYTES:
+ return MysqlType.BIT;
+ case DATE:
+ return MysqlType.DATE;
+ case TIME:
+ return MysqlType.TIME;
+ case TIMESTAMP:
+ return MysqlType.DATETIME;
+ default:
+ throw new JdbcConnectorException(
+ CommonErrorCode.UNSUPPORTED_DATA_TYPE,
+ String.format("Doesn't support TiDB type '%s' yet", sqlType));
+ }
+ }
+
+ @Override
+ public String getIdentity() {
+ return "TiDB";
+ }
+}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/utils/CatalogUtils.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/utils/CatalogUtils.java
new file mode 100644
index 00000000000..4b60f92d80a
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/utils/CatalogUtils.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.utils;
+
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum;
+
+import org.apache.commons.lang3.StringUtils;
+
+public class CatalogUtils {
+ public static String getFieldIde(String identifier, String fieldIde) {
+ if (StringUtils.isBlank(fieldIde)) {
+ return identifier;
+ }
+ switch (FieldIdeEnum.valueOf(fieldIde.toUpperCase())) {
+ case LOWERCASE:
+ return identifier.toLowerCase();
+ case UPPERCASE:
+ return identifier.toUpperCase();
+ default:
+ return identifier;
+ }
+ }
+
+ public static String quoteIdentifier(String identifier, String fieldIde, String quote) {
+ if (identifier.contains(".")) {
+ String[] parts = identifier.split("\\.");
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < parts.length - 1; i++) {
+ sb.append(quote).append(parts[i]).append(quote).append(".");
+ }
+ return sb.append(quote)
+ .append(getFieldIde(parts[parts.length - 1], fieldIde))
+ .append(quote)
+ .toString();
+ }
+
+ return quote + getFieldIde(identifier, fieldIde) + quote;
+ }
+
+ public static String quoteIdentifier(String identifier, String fieldIde) {
+ return getFieldIde(identifier, fieldIde);
+ }
+
+ public static String quoteTableIdentifier(String identifier, String fieldIde) {
+ if (identifier.contains(".")) {
+ String[] parts = identifier.split("\\.");
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < parts.length - 1; i++) {
+ sb.append(parts[i]).append(".");
+ }
+ return sb.append(getFieldIde(parts[parts.length - 1], fieldIde)).toString();
+ }
+
+ return getFieldIde(identifier, fieldIde);
+ }
+}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/config/JdbcOptions.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/config/JdbcOptions.java
index 5d2254cd347..b01fc872f31 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/config/JdbcOptions.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/config/JdbcOptions.java
@@ -19,6 +19,7 @@
import org.apache.seatunnel.api.configuration.Option;
import org.apache.seatunnel.api.configuration.Options;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum;
import java.math.BigDecimal;
import java.util.List;
@@ -154,4 +155,10 @@ public interface JdbcOptions {
.intType()
.noDefaultValue()
.withDescription("partition num");
+
+ Option FIELD_IDE =
+ Options.key("field_ide")
+ .enumType(FieldIdeEnum.class)
+ .noDefaultValue()
+ .withDescription("Whether case conversion is required");
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialect.java
index 8a0b31a5eeb..e0cf5252a60 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialect.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialect.java
@@ -20,6 +20,9 @@
import org.apache.seatunnel.api.table.catalog.TablePath;
import org.apache.seatunnel.connectors.seatunnel.jdbc.config.JdbcSourceConfig;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.JdbcRowConverter;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum;
+
+import org.apache.commons.lang3.StringUtils;
import java.io.Serializable;
import java.sql.Connection;
@@ -68,9 +71,13 @@ default String hashModForField(String fieldName, int mod) {
default String quoteIdentifier(String identifier) {
return identifier;
}
+ /** Quotes the identifier for database name or field name */
+ default String quoteDatabaseIdentifier(String identifier) {
+ return identifier;
+ }
default String tableIdentifier(String database, String tableName) {
- return quoteIdentifier(database) + "." + quoteIdentifier(tableName);
+ return quoteDatabaseIdentifier(database) + "." + quoteIdentifier(tableName);
}
/**
@@ -219,4 +226,18 @@ default ResultSetMetaData getResultSetMetaData(
default String extractTableName(TablePath tablePath) {
return tablePath.getSchemaAndTableName();
}
+
+ default String getFieldIde(String identifier, String fieldIde) {
+ if (StringUtils.isEmpty(fieldIde)) {
+ return identifier;
+ }
+ switch (FieldIdeEnum.valueOf(fieldIde.toUpperCase())) {
+ case LOWERCASE:
+ return identifier.toLowerCase();
+ case UPPERCASE:
+ return identifier.toUpperCase();
+ default:
+ return identifier;
+ }
+ }
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialectFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialectFactory.java
index 3d66de65909..5439937f53d 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialectFactory.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialectFactory.java
@@ -44,7 +44,7 @@ public interface JdbcDialectFactory {
* @param compatibleMode The compatible mode
* @return a new instance of {@link JdbcDialect}
*/
- default JdbcDialect create(String compatibleMode) {
+ default JdbcDialect create(String compatibleMode, String fieldId) {
return create();
}
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialectLoader.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialectLoader.java
index b49df35ff3f..350a22e20c6 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialectLoader.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/JdbcDialectLoader.java
@@ -36,6 +36,10 @@ public final class JdbcDialectLoader {
private JdbcDialectLoader() {}
+ public static JdbcDialect load(String url, String compatibleMode) {
+ return load(url, compatibleMode, "");
+ }
+
/**
* Loads the unique JDBC Dialect that can handle the given database url.
*
@@ -45,7 +49,7 @@ private JdbcDialectLoader() {}
* unambiguously process the given database URL.
* @return The loaded dialect.
*/
- public static JdbcDialect load(String url, String compatibleMode) {
+ public static JdbcDialect load(String url, String compatibleMode, String fieldIde) {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
List foundFactories = discoverFactories(cl);
@@ -90,7 +94,7 @@ public static JdbcDialect load(String url, String compatibleMode) {
.collect(Collectors.joining("\n"))));
}
- return matchingFactories.get(0).create(compatibleMode);
+ return matchingFactories.get(0).create(compatibleMode, fieldIde);
}
private static List discoverFactories(ClassLoader classLoader) {
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dialectenum/FieldIdeEnum.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dialectenum/FieldIdeEnum.java
new file mode 100644
index 00000000000..39f95210623
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/dialectenum/FieldIdeEnum.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum;
+
+public enum FieldIdeEnum {
+ ORIGINAL("original"), // Original string form
+ UPPERCASE("uppercase"), // Convert to uppercase
+ LOWERCASE("lowercase"); // Convert to lowercase
+
+ private final String value;
+
+ FieldIdeEnum(String value) {
+ this.value = value;
+ }
+
+ public String getValue() {
+ return value;
+ }
+}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseDialect.java
new file mode 100644
index 00000000000..2f6d5661063
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseDialect.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.kingbase;
+
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.JdbcRowConverter;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectTypeMapper;
+
+import java.util.Arrays;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+public class KingbaseDialect implements JdbcDialect {
+
+ @Override
+ public String dialectName() {
+ return "Kingbase";
+ }
+
+ @Override
+ public JdbcRowConverter getRowConverter() {
+ return new KingbaseJdbcRowConverter();
+ }
+
+ @Override
+ public JdbcDialectTypeMapper getJdbcDialectTypeMapper() {
+ return new KingbaseTypeMapper();
+ }
+
+ @Override
+ public Optional getUpsertStatement(
+ String database, String tableName, String[] fieldNames, String[] uniqueKeyFields) {
+ String uniqueColumns =
+ Arrays.stream(uniqueKeyFields)
+ .map(this::quoteIdentifier)
+ .collect(Collectors.joining(", "));
+ String updateClause =
+ Arrays.stream(fieldNames)
+ .map(
+ fieldName ->
+ quoteIdentifier(fieldName)
+ + "=EXCLUDED."
+ + quoteIdentifier(fieldName))
+ .collect(Collectors.joining(", "));
+ String upsertSQL =
+ String.format(
+ "%s ON CONFLICT (%s) DO UPDATE SET %s",
+ getInsertIntoStatement(database, tableName, fieldNames),
+ uniqueColumns,
+ updateClause);
+ return Optional.of(upsertSQL);
+ }
+}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseDialectFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseDialectFactory.java
new file mode 100644
index 00000000000..f9998610351
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseDialectFactory.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.kingbase;
+
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectFactory;
+
+import com.google.auto.service.AutoService;
+
+/** Factory for {@link KingbaseDialect}; registered via {@code AutoService} SPI. */
+@AutoService(JdbcDialectFactory.class)
+public class KingbaseDialectFactory implements JdbcDialectFactory {
+
+    /** JDBC URL prefix handled by this factory. */
+    private static final String KINGBASE_URL_PREFIX = "jdbc:kingbase8:";
+
+    @Override
+    public boolean acceptsURL(String url) {
+        return url.startsWith(KINGBASE_URL_PREFIX);
+    }
+
+    @Override
+    public JdbcDialect create() {
+        return new KingbaseDialect();
+    }
+}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseJdbcRowConverter.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseJdbcRowConverter.java
new file mode 100644
index 00000000000..9577e12f620
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseJdbcRowConverter.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.kingbase;
+
+import org.apache.seatunnel.api.table.type.SeaTunnelDataType;
+import org.apache.seatunnel.api.table.type.SeaTunnelRow;
+import org.apache.seatunnel.api.table.type.SeaTunnelRowType;
+import org.apache.seatunnel.common.exception.CommonErrorCode;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.exception.JdbcConnectorException;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.AbstractJdbcRowConverter;
+
+import java.math.BigDecimal;
+import java.sql.Date;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.util.Optional;
+
+public class KingbaseJdbcRowConverter extends AbstractJdbcRowConverter {
+
+ @Override
+ public String converterName() {
+ return "KingBase";
+ }
+
+ @Override
+ @SuppressWarnings("checkstyle:Indentation")
+ public SeaTunnelRow toInternal(ResultSet rs, SeaTunnelRowType typeInfo) throws SQLException {
+ Object[] fields = new Object[typeInfo.getTotalFields()];
+ for (int fieldIndex = 0; fieldIndex < typeInfo.getTotalFields(); fieldIndex++) {
+ SeaTunnelDataType> seaTunnelDataType = typeInfo.getFieldType(fieldIndex);
+ int resultSetIndex = fieldIndex + 1;
+ switch (seaTunnelDataType.getSqlType()) {
+ case STRING:
+ fields[fieldIndex] = rs.getString(resultSetIndex);
+ break;
+ case BOOLEAN:
+ fields[fieldIndex] = rs.getBoolean(resultSetIndex);
+ break;
+ case TINYINT:
+ fields[fieldIndex] = rs.getByte(resultSetIndex);
+ break;
+ case SMALLINT:
+ fields[fieldIndex] = rs.getShort(resultSetIndex);
+ break;
+ case INT:
+ fields[fieldIndex] = rs.getInt(resultSetIndex);
+ break;
+ case BIGINT:
+ fields[fieldIndex] = rs.getLong(resultSetIndex);
+ break;
+ case FLOAT:
+ fields[fieldIndex] = rs.getFloat(resultSetIndex);
+ break;
+ case DOUBLE:
+ fields[fieldIndex] = rs.getDouble(resultSetIndex);
+ break;
+ case DECIMAL:
+ fields[fieldIndex] = rs.getBigDecimal(resultSetIndex);
+ break;
+ case DATE:
+ Date sqlDate = rs.getDate(resultSetIndex);
+ fields[fieldIndex] =
+ Optional.ofNullable(sqlDate).map(Date::toLocalDate).orElse(null);
+ break;
+ case TIME:
+ Time sqlTime = rs.getTime(resultSetIndex);
+ fields[fieldIndex] =
+ Optional.ofNullable(sqlTime).map(Time::toLocalTime).orElse(null);
+ break;
+ case TIMESTAMP:
+ Timestamp sqlTimestamp = rs.getTimestamp(resultSetIndex);
+ fields[fieldIndex] =
+ Optional.ofNullable(sqlTimestamp)
+ .map(Timestamp::toLocalDateTime)
+ .orElse(null);
+ break;
+ case BYTES:
+ fields[fieldIndex] = rs.getBytes(resultSetIndex);
+ break;
+ case NULL:
+ fields[fieldIndex] = null;
+ break;
+ case ROW:
+ case MAP:
+ case ARRAY:
+ default:
+ throw new JdbcConnectorException(
+ CommonErrorCode.UNSUPPORTED_DATA_TYPE,
+ "Unexpected value: " + seaTunnelDataType);
+ }
+ }
+ return new SeaTunnelRow(fields);
+ }
+
+ @Override
+ public PreparedStatement toExternal(
+ SeaTunnelRowType rowType, SeaTunnelRow row, PreparedStatement statement)
+ throws SQLException {
+ for (int fieldIndex = 0; fieldIndex < rowType.getTotalFields(); fieldIndex++) {
+ SeaTunnelDataType> seaTunnelDataType = rowType.getFieldType(fieldIndex);
+ int statementIndex = fieldIndex + 1;
+ Object fieldValue = row.getField(fieldIndex);
+ if (fieldValue == null) {
+ statement.setObject(statementIndex, null);
+ continue;
+ }
+
+ switch (seaTunnelDataType.getSqlType()) {
+ case STRING:
+ statement.setString(statementIndex, (String) row.getField(fieldIndex));
+ break;
+ case BOOLEAN:
+ statement.setBoolean(statementIndex, (Boolean) row.getField(fieldIndex));
+ break;
+ case TINYINT:
+ statement.setByte(statementIndex, (Byte) row.getField(fieldIndex));
+ break;
+ case SMALLINT:
+ statement.setShort(statementIndex, (Short) row.getField(fieldIndex));
+ break;
+ case INT:
+ statement.setInt(statementIndex, (Integer) row.getField(fieldIndex));
+ break;
+ case BIGINT:
+ statement.setLong(statementIndex, (Long) row.getField(fieldIndex));
+ break;
+ case FLOAT:
+ statement.setFloat(statementIndex, (Float) row.getField(fieldIndex));
+ break;
+ case DOUBLE:
+ statement.setDouble(statementIndex, (Double) row.getField(fieldIndex));
+ break;
+ case DECIMAL:
+ statement.setBigDecimal(statementIndex, (BigDecimal) row.getField(fieldIndex));
+ break;
+ case DATE:
+ LocalDate localDate = (LocalDate) row.getField(fieldIndex);
+ statement.setDate(statementIndex, java.sql.Date.valueOf(localDate));
+ break;
+ case TIME:
+ LocalTime localTime = (LocalTime) row.getField(fieldIndex);
+ statement.setTime(statementIndex, java.sql.Time.valueOf(localTime));
+ break;
+ case TIMESTAMP:
+ LocalDateTime localDateTime = (LocalDateTime) row.getField(fieldIndex);
+ statement.setTimestamp(
+ statementIndex, java.sql.Timestamp.valueOf(localDateTime));
+ break;
+ case BYTES:
+ statement.setBytes(statementIndex, (byte[]) row.getField(fieldIndex));
+ break;
+ case NULL:
+ statement.setNull(statementIndex, java.sql.Types.NULL);
+ break;
+ case ROW:
+ case MAP:
+ case ARRAY:
+ default:
+ throw new JdbcConnectorException(
+ CommonErrorCode.UNSUPPORTED_DATA_TYPE,
+ "Unexpected value: " + seaTunnelDataType);
+ }
+ }
+ return statement;
+ }
+}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseTypeMapper.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseTypeMapper.java
new file mode 100644
index 00000000000..439c8fc4202
--- /dev/null
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/kingbase/KingbaseTypeMapper.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.kingbase;
+
+import org.apache.seatunnel.api.table.type.BasicType;
+import org.apache.seatunnel.api.table.type.DecimalType;
+import org.apache.seatunnel.api.table.type.LocalTimeType;
+import org.apache.seatunnel.api.table.type.SeaTunnelDataType;
+import org.apache.seatunnel.common.exception.CommonErrorCode;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.exception.JdbcConnectorException;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectTypeMapper;
+
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+
+/**
+ * Maps KingbaseES column type names (as reported by {@link ResultSetMetaData}) to SeaTunnel data
+ * types. Array, JSON, BYTEA and timestamptz types are not supported yet and raise an exception.
+ */
+public class KingbaseTypeMapper implements JdbcDialectTypeMapper {
+
+    // ---- serial / binary types ----
+    private static final String KB_SMALLSERIAL = "SMALLSERIAL";
+    private static final String KB_SERIAL = "SERIAL";
+    private static final String KB_BIGSERIAL = "BIGSERIAL";
+    private static final String KB_BYTEA = "BYTEA";
+    private static final String KB_BYTEA_ARRAY = "_BYTEA";
+    // ---- numeric types (array variants prefixed with "_") ----
+    private static final String KB_SMALLINT = "INT2";
+    private static final String KB_SMALLINT_ARRAY = "_INT2";
+    private static final String KB_INTEGER = "INT4";
+    private static final String KB_INTEGER_ARRAY = "_INT4";
+    private static final String KB_BIGINT = "INT8";
+    private static final String KB_BIGINT_ARRAY = "_INT8";
+    private static final String KB_REAL = "FLOAT4";
+    private static final String KB_REAL_ARRAY = "_FLOAT4";
+    private static final String KB_DOUBLE_PRECISION = "FLOAT8";
+    private static final String KB_DOUBLE_PRECISION_ARRAY = "_FLOAT8";
+    private static final String KB_NUMERIC = "NUMERIC";
+    private static final String KB_NUMERIC_ARRAY = "_NUMERIC";
+    private static final String KB_BOOLEAN = "BOOL";
+    private static final String KB_BOOLEAN_ARRAY = "_BOOL";
+    // ---- temporal types ----
+    private static final String KB_TIMESTAMP = "TIMESTAMP";
+    private static final String KB_TIMESTAMP_ARRAY = "_TIMESTAMP";
+    private static final String KB_TIMESTAMPTZ = "TIMESTAMPTZ";
+    private static final String KB_TIMESTAMPTZ_ARRAY = "_TIMESTAMPTZ";
+    private static final String KB_DATE = "DATE";
+    private static final String KB_DATE_ARRAY = "_DATE";
+    private static final String KB_TIME = "TIME";
+    private static final String KB_TIME_ARRAY = "_TIME";
+    // ---- character / json types ----
+    private static final String KB_TEXT = "TEXT";
+    private static final String KB_TEXT_ARRAY = "_TEXT";
+    private static final String KB_CHAR = "BPCHAR";
+    private static final String KB_CHAR_ARRAY = "_BPCHAR";
+    private static final String KB_CHARACTER = "CHARACTER";
+
+    private static final String KB_CHARACTER_VARYING = "VARCHAR";
+    private static final String KB_CHARACTER_VARYING_ARRAY = "_VARCHAR";
+    private static final String KB_JSON = "JSON";
+    private static final String KB_JSONB = "JSONB";
+
+    /**
+     * Maps the column at {@code colIndex} (1-based) to a SeaTunnel data type.
+     *
+     * @param metadata result-set metadata describing the column
+     * @param colIndex 1-based column index
+     * @return the corresponding SeaTunnel data type
+     * @throws SQLException if metadata access fails
+     * @throws JdbcConnectorException if the KingbaseES type is not supported
+     */
+    @SuppressWarnings("checkstyle:MagicNumber")
+    @Override
+    public SeaTunnelDataType<?> mapping(ResultSetMetaData metadata, int colIndex)
+            throws SQLException {
+
+        // Type names are compared case-insensitively via upper-casing.
+        String kbType = metadata.getColumnTypeName(colIndex).toUpperCase();
+
+        int precision = metadata.getPrecision(colIndex);
+
+        switch (kbType) {
+            case KB_BOOLEAN:
+                return BasicType.BOOLEAN_TYPE;
+            case KB_SMALLINT:
+                return BasicType.SHORT_TYPE;
+            case KB_SMALLSERIAL:
+            case KB_INTEGER:
+            case KB_SERIAL:
+                return BasicType.INT_TYPE;
+            case KB_BIGINT:
+            case KB_BIGSERIAL:
+                return BasicType.LONG_TYPE;
+            case KB_REAL:
+                return BasicType.FLOAT_TYPE;
+            case KB_DOUBLE_PRECISION:
+                return BasicType.DOUBLE_TYPE;
+            case KB_NUMERIC:
+                // see SPARK-26538: handle numeric without explicit precision and scale.
+                if (precision > 0) {
+                    return new DecimalType(precision, metadata.getScale(colIndex));
+                }
+                // Fallback for unconstrained NUMERIC columns.
+                return new DecimalType(38, 18);
+            case KB_CHAR:
+            case KB_CHARACTER:
+            case KB_CHARACTER_VARYING:
+            case KB_TEXT:
+                return BasicType.STRING_TYPE;
+            case KB_TIMESTAMP:
+                return LocalTimeType.LOCAL_DATE_TIME_TYPE;
+            case KB_TIME:
+                return LocalTimeType.LOCAL_TIME_TYPE;
+            case KB_DATE:
+                return LocalTimeType.LOCAL_DATE_TYPE;
+            // All array, JSON, BYTEA and timestamptz types are unsupported for now; they are
+            // listed explicitly so the intent is visible, but all fall through to the default.
+            case KB_CHAR_ARRAY:
+            case KB_CHARACTER_VARYING_ARRAY:
+            case KB_TEXT_ARRAY:
+            case KB_DOUBLE_PRECISION_ARRAY:
+            case KB_REAL_ARRAY:
+            case KB_BIGINT_ARRAY:
+            case KB_SMALLINT_ARRAY:
+            case KB_INTEGER_ARRAY:
+            case KB_BYTEA_ARRAY:
+            case KB_BOOLEAN_ARRAY:
+            case KB_TIMESTAMP_ARRAY:
+            case KB_NUMERIC_ARRAY:
+            case KB_TIMESTAMPTZ:
+            case KB_TIMESTAMPTZ_ARRAY:
+            case KB_TIME_ARRAY:
+            case KB_DATE_ARRAY:
+            case KB_JSONB:
+            case KB_JSON:
+            case KB_BYTEA:
+            default:
+                throw new JdbcConnectorException(
+                        CommonErrorCode.UNSUPPORTED_OPERATION,
+                        String.format("Doesn't support KingBaseES type '%s' yet", kbType));
+        }
+    }
+}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MySqlDialectFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MySqlDialectFactory.java
index 10047311b93..a4f89a4dc85 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MySqlDialectFactory.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MySqlDialectFactory.java
@@ -22,6 +22,8 @@
import com.google.auto.service.AutoService;
+import javax.annotation.Nonnull;
+
/** Factory for {@link MysqlDialect}. */
@AutoService(JdbcDialectFactory.class)
public class MySqlDialectFactory implements JdbcDialectFactory {
@@ -34,4 +36,9 @@ public boolean acceptsURL(String url) {
public JdbcDialect create() {
return new MysqlDialect();
}
+
+ @Override
+ public JdbcDialect create(@Nonnull String compatibleMode, String fieldIde) {
+ return new MysqlDialect(fieldIde);
+ }
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MysqlDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MysqlDialect.java
index c71dc3f76a1..1ae69a6131f 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MysqlDialect.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/mysql/MysqlDialect.java
@@ -21,6 +21,7 @@
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.JdbcRowConverter;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectTypeMapper;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -31,6 +32,14 @@
import java.util.stream.Collectors;
public class MysqlDialect implements JdbcDialect {
+ public String fieldIde = FieldIdeEnum.ORIGINAL.getValue();
+
+ public MysqlDialect() {}
+
+ public MysqlDialect(String fieldIde) {
+ this.fieldIde = fieldIde;
+ }
+
@Override
public String dialectName() {
return "MySQL";
@@ -48,6 +57,11 @@ public JdbcDialectTypeMapper getJdbcDialectTypeMapper() {
@Override
public String quoteIdentifier(String identifier) {
+ return "`" + getFieldIde(identifier, fieldIde) + "`";
+ }
+
+ @Override
+ public String quoteDatabaseIdentifier(String identifier) {
return "`" + identifier + "`";
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseDialectFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseDialectFactory.java
index 66df84205ed..b3a456870cc 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseDialectFactory.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oceanbase/OceanBaseDialectFactory.java
@@ -40,7 +40,7 @@ public JdbcDialect create() {
}
@Override
- public JdbcDialect create(@Nonnull String compatibleMode) {
+ public JdbcDialect create(@Nonnull String compatibleMode, String fieldIde) {
if ("oracle".equalsIgnoreCase(compatibleMode)) {
return new OracleDialect();
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialect.java
index 7edd935e780..e8e583dc143 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialect.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialect.java
@@ -20,6 +20,7 @@
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.JdbcRowConverter;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectTypeMapper;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -33,6 +34,13 @@
public class OracleDialect implements JdbcDialect {
private static final int DEFAULT_ORACLE_FETCH_SIZE = 128;
+ public String fieldIde = FieldIdeEnum.ORIGINAL.getValue();
+
+ public OracleDialect(String fieldIde) {
+ this.fieldIde = fieldIde;
+ }
+
+ public OracleDialect() {}
@Override
public String dialectName() {
@@ -56,7 +64,18 @@ public JdbcDialectTypeMapper getJdbcDialectTypeMapper() {
@Override
public String quoteIdentifier(String identifier) {
- return identifier;
+ if (identifier.contains(".")) {
+ String[] parts = identifier.split("\\.");
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < parts.length - 1; i++) {
+ sb.append("\"").append(parts[i]).append("\"").append(".");
+ }
+ return sb.append("\"")
+ .append(getFieldIde(parts[parts.length - 1], fieldIde))
+ .append("\"")
+ .toString();
+ }
+ return "\"" + getFieldIde(identifier, fieldIde) + "\"";
}
@Override
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialectFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialectFactory.java
index 168dc4d8902..121098c4614 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialectFactory.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/oracle/OracleDialectFactory.java
@@ -22,6 +22,8 @@
import com.google.auto.service.AutoService;
+import javax.annotation.Nonnull;
+
/** Factory for {@link OracleDialect}. */
@AutoService(JdbcDialectFactory.class)
public class OracleDialectFactory implements JdbcDialectFactory {
@@ -34,4 +36,9 @@ public boolean acceptsURL(String url) {
public JdbcDialect create() {
return new OracleDialect();
}
+
+ @Override
+ public JdbcDialect create(@Nonnull String compatibleMode, String fieldIde) {
+ return new OracleDialect(fieldIde);
+ }
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresDialect.java
index b36a28a5a60..f206589af59 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresDialect.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresDialect.java
@@ -20,6 +20,7 @@
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.JdbcRowConverter;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectTypeMapper;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -33,6 +34,14 @@ public class PostgresDialect implements JdbcDialect {
public static final int DEFAULT_POSTGRES_FETCH_SIZE = 128;
+ public String fieldIde = FieldIdeEnum.ORIGINAL.getValue();
+
+ public PostgresDialect() {}
+
+ public PostgresDialect(String fieldIde) {
+ this.fieldIde = fieldIde;
+ }
+
@Override
public String dialectName() {
return "PostgreSQL";
@@ -88,4 +97,32 @@ public PreparedStatement creatPreparedStatement(
}
return statement;
}
+
+ @Override
+ public String tableIdentifier(String database, String tableName) {
+ // resolve pg database name upper or lower not recognised
+ return quoteDatabaseIdentifier(database) + "." + quoteIdentifier(tableName);
+ }
+
+ @Override
+ public String quoteIdentifier(String identifier) {
+ if (identifier.contains(".")) {
+ String[] parts = identifier.split("\\.");
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < parts.length - 1; i++) {
+ sb.append("\"").append(parts[i]).append("\"").append(".");
+ }
+ return sb.append("\"")
+ .append(getFieldIde(parts[parts.length - 1], fieldIde))
+ .append("\"")
+ .toString();
+ }
+
+ return "\"" + getFieldIde(identifier, fieldIde) + "\"";
+ }
+
+ @Override
+ public String quoteDatabaseIdentifier(String identifier) {
+ return "\"" + identifier + "\"";
+ }
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresDialectFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresDialectFactory.java
index 857c85290df..59dc0b45c68 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresDialectFactory.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psql/PostgresDialectFactory.java
@@ -39,10 +39,10 @@ public JdbcDialect create() {
}
@Override
- public JdbcDialect create(@Nonnull String compatibleMode) {
+ public JdbcDialect create(@Nonnull String compatibleMode, String fieldIde) {
if ("postgresLow".equalsIgnoreCase(compatibleMode)) {
- return new PostgresLowDialect();
+ return new PostgresLowDialect(fieldIde);
}
- return new PostgresDialect();
+ return new PostgresDialect(fieldIde);
}
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psqllow/PostgresLowDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psqllow/PostgresLowDialect.java
index e367207ffa2..9100382628d 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psqllow/PostgresLowDialect.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/psqllow/PostgresLowDialect.java
@@ -22,6 +22,11 @@
import java.util.Optional;
public class PostgresLowDialect extends PostgresDialect {
+
+ public PostgresLowDialect(String fieldIde) {
+ this.fieldIde = fieldIde;
+ }
+
@Override
public Optional getUpsertStatement(
String database, String tableName, String[] fieldNames, String[] uniqueKeyFields) {
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlServerDialect.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlServerDialect.java
index 2121369e22a..792c03bd760 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlServerDialect.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlServerDialect.java
@@ -20,6 +20,7 @@
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.converter.JdbcRowConverter;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectTypeMapper;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum;
import java.util.Arrays;
import java.util.List;
@@ -27,6 +28,15 @@
import java.util.stream.Collectors;
public class SqlServerDialect implements JdbcDialect {
+
+ public String fieldIde = FieldIdeEnum.ORIGINAL.getValue();
+
+ public SqlServerDialect() {}
+
+ public SqlServerDialect(String fieldIde) {
+ this.fieldIde = fieldIde;
+ }
+
@Override
public String dialectName() {
return "Sqlserver";
@@ -105,4 +115,26 @@ public Optional getUpsertStatement(
return Optional.of(upsertSQL);
}
+
+ @Override
+ public String quoteIdentifier(String identifier) {
+ if (identifier.contains(".")) {
+ String[] parts = identifier.split("\\.");
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < parts.length - 1; i++) {
+ sb.append("[").append(parts[i]).append("]").append(".");
+ }
+ return sb.append("[")
+ .append(getFieldIde(parts[parts.length - 1], fieldIde))
+ .append("]")
+ .toString();
+ }
+
+ return "[" + getFieldIde(identifier, fieldIde) + "]";
+ }
+
+ @Override
+ public String quoteDatabaseIdentifier(String identifier) {
+ return "[" + identifier + "]";
+ }
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlServerDialectFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlServerDialectFactory.java
index d8fce3c43c1..d7dae4efd57 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlServerDialectFactory.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/sqlserver/SqlServerDialectFactory.java
@@ -22,6 +22,8 @@
import com.google.auto.service.AutoService;
+import javax.annotation.Nonnull;
+
/** Factory for {@link SqlServerDialect}. */
@AutoService(JdbcDialectFactory.class)
public class SqlServerDialectFactory implements JdbcDialectFactory {
@@ -34,4 +36,9 @@ public boolean acceptsURL(String url) {
public JdbcDialect create() {
return new SqlServerDialect();
}
+
+ @Override
+ public JdbcDialect create(@Nonnull String compatibleMode, String fieldIde) {
+ return new SqlServerDialect(fieldIde);
+ }
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/JdbcSink.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/JdbcSink.java
index c23619b5aad..bbb776e486a 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/JdbcSink.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/JdbcSink.java
@@ -38,10 +38,13 @@
import org.apache.seatunnel.api.table.type.SeaTunnelDataType;
import org.apache.seatunnel.api.table.type.SeaTunnelRow;
import org.apache.seatunnel.api.table.type.SeaTunnelRowType;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.utils.CatalogUtils;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.config.JdbcOptions;
import org.apache.seatunnel.connectors.seatunnel.jdbc.config.JdbcSinkConfig;
import org.apache.seatunnel.connectors.seatunnel.jdbc.exception.JdbcConnectorException;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectLoader;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum;
import org.apache.seatunnel.connectors.seatunnel.jdbc.state.JdbcAggregatedCommitInfo;
import org.apache.seatunnel.connectors.seatunnel.jdbc.state.JdbcSinkState;
import org.apache.seatunnel.connectors.seatunnel.jdbc.state.XidInfo;
@@ -107,7 +110,10 @@ public void prepare(Config pluginConfig) throws PrepareFailException {
this.dialect =
JdbcDialectLoader.load(
jdbcSinkConfig.getJdbcConnectionConfig().getUrl(),
- jdbcSinkConfig.getJdbcConnectionConfig().getCompatibleMode());
+ jdbcSinkConfig.getJdbcConnectionConfig().getCompatibleMode(),
+ config.get(JdbcOptions.FIELD_IDE) == null
+ ? null
+ : config.get(JdbcOptions.FIELD_IDE).getValue());
this.dataSaveMode = DataSaveMode.KEEP_SCHEMA_AND_DATA;
}
@@ -206,14 +212,21 @@ public void handleSaveMode(DataSaveMode saveMode) {
catalogFactory.factoryIdentifier(),
ReadonlyConfig.fromMap(new HashMap<>(catalogOptions)))) {
catalog.open();
+ FieldIdeEnum fieldIdeEnumEnum = config.get(JdbcOptions.FIELD_IDE);
+ String fieldIde =
+ fieldIdeEnumEnum == null
+ ? FieldIdeEnum.ORIGINAL.getValue()
+ : fieldIdeEnumEnum.getValue();
TablePath tablePath =
TablePath.of(
jdbcSinkConfig.getDatabase()
+ "."
- + jdbcSinkConfig.getTable());
+ + CatalogUtils.quoteTableIdentifier(
+ jdbcSinkConfig.getTable(), fieldIde));
if (!catalog.databaseExists(jdbcSinkConfig.getDatabase())) {
catalog.createDatabase(tablePath, true);
}
+ catalogTable.getOptions().put("fieldIde", fieldIde);
if (!catalog.tableExists(tablePath)) {
catalog.createTable(tablePath, catalogTable, true);
}
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/JdbcSinkFactory.java b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/JdbcSinkFactory.java
index 8209533f9d5..d18ff0d7fdb 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/JdbcSinkFactory.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/main/java/org/apache/seatunnel/connectors/seatunnel/jdbc/sink/JdbcSinkFactory.java
@@ -30,9 +30,11 @@
import org.apache.seatunnel.api.table.factory.TableFactoryContext;
import org.apache.seatunnel.api.table.factory.TableSinkFactory;
import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.JdbcCatalogOptions;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.config.JdbcOptions;
import org.apache.seatunnel.connectors.seatunnel.jdbc.config.JdbcSinkConfig;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialect;
import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.JdbcDialectLoader;
+import org.apache.seatunnel.connectors.seatunnel.jdbc.internal.dialect.dialectenum.FieldIdeEnum;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
@@ -142,10 +144,12 @@ public TableSink createSink(TableFactoryContext context) {
}
final ReadonlyConfig options = config;
JdbcSinkConfig sinkConfig = JdbcSinkConfig.of(config);
+ FieldIdeEnum fieldIdeEnum = config.get(JdbcOptions.FIELD_IDE);
JdbcDialect dialect =
JdbcDialectLoader.load(
sinkConfig.getJdbcConnectionConfig().getUrl(),
- sinkConfig.getJdbcConnectionConfig().getCompatibleMode());
+ sinkConfig.getJdbcConnectionConfig().getCompatibleMode(),
+ fieldIdeEnum == null ? null : fieldIdeEnum.getValue());
CatalogTable finalCatalogTable = catalogTable;
return () ->
new JdbcSink(
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/sql/MysqlCreateTableSqlBuilderTest.java b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/sql/MysqlCreateTableSqlBuilderTest.java
index b75ac68223b..04e00f1de1a 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/sql/MysqlCreateTableSqlBuilderTest.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/sql/MysqlCreateTableSqlBuilderTest.java
@@ -93,12 +93,12 @@ public void testBuild() {
MysqlCreateTableSqlBuilder.builder(tablePath, catalogTable).build("mysql");
// create table sql is change; The old unit tests are no longer applicable
String expect =
- "CREATE TABLE test_table (\n"
- + "\tid null NOT NULL COMMENT 'id', \n"
- + "\tname null NOT NULL COMMENT 'name', \n"
- + "\tage null NULL COMMENT 'age', \n"
- + "\tcreateTime null NULL COMMENT 'createTime', \n"
- + "\tlastUpdateTime null NULL COMMENT 'lastUpdateTime', \n"
+ "CREATE TABLE `test_table` (\n"
+ + "\t`id` null NOT NULL COMMENT 'id', \n"
+ + "\t`name` null NOT NULL COMMENT 'name', \n"
+ + "\t`age` null NULL COMMENT 'age', \n"
+ + "\t`createTime` null NULL COMMENT 'createTime', \n"
+ + "\t`lastUpdateTime` null NULL COMMENT 'lastUpdateTime', \n"
+ "\tPRIMARY KEY (`id`)\n"
+ ") COMMENT = 'User table';";
CONSOLE.println(expect);
diff --git a/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/PostgresDialectFactoryTest.java b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/PostgresDialectFactoryTest.java
index 79b1f11ac93..90b980a69e1 100644
--- a/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/PostgresDialectFactoryTest.java
+++ b/seatunnel-connectors-v2/connector-jdbc/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/internal/dialect/PostgresDialectFactoryTest.java
@@ -30,7 +30,7 @@ public class PostgresDialectFactoryTest {
@Test
public void testPostgresDialectCreate() {
PostgresDialectFactory postgresDialectFactory = new PostgresDialectFactory();
- JdbcDialect postgresLow = postgresDialectFactory.create("postgresLow");
+ JdbcDialect postgresLow = postgresDialectFactory.create("postgresLow", "");
String[] fields = {"id", "name", "age"};
String[] uniqueKeyField = {"id"};
Optional upsertStatement =
diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/config/StarRocksSinkOptions.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/config/StarRocksSinkOptions.java
index eed2afc3605..1129d447162 100644
--- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/config/StarRocksSinkOptions.java
+++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/config/StarRocksSinkOptions.java
@@ -60,6 +60,7 @@ public interface StarRocksSinkOptions {
.stringType()
.defaultValue(
"CREATE TABLE IF NOT EXISTS `${database}`.`${table_name}` (\n"
+ + "${rowtype_primary_key},\n"
+ "${rowtype_fields}\n"
+ ") ENGINE=OLAP\n"
+ " PRIMARY KEY (${rowtype_primary_key})\n"
diff --git a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSaveModeUtil.java b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSaveModeUtil.java
index cb0d086859b..bbbc04eb20e 100644
--- a/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSaveModeUtil.java
+++ b/seatunnel-connectors-v2/connector-starrocks/src/main/java/org/apache/seatunnel/connectors/seatunnel/starrocks/sink/StarRocksSaveModeUtil.java
@@ -27,6 +27,8 @@
import org.apache.commons.lang3.StringUtils;
+import java.util.Comparator;
+import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -82,8 +84,14 @@ private static String mergeColumnInTemplate(
Map columnMap =
tableSchema.getColumns().stream()
.collect(Collectors.toMap(Column::getName, Function.identity()));
- for (String col : columnInTemplate.keySet()) {
- CreateTableParser.ColumnInfo columnInfo = columnInTemplate.get(col);
+ List columnInfosInSeq =
+ columnInTemplate.values().stream()
+ .sorted(
+ Comparator.comparingInt(
+ CreateTableParser.ColumnInfo::getStartIndex))
+ .collect(Collectors.toList());
+ for (CreateTableParser.ColumnInfo columnInfo : columnInfosInSeq) {
+ String col = columnInfo.getName();
if (StringUtils.isEmpty(columnInfo.getInfo())) {
if (columnMap.containsKey(col)) {
Column column = columnMap.get(col);
diff --git a/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/StarRocksCreateTableTest.java b/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/StarRocksCreateTableTest.java
index 22536ffd684..b571deb68ad 100644
--- a/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/StarRocksCreateTableTest.java
+++ b/seatunnel-connectors-v2/connector-starrocks/src/test/java/org/apache/seatunnel/connectors/seatunnel/starrocks/StarRocksCreateTableTest.java
@@ -22,8 +22,11 @@
import org.apache.seatunnel.api.table.catalog.PrimaryKey;
import org.apache.seatunnel.api.table.catalog.TableSchema;
import org.apache.seatunnel.api.table.type.BasicType;
+import org.apache.seatunnel.api.table.type.DecimalType;
+import org.apache.seatunnel.api.table.type.LocalTimeType;
import org.apache.seatunnel.connectors.seatunnel.starrocks.sink.StarRocksSaveModeUtil;
+import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.ArrayList;
@@ -70,4 +73,83 @@ public void test() {
System.out.println(result);
}
+
+ @Test
+ public void testInSeq() {
+
+ List columns = new ArrayList<>();
+
+ columns.add(PhysicalColumn.of("L_ORDERKEY", BasicType.INT_TYPE, null, false, null, ""));
+ columns.add(PhysicalColumn.of("L_PARTKEY", BasicType.INT_TYPE, null, false, null, ""));
+ columns.add(PhysicalColumn.of("L_SUPPKEY", BasicType.INT_TYPE, null, false, null, ""));
+ columns.add(PhysicalColumn.of("L_LINENUMBER", BasicType.INT_TYPE, null, false, null, ""));
+ columns.add(PhysicalColumn.of("L_QUANTITY", new DecimalType(15, 2), null, false, null, ""));
+ columns.add(
+ PhysicalColumn.of(
+ "L_EXTENDEDPRICE", new DecimalType(15, 2), null, false, null, ""));
+ columns.add(PhysicalColumn.of("L_DISCOUNT", new DecimalType(15, 2), null, false, null, ""));
+ columns.add(PhysicalColumn.of("L_TAX", new DecimalType(15, 2), null, false, null, ""));
+ columns.add(
+ PhysicalColumn.of("L_RETURNFLAG", BasicType.STRING_TYPE, null, false, null, ""));
+ columns.add(
+ PhysicalColumn.of("L_LINESTATUS", BasicType.STRING_TYPE, null, false, null, ""));
+ columns.add(
+ PhysicalColumn.of(
+ "L_SHIPDATE", LocalTimeType.LOCAL_DATE_TYPE, null, false, null, ""));
+ columns.add(
+ PhysicalColumn.of(
+ "L_COMMITDATE", LocalTimeType.LOCAL_DATE_TYPE, null, false, null, ""));
+ columns.add(
+ PhysicalColumn.of(
+ "L_RECEIPTDATE", LocalTimeType.LOCAL_DATE_TYPE, null, false, null, ""));
+ columns.add(
+ PhysicalColumn.of("L_SHIPINSTRUCT", BasicType.STRING_TYPE, null, false, null, ""));
+ columns.add(PhysicalColumn.of("L_SHIPMODE", BasicType.STRING_TYPE, null, false, null, ""));
+ columns.add(PhysicalColumn.of("L_COMMENT", BasicType.STRING_TYPE, null, false, null, ""));
+
+ String result =
+ StarRocksSaveModeUtil.fillingCreateSql(
+ "CREATE TABLE IF NOT EXISTS `${database}`.`${table_name}` (\n"
+ + "`L_COMMITDATE`,\n"
+ + "${rowtype_primary_key},\n"
+ + "L_SUPPKEY BIGINT NOT NULL,\n"
+ + "${rowtype_fields}\n"
+ + ") ENGINE=OLAP\n"
+ + " PRIMARY KEY (L_COMMITDATE, ${rowtype_primary_key}, L_SUPPKEY)\n"
+ + "DISTRIBUTED BY HASH (${rowtype_primary_key})"
+ + "PROPERTIES (\n"
+ + " \"replication_num\" = \"1\" \n"
+ + ")",
+ "tpch",
+ "lineitem",
+ TableSchema.builder()
+ .primaryKey(
+ PrimaryKey.of(
+ "", Arrays.asList("L_ORDERKEY", "L_LINENUMBER")))
+ .columns(columns)
+ .build());
+ String expected =
+ "CREATE TABLE IF NOT EXISTS `tpch`.`lineitem` (\n"
+ + "`L_COMMITDATE` DATE NOT NULL ,\n"
+ + "`L_ORDERKEY` INT NOT NULL ,`L_LINENUMBER` INT NOT NULL ,\n"
+ + "L_SUPPKEY BIGINT NOT NULL,\n"
+ + "`L_PARTKEY` INT NOT NULL ,\n"
+ + "`L_QUANTITY` Decimal(15, 2) NOT NULL ,\n"
+ + "`L_EXTENDEDPRICE` Decimal(15, 2) NOT NULL ,\n"
+ + "`L_DISCOUNT` Decimal(15, 2) NOT NULL ,\n"
+ + "`L_TAX` Decimal(15, 2) NOT NULL ,\n"
+ + "`L_RETURNFLAG` STRING NOT NULL ,\n"
+ + "`L_LINESTATUS` STRING NOT NULL ,\n"
+ + "`L_SHIPDATE` DATE NOT NULL ,\n"
+ + "`L_RECEIPTDATE` DATE NOT NULL ,\n"
+ + "`L_SHIPINSTRUCT` STRING NOT NULL ,\n"
+ + "`L_SHIPMODE` STRING NOT NULL ,\n"
+ + "`L_COMMENT` STRING NOT NULL \n"
+ + ") ENGINE=OLAP\n"
+ + " PRIMARY KEY (L_COMMITDATE, `L_ORDERKEY`,`L_LINENUMBER`, L_SUPPKEY)\n"
+ + "DISTRIBUTED BY HASH (`L_ORDERKEY`,`L_LINENUMBER`)PROPERTIES (\n"
+ + " \"replication_num\" = \"1\" \n"
+ + ")";
+ Assertions.assertEquals(result, expected);
+ }
}
diff --git a/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlGate.java b/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlGate.java
new file mode 100644
index 00000000000..10b23013290
--- /dev/null
+++ b/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlGate.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.core.starter.flowcontrol;
+
+import org.apache.seatunnel.shade.com.google.common.util.concurrent.RateLimiter;
+
+import org.apache.seatunnel.api.table.type.SeaTunnelRow;
+
+public class FlowControlGate {
+
+ private final RateLimiter bytesRateLimiter;
+ private final RateLimiter countRateLimiter;
+
+ private FlowControlGate(FlowControlStrategy flowControlStrategy) {
+ this.bytesRateLimiter = RateLimiter.create(flowControlStrategy.getBytesPerSecond());
+ this.countRateLimiter = RateLimiter.create(flowControlStrategy.getCountPreSecond());
+ }
+
+ public void audit(SeaTunnelRow row) {
+ bytesRateLimiter.acquire(row.getBytesSize());
+ countRateLimiter.acquire();
+ }
+
+ public static FlowControlGate create(FlowControlStrategy flowControlStrategy) {
+ return new FlowControlGate(flowControlStrategy);
+ }
+}
diff --git a/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlStrategy.java b/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlStrategy.java
new file mode 100644
index 00000000000..2547d7061e2
--- /dev/null
+++ b/seatunnel-core/seatunnel-core-starter/src/main/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlStrategy.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.core.starter.flowcontrol;
+
+import lombok.Getter;
+import lombok.Setter;
+
+@Getter
+@Setter
+public class FlowControlStrategy {
+
+ int bytesPerSecond;
+ int countPreSecond;
+
+ public FlowControlStrategy(int bytesPerSecond, int countPreSecond) {
+ if (bytesPerSecond <= 0 || countPreSecond <= 0) {
+ throw new IllegalArgumentException(
+ "bytesPerSecond and countPreSecond must be positive");
+ }
+ this.bytesPerSecond = bytesPerSecond;
+ this.countPreSecond = countPreSecond;
+ }
+
+ public static FlowControlStrategy of(int bytesPerSecond, int countPreSecond) {
+ return new FlowControlStrategy(bytesPerSecond, countPreSecond);
+ }
+
+ public static FlowControlStrategy ofBytes(int bytesPerSecond) {
+ return new FlowControlStrategy(bytesPerSecond, Integer.MAX_VALUE);
+ }
+
+ public static FlowControlStrategy ofCount(int countPreSecond) {
+ return new FlowControlStrategy(Integer.MAX_VALUE, countPreSecond);
+ }
+}
diff --git a/seatunnel-core/seatunnel-core-starter/src/test/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlGateTest.java b/seatunnel-core/seatunnel-core-starter/src/test/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlGateTest.java
new file mode 100644
index 00000000000..1bcb695bf52
--- /dev/null
+++ b/seatunnel-core/seatunnel-core-starter/src/test/java/org/apache/seatunnel/core/starter/flowcontrol/FlowControlGateTest.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.core.starter.flowcontrol;
+
+import org.apache.seatunnel.api.table.type.SeaTunnelRow;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.math.BigDecimal;
+import java.time.Clock;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class FlowControlGateTest {
+
+ private static final int rowSize = 181;
+
+ @Test
+ public void testWithBytes() {
+ Clock clock = Clock.systemDefaultZone();
+ FlowControlGate flowControlGate = FlowControlGate.create(FlowControlStrategy.ofBytes(100));
+ List rows = getRows(10);
+ long start = clock.millis();
+ for (SeaTunnelRow row : rows) {
+ flowControlGate.audit(row);
+ }
+ long end = clock.millis();
+ long useTime = rowSize * 10 / 100 * 1000;
+
+ Assertions.assertTrue(end - start > useTime * 0.8 && end - start < useTime * 1.2);
+ }
+
+ @Test
+ public void testWithCount() {
+ Clock clock = Clock.systemDefaultZone();
+ FlowControlGate flowControlGate = FlowControlGate.create(FlowControlStrategy.ofCount(2));
+ List rows = getRows(10);
+ long start = clock.millis();
+ for (SeaTunnelRow row : rows) {
+ flowControlGate.audit(row);
+ }
+ long end = clock.millis();
+ long useTime = 10 / 2 * 1000;
+
+ Assertions.assertTrue(end - start > useTime * 0.8 && end - start < useTime * 1.2);
+ }
+
+ @Test
+ public void testWithBytesAndCount() {
+ Clock clock = Clock.systemDefaultZone();
+ FlowControlGate flowControlGate = FlowControlGate.create(FlowControlStrategy.of(100, 2));
+ List rows = getRows(10);
+ long start = clock.millis();
+ for (SeaTunnelRow row : rows) {
+ flowControlGate.audit(row);
+ }
+ long end = clock.millis();
+ long useTime = rowSize * 10 / 100 * 1000;
+
+ Assertions.assertTrue(end - start > useTime * 0.8 && end - start < useTime * 1.2);
+ }
+
+ /** return row list with size, each row size is 181 */
+ private List getRows(int size) {
+ Map map = new HashMap<>();
+ map.put(
+ "key1",
+ new SeaTunnelRow(
+ new Object[] {
+ 1, "test", 1L, new BigDecimal("3333.333"),
+ }));
+ map.put(
+ "key2",
+ new SeaTunnelRow(
+ new Object[] {
+ 1, "test", 1L, new BigDecimal("3333.333"),
+ }));
+
+ List rows = new ArrayList<>();
+ for (int i = 0; i < size; i++) {
+ rows.add(
+ new SeaTunnelRow(
+ new Object[] {
+ 1,
+ "test",
+ 1L,
+ map,
+ new BigDecimal("3333.333"),
+ new String[] {"test2", "test", "3333.333"}
+ }));
+ }
+ return rows;
+ }
+}
diff --git a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-13-starter/src/main/bin/start-seatunnel-flink-13-connector-v2.cmd b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-13-starter/src/main/bin/start-seatunnel-flink-13-connector-v2.cmd
new file mode 100644
index 00000000000..c1cbc1d9556
--- /dev/null
+++ b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-13-starter/src/main/bin/start-seatunnel-flink-13-connector-v2.cmd
@@ -0,0 +1,71 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+setlocal enabledelayedexpansion
+
+rem resolve links - %0 may be a softlink
+set "PRG=%~f0"
+:resolve_loop
+rem Get the parent directory of the script
+set "PRG_DIR=%~dp0"
+rem Change current drive and directory to %PRG_DIR% and execute the 'dir' command, which will fail if %PRG% is not a valid file.
+cd /d "%PRG_DIR%" || (
+ echo Cannot determine the script's current directory.
+ exit /b 1
+)
+
+set "APP_DIR=%~dp0"
+set "CONF_DIR=%APP_DIR%\config"
+set "APP_JAR=%APP_DIR%\starter\seatunnel-flink-13-starter.jar"
+set "APP_MAIN=org.apache.seatunnel.core.starter.flink.FlinkStarter"
+
+if exist "%CONF_DIR%\seatunnel-env.cmd" (
+ call "%CONF_DIR%\seatunnel-env.cmd"
+)
+
+if "%~1"=="" (
+ set "args=-h"
+) else (
+ set "args=%*"
+)
+
+set "JAVA_OPTS="
+rem Log4j2 Config
+if exist "%CONF_DIR%\log4j2.properties" (
+ set "JAVA_OPTS=!JAVA_OPTS! -Dlog4j2.configurationFile=%CONF_DIR%\log4j2.properties"
+ set "JAVA_OPTS=!JAVA_OPTS! -Dseatunnel.logs.path=%APP_DIR%\logs"
+ set "JAVA_OPTS=!JAVA_OPTS! -Dseatunnel.logs.file_name=seatunnel-flink-starter"
+)
+
+set "CLASS_PATH=%APP_DIR%\starter\logging\*;%APP_JAR%"
+
+for /f "delims=" %%i in ('java %JAVA_OPTS% -cp %CLASS_PATH% %APP_MAIN% %args%') do (
+ set "CMD=%%i"
+ setlocal disabledelayedexpansion
+ if !errorlevel! equ 234 (
+ echo !CMD!
+ endlocal
+ exit /b 0
+ ) else if !errorlevel! equ 0 (
+ echo Execute SeaTunnel Flink Job: !CMD!
+ endlocal
+ call !CMD!
+ ) else (
+ echo !CMD!
+ endlocal
+ exit /b !errorlevel!
+ )
+)
diff --git a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-13-starter/src/main/java/org/apache/seatunnel/core/starter/flink/execution/FlinkRuntimeEnvironment.java b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-13-starter/src/main/java/org/apache/seatunnel/core/starter/flink/execution/FlinkRuntimeEnvironment.java
index 34aa7ee4f2d..996c9698fb0 100644
--- a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-13-starter/src/main/java/org/apache/seatunnel/core/starter/flink/execution/FlinkRuntimeEnvironment.java
+++ b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-13-starter/src/main/java/org/apache/seatunnel/core/starter/flink/execution/FlinkRuntimeEnvironment.java
@@ -316,19 +316,22 @@ private void setCheckpoint() {
}
}
- public void registerResultTable(Config config, DataStream dataStream) {
- if (config.hasPath(RESULT_TABLE_NAME)) {
- String name = config.getString(RESULT_TABLE_NAME);
- StreamTableEnvironment tableEnvironment = this.getStreamTableEnvironment();
- if (!TableUtil.tableExists(tableEnvironment, name)) {
+ public void registerResultTable(
+ Config config, DataStream dataStream, String name, Boolean isAppend) {
+ StreamTableEnvironment tableEnvironment = this.getStreamTableEnvironment();
+ if (!TableUtil.tableExists(tableEnvironment, name)) {
+ if (isAppend) {
if (config.hasPath("field_name")) {
String fieldName = config.getString("field_name");
tableEnvironment.registerDataStream(name, dataStream, fieldName);
- } else {
- tableEnvironment.registerDataStream(name, dataStream);
+ return;
}
+ tableEnvironment.registerDataStream(name, dataStream);
+ return;
}
}
+ tableEnvironment.createTemporaryView(
+ name, tableEnvironment.fromChangelogStream(dataStream));
}
public static FlinkRuntimeEnvironment getInstance(Config config) {
diff --git a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-15-starter/src/main/bin/start-seatunnel-flink-15-connector-v2.cmd b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-15-starter/src/main/bin/start-seatunnel-flink-15-connector-v2.cmd
new file mode 100644
index 00000000000..ed4c1f6979e
--- /dev/null
+++ b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-15-starter/src/main/bin/start-seatunnel-flink-15-connector-v2.cmd
@@ -0,0 +1,71 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+setlocal enabledelayedexpansion
+
+rem resolve links - %0 may be a softlink
+set "PRG=%~f0"
+:resolve_loop
+rem Get the parent directory of the script
+set "PRG_DIR=%~dp0"
+rem Change current drive and directory to %PRG_DIR% and execute the 'dir' command, which will fail if %PRG% is not a valid file.
+cd /d "%PRG_DIR%" || (
+ echo Cannot determine the script's current directory.
+ exit /b 1
+)
+
+set "APP_DIR=%~dp0"
+set "CONF_DIR=%APP_DIR%\config"
+set "APP_JAR=%APP_DIR%\starter\seatunnel-flink-15-starter.jar"
+set "APP_MAIN=org.apache.seatunnel.core.starter.flink.FlinkStarter"
+
+if exist "%CONF_DIR%\seatunnel-env.cmd" (
+ call "%CONF_DIR%\seatunnel-env.cmd"
+)
+
+if "%~1"=="" (
+ set "args=-h"
+) else (
+ set "args=%*"
+)
+
+set "JAVA_OPTS="
+rem Log4j2 Config
+if exist "%CONF_DIR%\log4j2.properties" (
+ set "JAVA_OPTS=!JAVA_OPTS! -Dlog4j2.configurationFile=%CONF_DIR%\log4j2.properties"
+ set "JAVA_OPTS=!JAVA_OPTS! -Dseatunnel.logs.path=%APP_DIR%\logs"
+ set "JAVA_OPTS=!JAVA_OPTS! -Dseatunnel.logs.file_name=seatunnel-flink-starter"
+)
+
+set "CLASS_PATH=%APP_DIR%\starter\logging\*;%APP_JAR%"
+
+for /f "delims=" %%i in ('java %JAVA_OPTS% -cp %CLASS_PATH% %APP_MAIN% %args%') do (
+ set "CMD=%%i"
+ setlocal disabledelayedexpansion
+ if !errorlevel! equ 234 (
+ echo !CMD!
+ endlocal
+ exit /b 0
+ ) else if !errorlevel! equ 0 (
+ echo Execute SeaTunnel Flink Job: !CMD!
+ endlocal
+ call !CMD!
+ ) else (
+ echo !CMD!
+ endlocal
+ exit /b !errorlevel!
+ )
+)
diff --git a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/FlinkAbstractPluginExecuteProcessor.java b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/FlinkAbstractPluginExecuteProcessor.java
index e9d36ba068e..6c61f61b957 100644
--- a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/FlinkAbstractPluginExecuteProcessor.java
+++ b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/FlinkAbstractPluginExecuteProcessor.java
@@ -31,15 +31,19 @@
import java.net.URL;
import java.net.URLClassLoader;
+import java.util.HashMap;
import java.util.List;
import java.util.Optional;
import java.util.function.BiConsumer;
+import static org.apache.seatunnel.api.common.CommonOptions.RESULT_TABLE_NAME;
+
public abstract class FlinkAbstractPluginExecuteProcessor
implements PluginExecuteProcessor, FlinkRuntimeEnvironment> {
protected static final String ENGINE_TYPE = "seatunnel";
protected static final String PLUGIN_NAME = "plugin_name";
protected static final String SOURCE_TABLE_NAME = "source_table_name";
+ protected static HashMap isAppendMap = new HashMap<>();
protected static final BiConsumer ADD_URL_TO_CLASSLOADER =
(classLoader, url) -> {
@@ -76,14 +80,41 @@ protected Optional> fromSourceTable(Config pluginConfig) {
if (pluginConfig.hasPath(SOURCE_TABLE_NAME)) {
StreamTableEnvironment tableEnvironment =
flinkRuntimeEnvironment.getStreamTableEnvironment();
- Table table = tableEnvironment.from(pluginConfig.getString(SOURCE_TABLE_NAME));
- return Optional.ofNullable(TableUtil.tableToDataStream(tableEnvironment, table, true));
+ String tableName = pluginConfig.getString(SOURCE_TABLE_NAME);
+ Table table = tableEnvironment.from(tableName);
+ return Optional.ofNullable(
+ TableUtil.tableToDataStream(
+ tableEnvironment, table, isAppendMap.getOrDefault(tableName, true)));
}
return Optional.empty();
}
protected void registerResultTable(Config pluginConfig, DataStream dataStream) {
- flinkRuntimeEnvironment.registerResultTable(pluginConfig, dataStream);
+ if (pluginConfig.hasPath(RESULT_TABLE_NAME.key())) {
+ String resultTable = pluginConfig.getString(RESULT_TABLE_NAME.key());
+ if (pluginConfig.hasPath(SOURCE_TABLE_NAME)) {
+ String sourceTable = pluginConfig.getString(SOURCE_TABLE_NAME);
+ flinkRuntimeEnvironment.registerResultTable(
+ pluginConfig,
+ dataStream,
+ resultTable,
+ isAppendMap.getOrDefault(sourceTable, true));
+ registerAppendStream(pluginConfig);
+ return;
+ }
+ flinkRuntimeEnvironment.registerResultTable(
+ pluginConfig,
+ dataStream,
+ resultTable,
+ isAppendMap.getOrDefault(resultTable, true));
+ }
+ }
+
+ protected void registerAppendStream(Config pluginConfig) {
+ if (pluginConfig.hasPath(RESULT_TABLE_NAME.key())) {
+ String tableName = pluginConfig.getString(RESULT_TABLE_NAME.key());
+ isAppendMap.put(tableName, false);
+ }
}
protected abstract List initializePlugins(
diff --git a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/FlinkRuntimeEnvironment.java b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/FlinkRuntimeEnvironment.java
index 583a1cf3e5c..12168921d8c 100644
--- a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/FlinkRuntimeEnvironment.java
+++ b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/FlinkRuntimeEnvironment.java
@@ -316,19 +316,22 @@ private void setCheckpoint() {
}
}
- public void registerResultTable(Config config, DataStream dataStream) {
- if (config.hasPath(RESULT_TABLE_NAME)) {
- String name = config.getString(RESULT_TABLE_NAME);
- StreamTableEnvironment tableEnvironment = this.getStreamTableEnvironment();
- if (!TableUtil.tableExists(tableEnvironment, name)) {
+ public void registerResultTable(
+ Config config, DataStream dataStream, String name, Boolean isAppend) {
+ StreamTableEnvironment tableEnvironment = this.getStreamTableEnvironment();
+ if (!TableUtil.tableExists(tableEnvironment, name)) {
+ if (isAppend) {
if (config.hasPath("field_name")) {
String fieldName = config.getString("field_name");
tableEnvironment.registerDataStream(name, dataStream, fieldName);
- } else {
- tableEnvironment.registerDataStream(name, dataStream);
+ return;
}
+ tableEnvironment.registerDataStream(name, dataStream);
+ return;
}
}
+ tableEnvironment.createTemporaryView(
+ name, tableEnvironment.fromChangelogStream(dataStream));
}
public static FlinkRuntimeEnvironment getInstance(Config config) {
diff --git a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/SourceExecuteProcessor.java b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/SourceExecuteProcessor.java
index 6bcc5fe8939..f3ebdd04378 100644
--- a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/SourceExecuteProcessor.java
+++ b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/execution/SourceExecuteProcessor.java
@@ -65,9 +65,11 @@ public List> execute(List> upstreamDataStreams)
List> sources = new ArrayList<>();
for (int i = 0; i < plugins.size(); i++) {
SeaTunnelSource internalSource = plugins.get(i);
+ Config pluginConfig = pluginConfigs.get(i);
BaseSeaTunnelSourceFunction sourceFunction;
if (internalSource instanceof SupportCoordinate) {
sourceFunction = new SeaTunnelCoordinatedSource(internalSource);
+ registerAppendStream(pluginConfig);
} else {
sourceFunction = new SeaTunnelParallelSource(internalSource);
}
@@ -80,7 +82,6 @@ public List> execute(List> upstreamDataStreams)
sourceFunction,
"SeaTunnel " + internalSource.getClass().getSimpleName(),
bounded);
- Config pluginConfig = pluginConfigs.get(i);
if (pluginConfig.hasPath(CommonOptions.PARALLELISM.key())) {
int parallelism = pluginConfig.getInt(CommonOptions.PARALLELISM.key());
sourceStream.setParallelism(parallelism);
diff --git a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/utils/TableUtil.java b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/utils/TableUtil.java
index ca1603cdf99..aad97518f4b 100644
--- a/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/utils/TableUtil.java
+++ b/seatunnel-core/seatunnel-flink-starter/seatunnel-flink-starter-common/src/main/java/org/apache/seatunnel/core/starter/flink/utils/TableUtil.java
@@ -37,11 +37,9 @@ public static DataStream tableToDataStream(
if (isAppend) {
return tableEnvironment.toAppendStream(table, typeInfo);
}
- return tableEnvironment
- .toRetractStream(table, typeInfo)
- .filter(row -> row.f0)
- .map(row -> row.f1)
- .returns(typeInfo);
+ DataStream dataStream = tableEnvironment.toChangelogStream(table);
+ dataStream.getTransformation().setOutputType(typeInfo);
+ return dataStream;
}
public static boolean tableExists(TableEnvironment tableEnvironment, String name) {
diff --git a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/bin/start-seatunnel-spark-2-connector-v2.cmd b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/bin/start-seatunnel-spark-2-connector-v2.cmd
new file mode 100644
index 00000000000..b2671671383
--- /dev/null
+++ b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/bin/start-seatunnel-spark-2-connector-v2.cmd
@@ -0,0 +1,71 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+setlocal enabledelayedexpansion
+
+rem resolve links - %0 may be a softlink
+set "PRG=%~f0"
+:resolve_loop
+rem Get the parent directory of the script
+set "PRG_DIR=%~dp0"
+rem Change current drive and directory to %PRG_DIR% and execute the 'dir' command, which will fail if %PRG% is not a valid file.
+cd /d "%PRG_DIR%" || (
+ echo Cannot determine the script's current directory.
+ exit /b 1
+)
+
+set "APP_DIR=%~dp0"
+set "CONF_DIR=%APP_DIR%\config"
+set "APP_JAR=%APP_DIR%\starter\seatunnel-spark-2-starter.jar"
+set "APP_MAIN=org.apache.seatunnel.core.starter.spark.SparkStarter"
+
+if exist "%CONF_DIR%\seatunnel-env.cmd" (
+ call "%CONF_DIR%\seatunnel-env.cmd"
+)
+
+if "%~1"=="" (
+ set "args=-h"
+) else (
+ set "args=%*"
+)
+
+set "JAVA_OPTS="
+rem Log4j2 Config
+if exist "%CONF_DIR%\log4j2.properties" (
+ set "JAVA_OPTS=!JAVA_OPTS! -Dlog4j2.configurationFile=%CONF_DIR%\log4j2.properties"
+ set "JAVA_OPTS=!JAVA_OPTS! -Dseatunnel.logs.path=%APP_DIR%\logs"
+ set "JAVA_OPTS=!JAVA_OPTS! -Dseatunnel.logs.file_name=seatunnel-spark-starter"
+)
+
+set "CLASS_PATH=%APP_DIR%\starter\logging\*;%APP_JAR%"
+
+for /f "delims=" %%i in ('java %JAVA_OPTS% -cp %CLASS_PATH% %APP_MAIN% %args%') do (
+ set "CMD=%%i"
+ setlocal enabledelayedexpansion
+ if !errorlevel! equ 234 (
+ echo !CMD!
+ endlocal
+ exit /b 0
+ ) else if !errorlevel! equ 0 (
+ echo Execute SeaTunnel Spark Job: !CMD!
+ endlocal
+ call !CMD!
+ ) else (
+ echo !CMD!
+ endlocal
+ exit /b !errorlevel!
+ )
+)
diff --git a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/java/org/apache/seatunnel/core/starter/spark/SparkStarter.java b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/java/org/apache/seatunnel/core/starter/spark/SparkStarter.java
index 5e295ef8045..1b8918976b4 100644
--- a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/java/org/apache/seatunnel/core/starter/spark/SparkStarter.java
+++ b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-2-starter/src/main/java/org/apache/seatunnel/core/starter/spark/SparkStarter.java
@@ -169,9 +169,9 @@ static Map getSparkConf(String configFile) throws FileNotFoundEx
Map.Entry::getKey, e -> e.getValue().unwrapped().toString()));
}
- /** return connector's jars, which located in 'connectors/spark/*'. */
+ /** return connector's jars, which located in 'connectors/*'. */
private List getConnectorJarDependencies() {
- Path pluginRootDir = Common.connectorJarDir("seatunnel");
+ Path pluginRootDir = Common.connectorDir();
if (!Files.exists(pluginRootDir) || !Files.isDirectory(pluginRootDir)) {
return Collections.emptyList();
}
diff --git a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-3-starter/src/main/bin/start-seatunnel-spark-3-connector-v2.cmd b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-3-starter/src/main/bin/start-seatunnel-spark-3-connector-v2.cmd
new file mode 100644
index 00000000000..433fe23c6d1
--- /dev/null
+++ b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-3-starter/src/main/bin/start-seatunnel-spark-3-connector-v2.cmd
@@ -0,0 +1,71 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+setlocal enabledelayedexpansion
+
+rem resolve links - %0 may be a softlink
+set "PRG=%~f0"
+:resolve_loop
+rem Get the parent directory of the script
+set "PRG_DIR=%~dp0"
+rem Change current drive and directory to %PRG_DIR% and execute the 'dir' command, which will fail if %PRG% is not a valid file.
+cd /d "%PRG_DIR%" || (
+ echo Cannot determine the script's current directory.
+ exit /b 1
+)
+
+set "APP_DIR=%~dp0"
+set "CONF_DIR=%APP_DIR%\config"
+set "APP_JAR=%APP_DIR%\starter\seatunnel-spark-3-starter.jar"
+set "APP_MAIN=org.apache.seatunnel.core.starter.spark.SparkStarter"
+
+if exist "%CONF_DIR%\seatunnel-env.cmd" (
+ call "%CONF_DIR%\seatunnel-env.cmd"
+)
+
+if "%~1"=="" (
+ set "args=-h"
+) else (
+ set "args=%*"
+)
+
+set "JAVA_OPTS="
+rem Log4j2 Config
+if exist "%CONF_DIR%\log4j2.properties" (
+ set "JAVA_OPTS=!JAVA_OPTS! -Dlog4j2.configurationFile=%CONF_DIR%\log4j2.properties"
+ set "JAVA_OPTS=!JAVA_OPTS! -Dseatunnel.logs.path=%APP_DIR%\logs"
+ set "JAVA_OPTS=!JAVA_OPTS! -Dseatunnel.logs.file_name=seatunnel-spark-starter"
+)
+
+set "CLASS_PATH=%APP_DIR%\starter\logging\*;%APP_JAR%"
+
+for /f "delims=" %%i in ('java %JAVA_OPTS% -cp %CLASS_PATH% %APP_MAIN% %args%') do (
+ set "CMD=%%i"
+ setlocal enabledelayedexpansion
+ if !errorlevel! equ 234 (
+ echo !CMD!
+ endlocal
+ exit /b 0
+ ) else if !errorlevel! equ 0 (
+ echo Execute SeaTunnel Spark Job: !CMD!
+ endlocal
+ call !CMD!
+ ) else (
+ echo !CMD!
+ endlocal
+ exit /b !errorlevel!
+ )
+)
diff --git a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-starter-common/src/main/java/org/apache/seatunnel/core/starter/spark/SparkStarter.java b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-starter-common/src/main/java/org/apache/seatunnel/core/starter/spark/SparkStarter.java
index aa07f4ecded..c33544873a7 100644
--- a/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-starter-common/src/main/java/org/apache/seatunnel/core/starter/spark/SparkStarter.java
+++ b/seatunnel-core/seatunnel-spark-starter/seatunnel-spark-starter-common/src/main/java/org/apache/seatunnel/core/starter/spark/SparkStarter.java
@@ -169,9 +169,9 @@ static Map getSparkConf(String configFile) throws FileNotFoundEx
Map.Entry::getKey, e -> e.getValue().unwrapped().toString()));
}
- /** return connector's jars, which located in 'connectors/spark/*'. */
+ /** return connector's jars, which located in 'connectors/*'. */
private List getConnectorJarDependencies() {
- Path pluginRootDir = Common.connectorJarDir("seatunnel");
+ Path pluginRootDir = Common.connectorDir();
if (!Files.exists(pluginRootDir) || !Files.isDirectory(pluginRootDir)) {
return Collections.emptyList();
}
diff --git a/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel-cluster.cmd b/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel-cluster.cmd
new file mode 100644
index 00000000000..e94a4bb482f
--- /dev/null
+++ b/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel-cluster.cmd
@@ -0,0 +1,86 @@
+@echo off
+REM Licensed to the Apache Software Foundation (ASF) under one or more
+REM contributor license agreements. See the NOTICE file distributed with
+REM this work for additional information regarding copyright ownership.
+REM The ASF licenses this file to You under the Apache License, Version 2.0
+REM (the "License"); you may not use this file except in compliance with
+REM the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+setlocal enabledelayedexpansion
+
+REM resolve links - %0 may be a softlink
+for %%F in ("%~f0") do (
+ set "PRG=%%~fF"
+ set "PRG_DIR=%%~dpF"
+ set "APP_DIR=%%~dpF.."
+)
+
+set "CONF_DIR=%APP_DIR%\config"
+set "APP_JAR=%APP_DIR%\starter\seatunnel-starter.jar"
+set "APP_MAIN=org.apache.seatunnel.core.starter.seatunnel.SeaTunnelServer"
+set "OUT=%APP_DIR%\logs\seatunnel-server.out"
+
+set "HELP=false"
+set "args="
+
+for %%I in (%*) do (
+ set "args=!args! %%I"
+ if "%%I"=="-d" set "DAEMON=true"
+ if "%%I"=="--daemon" set "DAEMON=true"
+ if "%%I"=="-h" set "HELP=true"
+ if "%%I"=="--help" set "HELP=true"
+)
+
+REM SeaTunnel Engine Config
+set "HAZELCAST_CONFIG=%CONF_DIR%\hazelcast.yaml"
+set "SEATUNNEL_CONFIG=%CONF_DIR%\seatunnel.yaml"
+set "JAVA_OPTS=%JvmOption%"
+
+for %%I in (%*) do (
+ set "arg=%%I"
+ if "!arg:~0,10!"=="JvmOption=" (
+ set "JAVA_OPTS=!JAVA_OPTS! !arg:~10!"
+ )
+)
+
+set "JAVA_OPTS=%JAVA_OPTS% -Dseatunnel.config=%SEATUNNEL_CONFIG%"
+set "JAVA_OPTS=%JAVA_OPTS% -Dhazelcast.config=%HAZELCAST_CONFIG%"
+set "JAVA_OPTS=%JAVA_OPTS% -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector"
+
+REM Server Debug Config
+REM Usage instructions:
+REM If you need to debug your code in cluster mode, please enable this configuration option and listen to the specified
+REM port in your IDE. After that, you can happily debug your code.
+REM set "JAVA_OPTS=%JAVA_OPTS% -Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=5001,suspend=y"
+
+if exist "%CONF_DIR%\log4j2.properties" (
+ set "JAVA_OPTS=%JAVA_OPTS% -Dlog4j2.configurationFile=%CONF_DIR%\log4j2.properties"
+ set "JAVA_OPTS=%JAVA_OPTS% -Dseatunnel.logs.path=%APP_DIR%\logs"
+ set "JAVA_OPTS=%JAVA_OPTS% -Dseatunnel.logs.file_name=seatunnel-engine-server"
+)
+
+set "CLASS_PATH=%APP_DIR%\lib\*;%APP_JAR%"
+
+for /f "usebackq delims=" %%I in ("%APP_DIR%\config\jvm_options") do (
+ set "line=%%I"
+ if not "!line:~0,1!"=="#" if "!line!" NEQ "" (
+ set "JAVA_OPTS=!JAVA_OPTS! !line!"
+ )
+)
+
+if "%HELP%"=="false" (
+ if not exist "%APP_DIR%\logs\" mkdir "%APP_DIR%\logs"
+ start "SeaTunnel Server" java %JAVA_OPTS% -cp "%CLASS_PATH%" %APP_MAIN% %args% > "%OUT%" 2>&1
+) else (
+ java %JAVA_OPTS% -cp "%CLASS_PATH%" %APP_MAIN% %args%
+)
+
+endlocal
\ No newline at end of file
diff --git a/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel-cluster.sh b/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel-cluster.sh
index d4644f2d4da..e85a97e67a4 100755
--- a/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel-cluster.sh
+++ b/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel-cluster.sh
@@ -93,7 +93,7 @@ fi
# Usage instructions:
# If you need to debug your code in cluster mode, please enable this configuration option and listen to the specified
# port in your IDE. After that, you can happily debug your code.
-# JAVA_OPTS="${JAVA_OPTS} -Xrunjdwp:server=y,transport=dt_socket,address=8000,suspend=n"
+# JAVA_OPTS="${JAVA_OPTS} -Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=5001,suspend=y"
CLASS_PATH=${APP_DIR}/lib/*:${APP_JAR}
diff --git a/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel.cmd b/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel.cmd
new file mode 100644
index 00000000000..cf9258e9d50
--- /dev/null
+++ b/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel.cmd
@@ -0,0 +1,108 @@
+@echo off
+REM Licensed to the Apache Software Foundation (ASF) under one or more
+REM contributor license agreements. See the NOTICE file distributed with
+REM this work for additional information regarding copyright ownership.
+REM The ASF licenses this file to You under the Apache License, Version 2.0
+REM (the "License"); you may not use this file except in compliance with
+REM the License. You may obtain a copy of the License at
+REM
+REM http://www.apache.org/licenses/LICENSE-2.0
+REM
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
+
+setlocal enabledelayedexpansion
+REM resolve links - %0 may be a softlink
+set "PRG=%~0"
+
+:resolveLoop
+for %%F in ("%PRG%") do (
+ set "PRG_DIR=%%~dpF"
+ set "PRG_NAME=%%~nxF"
+)
+set "PRG=%PRG_DIR%%PRG_NAME%"
+
+REM Get application directory
+cd "%PRG_DIR%\.."
+set "APP_DIR=%CD%"
+
+set "CONF_DIR=%APP_DIR%\config"
+set "APP_JAR=%APP_DIR%\starter\seatunnel-starter.jar"
+set "APP_MAIN=org.apache.seatunnel.core.starter.seatunnel.SeaTunnelClient"
+
+if exist "%CONF_DIR%\seatunnel-env.cmd" call "%CONF_DIR%\seatunnel-env.cmd"
+
+if "%~1"=="" (
+ set "args=-h"
+) else (
+ set "args=%*"
+)
+
+REM SeaTunnel Engine Config
+if not defined HAZELCAST_CLIENT_CONFIG (
+ set "HAZELCAST_CLIENT_CONFIG=%CONF_DIR%\hazelcast-client.yaml"
+)
+
+if not defined HAZELCAST_CONFIG (
+ set "HAZELCAST_CONFIG=%CONF_DIR%\hazelcast.yaml"
+)
+
+if not defined SEATUNNEL_CONFIG (
+ set "SEATUNNEL_CONFIG=%CONF_DIR%\seatunnel.yaml"
+)
+
+if defined JvmOption (
+ set "JAVA_OPTS=%JAVA_OPTS% %JvmOption%"
+)
+
+for %%i in (%*) do (
+ set "arg=%%i"
+ if "!arg:~0,10!"=="JvmOption=" (
+ set "JVM_OPTION=!arg:~10!"
+ set "JAVA_OPTS=!JAVA_OPTS! !JVM_OPTION!"
+ goto :break_loop
+ )
+)
+:break_loop
+
+set "JAVA_OPTS=%JAVA_OPTS% -Dhazelcast.client.config=%HAZELCAST_CLIENT_CONFIG%"
+set "JAVA_OPTS=%JAVA_OPTS% -Dseatunnel.config=%SEATUNNEL_CONFIG%"
+set "JAVA_OPTS=%JAVA_OPTS% -Dhazelcast.config=%HAZELCAST_CONFIG%"
+
+REM if you want to debug, please
+REM set "JAVA_OPTS=%JAVA_OPTS% -Xdebug -Xrunjdwp:transport=dt_socket,server=y,address=5000,suspend=y"
+
+REM Log4j2 Config
+if exist "%CONF_DIR%\log4j2_client.properties" (
+ set "JAVA_OPTS=!JAVA_OPTS! -Dlog4j2.configurationFile=%CONF_DIR%\log4j2_client.properties"
+ set "JAVA_OPTS=!JAVA_OPTS! -Dseatunnel.logs.path=%APP_DIR%\logs"
+ for %%i in (%args%) do (
+ set "arg=%%i"
+ if "!arg!"=="-m" set "is_local_mode=true"
+ if "!arg!"=="--master" set "is_local_mode=true"
+ if "!arg!"=="-e" set "is_local_mode=true"
+ if "!arg!"=="--deploy-mode" set "is_local_mode=true"
+ )
+ if defined is_local_mode (
+ for /f "tokens=1-3 delims=:" %%A in ('echo %time%') do (
+ set "ntime=%%A%%B%%C"
+ )
+ set "JAVA_OPTS=!JAVA_OPTS! -Dseatunnel.logs.file_name=seatunnel-starter-client-!date:~0,4!!date:~5,2!!date:~8,2!-!time:~0,2!!time:~3,2!!time:~6,2!!ntime!"
+ ) else (
+ set "JAVA_OPTS=!JAVA_OPTS! -Dseatunnel.logs.file_name=seatunnel-starter-client"
+ )
+)
+
+set "CLASS_PATH=%APP_DIR%\lib\*;%APP_JAR%"
+
+for /f "usebackq delims=" %%a in ("%APP_DIR%\config\jvm_client_options") do (
+ set "line=%%a"
+ if not "!line:~0,1!"=="#" if "!line!" neq "" (
+ set "JAVA_OPTS=!JAVA_OPTS! !line!"
+ )
+)
+
+java %JAVA_OPTS% -cp "%CLASS_PATH%" %APP_MAIN% %args%
diff --git a/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel.sh b/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel.sh
index 7c25ec126c0..b95800f1c2c 100755
--- a/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel.sh
+++ b/seatunnel-core/seatunnel-starter/src/main/bin/seatunnel.sh
@@ -81,6 +81,12 @@ JAVA_OPTS="${JAVA_OPTS} -Dhazelcast.client.config=${HAZELCAST_CLIENT_CONFIG}"
JAVA_OPTS="${JAVA_OPTS} -Dseatunnel.config=${SEATUNNEL_CONFIG}"
JAVA_OPTS="${JAVA_OPTS} -Dhazelcast.config=${HAZELCAST_CONFIG}"
+# Client Debug Config
+# Usage instructions:
+# If you need to debug your code in cluster mode, please enable this configuration option and listen to the specified
+# port in your IDE. After that, you can happily debug your code.
+# JAVA_OPTS="${JAVA_OPTS} -Xdebug -Xrunjdwp:transport=dt_socket,server=y,address=5000,suspend=y"
+
# Log4j2 Config
if [ -e "${CONF_DIR}/log4j2_client.properties" ]; then
JAVA_OPTS="${JAVA_OPTS} -Dlog4j2.configurationFile=${CONF_DIR}/log4j2_client.properties"
diff --git a/seatunnel-core/seatunnel-starter/src/main/bin/stop-seatunnel-cluster.cmd b/seatunnel-core/seatunnel-starter/src/main/bin/stop-seatunnel-cluster.cmd
new file mode 100644
index 00000000000..0c0cb72b014
--- /dev/null
+++ b/seatunnel-core/seatunnel-starter/src/main/bin/stop-seatunnel-cluster.cmd
@@ -0,0 +1,58 @@
+@echo off
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+
+setlocal enabledelayedexpansion
+
+set "SEATUNNEL_DEFAULT_CLUSTER_NAME=seatunnel_default_cluster"
+set "SHOW_USAGE=Usage: stop-seatunnel-cluster.cmd \n Options: \n -cn, --cluster The name of the cluster to shut down (default: %SEATUNNEL_DEFAULT_CLUSTER_NAME%) \n -h, --help Show the usage message"
+set "APP_MAIN=org.apache.seatunnel.core.starter.seatunnel.SeaTunnelServer"
+set "CLUSTER_NAME="
+
+if "%~1"=="" (
+ echo !SHOW_USAGE!
+ exit /B 1
+)
+
+:parse_args
+if "%~1"=="-cn" (
+ shift
+ set "CLUSTER_NAME=%~1"
+ shift
+ goto :parse_args
+) else if "%~1"=="--cluster" (
+ shift
+ set "CLUSTER_NAME=%~1"
+ shift
+ goto :parse_args
+) else if "%~1"=="-h" (
+ echo !SHOW_USAGE!
+ exit /B 0
+) else if "%~1"=="--help" (
+ echo !SHOW_USAGE!
+ exit /B 0
+)
+
+if not defined CLUSTER_NAME (
+ for /f %%i in ('tasklist /fi "imagename eq java.exe" ^| find "!APP_MAIN!"') do (
+ taskkill /F /PID %%i
+ )
+) else (
+ for /f %%i in ('tasklist /fi "imagename eq java.exe" ^| find "!APP_MAIN!" ^| find "!CLUSTER_NAME!"') do (
+ taskkill /F /PID %%i
+ )
+)
+
+exit /B 0
\ No newline at end of file
diff --git a/seatunnel-dist/src/main/assembly/assembly-bin-ci.xml b/seatunnel-dist/src/main/assembly/assembly-bin-ci.xml
index de11af1e173..5c1171a82da 100644
--- a/seatunnel-dist/src/main/assembly/assembly-bin-ci.xml
+++ b/seatunnel-dist/src/main/assembly/assembly-bin-ci.xml
@@ -156,7 +156,7 @@
org.apache.seatunnel:connector-file-base-hadoop
org.apache.seatunnel:connector-cdc-base
- /connectors/seatunnel
+ /connectors
provided
diff --git a/seatunnel-dist/src/main/assembly/assembly-bin.xml b/seatunnel-dist/src/main/assembly/assembly-bin.xml
index c0996b2656f..4f5e7986915 100644
--- a/seatunnel-dist/src/main/assembly/assembly-bin.xml
+++ b/seatunnel-dist/src/main/assembly/assembly-bin.xml
@@ -184,7 +184,7 @@
org.apache.seatunnel:connector-fake:jar
org.apache.seatunnel:connector-console:jar
- /connectors/seatunnel
+ /connectors
provided
diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-cdc-mysql-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/MysqlCDCIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-cdc-mysql-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/MysqlCDCIT.java
index 1d0d90853fc..b648febd7d9 100644
--- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-cdc-mysql-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/MysqlCDCIT.java
+++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-cdc-mysql-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/cdc/mysql/MysqlCDCIT.java
@@ -55,7 +55,7 @@
@Slf4j
@DisabledOnContainer(
value = {},
- type = {EngineType.SPARK, EngineType.FLINK},
+ type = {EngineType.SPARK},
disabledReason = "Currently SPARK and FLINK do not support cdc")
public class MysqlCDCIT extends TestSuiteBase implements TestResource {
@@ -88,6 +88,9 @@ public class MysqlCDCIT extends TestSuiteBase implements TestResource {
+ " f_enum, cast(f_mediumblob as char) as f_mediumblob, f_long_varchar, f_real, f_time, f_tinyint, f_tinyint_unsigned,"
+ " f_json, cast(f_year as year) from mysql_cdc_e2e_sink_table";
+ private static final String CLEAN_SOURCE = "truncate table mysql_cdc_e2e_source_table";
+ private static final String CLEAN_SINK = "truncate table mysql_cdc_e2e_sink_table";
+
private static MySqlContainer createMySqlContainer(MySqlVersion version) {
MySqlContainer mySqlContainer =
new MySqlContainer(version)
@@ -134,6 +137,9 @@ public void startUp() throws ClassNotFoundException, InterruptedException {
@TestTemplate
public void testMysqlCdcCheckDataE2e(TestContainer container)
throws IOException, InterruptedException {
+ // Clear related content to ensure that multiple operations are not affected
+ executeSql(CLEAN_SOURCE);
+ executeSql(CLEAN_SINK);
CompletableFuture executeJobFuture =
CompletableFuture.supplyAsync(
diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-cdc-sqlserver-e2e/src/test/java/org/apache/seatunnel/e2e/connector/cdc/sqlserver/SqlServerCDCIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-cdc-sqlserver-e2e/src/test/java/org/apache/seatunnel/e2e/connector/cdc/sqlserver/SqlServerCDCIT.java
index 8bca3e3b036..bfe2a358889 100644
--- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-cdc-sqlserver-e2e/src/test/java/org/apache/seatunnel/e2e/connector/cdc/sqlserver/SqlServerCDCIT.java
+++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-cdc-sqlserver-e2e/src/test/java/org/apache/seatunnel/e2e/connector/cdc/sqlserver/SqlServerCDCIT.java
@@ -65,7 +65,7 @@
@Slf4j
@DisabledOnContainer(
value = {},
- type = {EngineType.SPARK, EngineType.FLINK},
+ type = {EngineType.SPARK},
disabledReason = "Currently SPARK and FLINK do not support cdc")
public class SqlServerCDCIT extends TestSuiteBase implements TestResource {
diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseIT.java
index abc82a1b287..c0a4254739b 100644
--- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseIT.java
+++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseIT.java
@@ -76,7 +76,7 @@
public class ClickhouseIT extends TestSuiteBase implements TestResource {
private static final Logger LOG = LoggerFactory.getLogger(ClickhouseIT.class);
- private static final String CLICKHOUSE_DOCKER_IMAGE = "yandex/clickhouse-server:22.1.3.7";
+ private static final String CLICKHOUSE_DOCKER_IMAGE = "clickhouse/clickhouse-server:23.3.13.6";
private static final String HOST = "clickhouse";
private static final String DRIVER_CLASS = "com.clickhouse.jdbc.ClickHouseDriver";
private static final String INIT_CLICKHOUSE_PATH = "/init/clickhouse_init.conf";
@@ -352,8 +352,8 @@ private static Pair> generateTestDataSet()
}
private void compareResult() throws SQLException, IOException {
- String sourceSql = "select * from " + SOURCE_TABLE;
- String sinkSql = "select * from " + SINK_TABLE;
+ String sourceSql = "select * from " + SOURCE_TABLE + " order by id";
+ String sinkSql = "select * from " + SINK_TABLE + " order by id";
List columnList =
Arrays.stream(generateTestDataSet().getKey().getFieldNames())
.collect(Collectors.toList());
diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseSinkCDCChangelogIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseSinkCDCChangelogIT.java
index f5f7095d939..4c2b9cedb18 100644
--- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseSinkCDCChangelogIT.java
+++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/java/org/apache/seatunnel/connectors/seatunnel/clickhouse/ClickhouseSinkCDCChangelogIT.java
@@ -56,7 +56,7 @@
disabledReason = "Spark engine will lose the row kind of record")
@Slf4j
public class ClickhouseSinkCDCChangelogIT extends TestSuiteBase implements TestResource {
- private static final String CLICKHOUSE_DOCKER_IMAGE = "clickhouse/clickhouse-server:latest";
+ private static final String CLICKHOUSE_DOCKER_IMAGE = "clickhouse/clickhouse-server:23.3.13.6";
private static final String HOST = "clickhouse";
private static final String DRIVER_CLASS = "com.clickhouse.jdbc.ClickHouseDriver";
private static final String DATABASE = "default";
diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/resources/clickhouse_to_clickhouse.conf b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/resources/clickhouse_to_clickhouse.conf
index d9b5c86e8f3..31d2bc24714 100644
--- a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/resources/clickhouse_to_clickhouse.conf
+++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-clickhouse-e2e/src/test/resources/clickhouse_to_clickhouse.conf
@@ -22,6 +22,7 @@ env {
# You can set spark configuration here
execution.parallelism = 1
job.mode = "BATCH"
+ checkpoint.interval = 10
}
source {
diff --git a/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-1/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/JdbcPostgresIdentifierIT.java b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-1/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/JdbcPostgresIdentifierIT.java
new file mode 100644
index 00000000000..13adec70084
--- /dev/null
+++ b/seatunnel-e2e/seatunnel-connector-v2-e2e/connector-jdbc-e2e/connector-jdbc-e2e-part-1/src/test/java/org/apache/seatunnel/connectors/seatunnel/jdbc/JdbcPostgresIdentifierIT.java
@@ -0,0 +1,387 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.connectors.seatunnel.jdbc;
+
+import org.apache.seatunnel.e2e.common.TestResource;
+import org.apache.seatunnel.e2e.common.TestSuiteBase;
+import org.apache.seatunnel.e2e.common.container.ContainerExtendedFactory;
+import org.apache.seatunnel.e2e.common.container.EngineType;
+import org.apache.seatunnel.e2e.common.container.TestContainer;
+import org.apache.seatunnel.e2e.common.junit.DisabledOnContainer;
+import org.apache.seatunnel.e2e.common.junit.TestContainerExtension;
+
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.TestTemplate;
+import org.testcontainers.containers.Container;
+import org.testcontainers.containers.PostgreSQLContainer;
+import org.testcontainers.containers.output.Slf4jLogConsumer;
+import org.testcontainers.lifecycle.Startables;
+import org.testcontainers.utility.DockerImageName;
+import org.testcontainers.utility.DockerLoggerFactory;
+
+import com.google.common.collect.Lists;
+import lombok.extern.slf4j.Slf4j;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Stream;
+
+import static org.awaitility.Awaitility.given;
+
+@Slf4j
+@DisabledOnContainer(
+ value = {},
+ type = {EngineType.SPARK, EngineType.FLINK},
+ disabledReason = "Currently SPARK and FLINK do not support cdc")
+public class JdbcPostgresIdentifierIT extends TestSuiteBase implements TestResource {
+ private static final String PG_IMAGE = "postgis/postgis";
+ private static final String PG_DRIVER_JAR =
+ "https://repo1.maven.org/maven2/org/postgresql/postgresql/42.3.3/postgresql-42.3.3.jar";
+ private static final String PG_JDBC_JAR =
+ "https://repo1.maven.org/maven2/net/postgis/postgis-jdbc/2.5.1/postgis-jdbc-2.5.1.jar";
+ private static final String PG_GEOMETRY_JAR =
+ "https://repo1.maven.org/maven2/net/postgis/postgis-geometry/2.5.1/postgis-geometry-2.5.1.jar";
+ private static final List<String> PG_CONFIG_FILE_LIST =
+ Lists.newArrayList("/jdbc_postgres_ide_source_and_sink.conf");
+ private PostgreSQLContainer<?> POSTGRESQL_CONTAINER;
+ private static final String PG_SOURCE_DDL =
+ "CREATE TABLE IF NOT EXISTS pg_ide_source_table (\n"
+ + " gid SERIAL PRIMARY KEY,\n"
+ + " text_col TEXT,\n"
+ + " varchar_col VARCHAR(255),\n"
+ + " char_col CHAR(10),\n"
+ + " boolean_col bool,\n"
+ + " smallint_col int2,\n"
+ + " integer_col int4,\n"
+ + " bigint_col BIGINT,\n"
+ + " decimal_col DECIMAL(10, 2),\n"
+ + " numeric_col NUMERIC(8, 4),\n"
+ + " real_col float4,\n"
+ + " double_precision_col float8,\n"
+ + " smallserial_col SMALLSERIAL,\n"
+ + " serial_col SERIAL,\n"
+ + " bigserial_col BIGSERIAL,\n"
+ + " date_col DATE,\n"
+ + " timestamp_col TIMESTAMP,\n"
+ + " bpchar_col BPCHAR(10),\n"
+ + " age INT NOT null,\n"
+ + " name VARCHAR(255) NOT null,\n"
+ + " point geometry(POINT, 4326),\n"
+ + " linestring geometry(LINESTRING, 4326),\n"
+ + " polygon_colums geometry(POLYGON, 4326),\n"
+ + " multipoint geometry(MULTIPOINT, 4326),\n"
+ + " multilinestring geometry(MULTILINESTRING, 4326),\n"
+ + " multipolygon geometry(MULTIPOLYGON, 4326),\n"
+ + " geometrycollection geometry(GEOMETRYCOLLECTION, 4326),\n"
+ + " geog geography(POINT, 4326)\n"
+ + ")";
+ private static final String PG_SINK_DDL =
+ "CREATE TABLE IF NOT EXISTS test.public.\"PG_IDE_SINK_TABLE\" (\n"
+ + " \"GID\" SERIAL PRIMARY KEY,\n"
+ + " \"TEXT_COL\" TEXT,\n"
+ + " \"VARCHAR_COL\" VARCHAR(255),\n"
+ + " \"CHAR_COL\" CHAR(10),\n"
+ + " \"BOOLEAN_COL\" bool,\n"
+ + " \"SMALLINT_COL\" int2,\n"
+ + " \"INTEGER_COL\" int4,\n"
+ + " \"BIGINT_COL\" BIGINT,\n"
+ + " \"DECIMAL_COL\" DECIMAL(10, 2),\n"
+ + " \"NUMERIC_COL\" NUMERIC(8, 4),\n"
+ + " \"REAL_COL\" float4,\n"
+ + " \"DOUBLE_PRECISION_COL\" float8,\n"
+ + " \"SMALLSERIAL_COL\" SMALLSERIAL,\n"
+ + " \"SERIAL_COL\" SERIAL,\n"
+ + " \"BIGSERIAL_COL\" BIGSERIAL,\n"
+ + " \"DATE_COL\" DATE,\n"
+ + " \"TIMESTAMP_COL\" TIMESTAMP,\n"
+ + " \"BPCHAR_COL\" BPCHAR(10),\n"
+ + " \"AGE\" int4 NOT NULL,\n"
+ + " \"NAME\" varchar(255) NOT NULL,\n"
+ + " \"POINT\" varchar(2000) NULL,\n"
+ + " \"LINESTRING\" varchar(2000) NULL,\n"
+ + " \"POLYGON_COLUMS\" varchar(2000) NULL,\n"
+ + " \"MULTIPOINT\" varchar(2000) NULL,\n"
+ + " \"MULTILINESTRING\" varchar(2000) NULL,\n"
+ + " \"MULTIPOLYGON\" varchar(2000) NULL,\n"
+ + " \"GEOMETRYCOLLECTION\" varchar(2000) NULL,\n"
+ + " \"GEOG\" varchar(2000) NULL\n"
+ + " )";
+
+ private static final String SOURCE_SQL =
+ "select \n"
+ + "gid,\n"
+ + "text_col,\n"
+ + "varchar_col,\n"
+ + "char_col,\n"
+ + "boolean_col,\n"
+ + "smallint_col,\n"
+ + "integer_col,\n"
+ + "bigint_col,\n"
+ + "decimal_col,\n"
+ + "numeric_col,\n"
+ + "real_col,\n"
+ + "double_precision_col,\n"
+ + "smallserial_col,\n"
+ + "serial_col,\n"
+ + "bigserial_col,\n"
+ + "date_col,\n"
+ + "timestamp_col,\n"
+ + "bpchar_col,\n"
+ + "age,\n"
+ + "name,\n"
+ + "point,\n"
+ + "linestring,\n"
+ + "polygon_colums,\n"
+ + "multipoint,\n"
+ + "multilinestring,\n"
+ + "multipolygon,\n"
+ + "geometrycollection,\n"
+ + "geog\n"
+ + " from pg_ide_source_table";
+ private static final String SINK_SQL =
+ "SELECT\n"
+ + " \"GID\",\n"
+ + " \"TEXT_COL\",\n"
+ + " \"VARCHAR_COL\",\n"
+ + " \"CHAR_COL\",\n"
+ + " \"BOOLEAN_COL\",\n"
+ + " \"SMALLINT_COL\",\n"
+ + " \"INTEGER_COL\",\n"
+ + " \"BIGINT_COL\",\n"
+ + " \"DECIMAL_COL\",\n"
+ + " \"NUMERIC_COL\",\n"
+ + " \"REAL_COL\",\n"
+ + " \"DOUBLE_PRECISION_COL\",\n"
+ + " \"SMALLSERIAL_COL\",\n"
+ + " \"SERIAL_COL\",\n"
+ + " \"BIGSERIAL_COL\",\n"
+ + " \"DATE_COL\",\n"
+ + " \"TIMESTAMP_COL\",\n"
+ + " \"BPCHAR_COL\",\n"
+ + " \"AGE\",\n"
+ + " \"NAME\",\n"
+ + " CAST(\"POINT\" AS GEOMETRY) AS POINT,\n"
+ + " CAST(\"LINESTRING\" AS GEOMETRY) AS LINESTRING,\n"
+ + " CAST(\"POLYGON_COLUMS\" AS GEOMETRY) AS POLYGON_COLUMS,\n"
+ + " CAST(\"MULTIPOINT\" AS GEOMETRY) AS MULTIPOINT,\n"
+ + " CAST(\"MULTILINESTRING\" AS GEOMETRY) AS MULTILINESTRING,\n"
+ + " CAST(\"MULTIPOLYGON\" AS GEOMETRY) AS MULTIPOLYGON,\n"
+ + " CAST(\"GEOMETRYCOLLECTION\" AS GEOMETRY) AS GEOMETRYCOLLECTION,\n"
+ + " CAST(\"GEOG\" AS GEOGRAPHY) AS GEOG\n"
+ + "FROM\n"
+ + " \"PG_IDE_SINK_TABLE\";";
+
+ @TestContainerExtension
+ private final ContainerExtendedFactory extendedFactory =
+ container -> {
+ Container.ExecResult extraCommands =
+ container.execInContainer(
+ "bash",
+ "-c",
+ "mkdir -p /tmp/seatunnel/plugins/Jdbc/lib && cd /tmp/seatunnel/plugins/Jdbc/lib && curl -O "
+ + PG_DRIVER_JAR
+ + " && curl -O "
+ + PG_JDBC_JAR
+ + " && curl -O "
+ + PG_GEOMETRY_JAR);
+ Assertions.assertEquals(0, extraCommands.getExitCode());
+ };
+
+ @BeforeAll
+ @Override
+ public void startUp() throws Exception {
+ POSTGRESQL_CONTAINER =
+ new PostgreSQLContainer<>(
+ DockerImageName.parse(PG_IMAGE)
+ .asCompatibleSubstituteFor("postgres"))
+ .withNetwork(TestSuiteBase.NETWORK)
+ .withNetworkAliases("postgresql")
+ .withCommand("postgres -c max_prepared_transactions=100")
+ .withLogConsumer(
+ new Slf4jLogConsumer(DockerLoggerFactory.getLogger(PG_IMAGE)));
+ Startables.deepStart(Stream.of(POSTGRESQL_CONTAINER)).join();
+ log.info("PostgreSQL container started");
+ Class.forName(POSTGRESQL_CONTAINER.getDriverClassName());
+ given().ignoreExceptions()
+ .await()
+ .atLeast(100, TimeUnit.MILLISECONDS)
+ .pollInterval(500, TimeUnit.MILLISECONDS)
+ .atMost(2, TimeUnit.MINUTES)
+ .untilAsserted(this::initializeJdbcTable);
+ log.info("pg data initialization succeeded. Procedure");
+ }
+
+ @TestTemplate
+ public void testAutoGenerateSQL(TestContainer container)
+ throws IOException, InterruptedException {
+ for (String CONFIG_FILE : PG_CONFIG_FILE_LIST) {
+ Container.ExecResult execResult = container.executeJob(CONFIG_FILE);
+ Assertions.assertEquals(0, execResult.getExitCode());
+ Assertions.assertIterableEquals(querySql(SOURCE_SQL), querySql(SINK_SQL));
+ executeSQL("truncate table \"PG_IDE_SINK_TABLE\"");
+ log.info(CONFIG_FILE + " e2e test completed");
+ }
+ }
+
+ private void initializeJdbcTable() {
+ try (Connection connection = getJdbcConnection()) {
+ Statement statement = connection.createStatement();
+ statement.execute(PG_SOURCE_DDL);
+ statement.execute(PG_SINK_DDL);
+ for (int i = 1; i <= 10; i++) {
+ statement.addBatch(
+ "INSERT INTO\n"
+ + " pg_ide_source_table (gid,\n"
+ + " text_col,\n"
+ + " varchar_col,\n"
+ + " char_col,\n"
+ + " boolean_col,\n"
+ + " smallint_col,\n"
+ + " integer_col,\n"
+ + " bigint_col,\n"
+ + " decimal_col,\n"
+ + " numeric_col,\n"
+ + " real_col,\n"
+ + " double_precision_col,\n"
+ + " smallserial_col,\n"
+ + " serial_col,\n"
+ + " bigserial_col,\n"
+ + " date_col,\n"
+ + " timestamp_col,\n"
+ + " bpchar_col,\n"
+ + " age,\n"
+ + " name,\n"
+ + " point,\n"
+ + " linestring,\n"
+ + " polygon_colums,\n"
+ + " multipoint,\n"
+ + " multilinestring,\n"
+ + " multipolygon,\n"
+ + " geometrycollection,\n"
+ + " geog\n"
+ + " )\n"
+ + "VALUES\n"
+ + " (\n"
+ + " '"
+ + i
+ + "',\n"
+ + " 'Hello World',\n"
+ + " 'Test',\n"
+ + " 'Testing',\n"
+ + " true,\n"
+ + " 10,\n"
+ + " 100,\n"
+ + " 1000,\n"
+ + " 10.55,\n"
+ + " 8.8888,\n"
+ + " 3.14,\n"
+ + " 3.14159265,\n"
+ + " 1,\n"
+ + " 100,\n"
+ + " 10000,\n"
+ + " '2023-05-07',\n"
+ + " '2023-05-07 14:30:00',\n"
+ + " 'Testing',\n"
+ + " 21,\n"
+ + " 'Leblanc',\n"
+ + " ST_GeomFromText('POINT(-122.3452 47.5925)', 4326),\n"
+ + " ST_GeomFromText(\n"
+ + " 'LINESTRING(-122.3451 47.5924, -122.3449 47.5923)',\n"
+ + " 4326\n"
+ + " ),\n"
+ + " ST_GeomFromText(\n"
+ + " 'POLYGON((-122.3453 47.5922, -122.3453 47.5926, -122.3448 47.5926, -122.3448 47.5922, -122.3453 47.5922))',\n"
+ + " 4326\n"
+ + " ),\n"
+ + " ST_GeomFromText(\n"
+ + " 'MULTIPOINT(-122.3459 47.5927, -122.3445 47.5918)',\n"
+ + " 4326\n"
+ + " ),\n"
+ + " ST_GeomFromText(\n"
+ + " 'MULTILINESTRING((-122.3463 47.5920, -122.3461 47.5919),(-122.3459 47.5924, -122.3457 47.5923))',\n"
+ + " 4326\n"
+ + " ),\n"
+ + " ST_GeomFromText(\n"
+ + " 'MULTIPOLYGON(((-122.3458 47.5925, -122.3458 47.5928, -122.3454 47.5928, -122.3454 47.5925, -122.3458 47.5925)),((-122.3453 47.5921, -122.3453 47.5924, -122.3448 47.5924, -122.3448 47.5921, -122.3453 47.5921)))',\n"
+ + " 4326\n"
+ + " ),\n"
+ + " ST_GeomFromText(\n"
+ + " 'GEOMETRYCOLLECTION(POINT(-122.3462 47.5921), LINESTRING(-122.3460 47.5924, -122.3457 47.5924))',\n"
+ + " 4326\n"
+ + " ),\n"
+ + " ST_GeographyFromText('POINT(-122.3452 47.5925)')\n"
+ + " )");
+ }
+
+ statement.executeBatch();
+ } catch (SQLException e) {
+ throw new RuntimeException("Initializing PostgreSql table failed!", e);
+ }
+ }
+
+ private Connection getJdbcConnection() throws SQLException {
+ return DriverManager.getConnection(
+ POSTGRESQL_CONTAINER.getJdbcUrl(),
+ POSTGRESQL_CONTAINER.getUsername(),
+ POSTGRESQL_CONTAINER.getPassword());
+ }
+
+ private List<List<Object>> querySql(String sql) {
+ try (Connection connection = getJdbcConnection()) {
+ ResultSet resultSet = connection.createStatement().executeQuery(sql);
+ List<List<Object>> result = new ArrayList<>();
+ int columnCount = resultSet.getMetaData().getColumnCount();
+ while (resultSet.next()) {
+ ArrayList