diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index daf6e83..5318815 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -17,11 +17,12 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up JDK 1.8 - uses: actions/setup-java@v1 + uses: actions/setup-java@v4 with: - java-version: 1.8 + distribution: "temurin" + java-version: "8" - name: Cache the Maven packages to speed up build uses: actions/cache@v2 @@ -37,9 +38,9 @@ jobs: git clone https://github.com/vesoft-inc/nebula-docker-compose.git pushd nebula-docker-compose/ cp ../../nebula-spark-connector/src/test/resources/docker-compose.yaml . - docker-compose up -d + docker compose up -d sleep 30 - docker-compose ps + docker compose ps popd popd diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6f46154..46771e2 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -13,11 +13,12 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up JDK 1.8 - uses: actions/setup-java@v1 + uses: actions/setup-java@v4 with: - java-version: 1.8 + distribution: "temurin" + java-version: "8" - name: Cache the Maven packages to speed up build uses: actions/cache@v2 @@ -33,7 +34,7 @@ jobs: git clone https://github.com/vesoft-inc/nebula-docker-compose.git pushd nebula-docker-compose/ cp ../../nebula-spark-connector/src/test/resources/docker-compose.yaml . 
- docker-compose up -d + docker compose up -d sleep 30 popd popd diff --git a/.github/workflows/snapshot.yml b/.github/workflows/snapshot.yml index c4b16c0..47cafc0 100644 --- a/.github/workflows/snapshot.yml +++ b/.github/workflows/snapshot.yml @@ -15,11 +15,12 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up JDK 1.8 - uses: actions/setup-java@v1 + uses: actions/setup-java@v4 with: - java-version: 1.8 + distribution: "temurin" + java-version: "8" - name: Cache the Maven packages to speed up build uses: actions/cache@v2 @@ -35,7 +36,7 @@ jobs: git clone https://github.com/vesoft-inc/nebula-docker-compose.git pushd nebula-docker-compose/ cp ../../nebula-spark-connector/src/test/resources/docker-compose.yaml . - docker-compose up -d + docker compose up -d sleep 30 popd popd diff --git a/README.md b/README.md index ea4a26e..3b2e570 100644 --- a/README.md +++ b/README.md @@ -217,6 +217,7 @@ Let's try a write example, by default, the `writeMode` is `insert` ```python df.write.format("com.vesoft.nebula.connector.NebulaDataSource").option( "type", "vertex").option( + "operateType", "write").option( "spaceName", "basketballplayer").option( "label", "player").option( "vidPolicy", "").option( @@ -232,6 +233,7 @@ For delete or update write mode, we could(for instance)specify with `writeMode` ```python df.write.format("com.vesoft.nebula.connector.NebulaDataSource").option( "type", "vertex").option( + "operateType", "write").option( "spaceName", "basketballplayer").option( "label", "player").option( "vidPolicy", "").option( @@ -247,6 +249,7 @@ df.write.format("com.vesoft.nebula.connector.NebulaDataSource").option( ```python df.write.format("com.vesoft.nebula.connector.NebulaDataSource")\ .mode("overwrite")\ + .option("operateType", "write")\ .option("srcPolicy", "")\ .option("dstPolicy", "")\ .option("metaAddress", "metad0:9559")\ @@ -266,6 +269,7 @@ df.write.format("com.vesoft.nebula.connector.NebulaDataSource")\ 
```python df.write.format("com.vesoft.nebula.connector.NebulaDataSource")\ .mode("overwrite")\ + .option("operateType", "write")\ .option("srcPolicy", "")\ .option("dstPolicy", "")\ .option("metaAddress", "metad0:9559")\ @@ -289,6 +293,7 @@ For more options, i.e. delete edge with vertex being deleted, refer to [nebula/c ```scala /** write config */ + val OPERATE_TYPE: String = "operateType" val RATE_LIMIT: String = "rateLimit" val VID_POLICY: String = "vidPolicy" val SRC_POLICY: String = "srcPolicy" @@ -330,6 +335,7 @@ spark = SparkSession.builder.config( df = spark.read.format( "com.vesoft.nebula.connector.NebulaDataSource").option( "type", "vertex").option( + "operateType", "read").option( "spaceName", "basketballplayer").option( "label", "player").option( "returnCols", "name,age").option( diff --git a/README_CN.md b/README_CN.md index 74dcea2..971f6b1 100644 --- a/README_CN.md +++ b/README_CN.md @@ -208,6 +208,7 @@ Nebula Spark Connector 支持 Spark 2.2, 2.4 和 3.x. df = spark.read.format( "com.vesoft.nebula.connector.NebulaDataSource").option( "type", "vertex").option( + "operateType", "read").option( "spaceName", "basketballplayer").option( "label", "player").option( "returnCols", "name,age").option( @@ -243,6 +244,7 @@ rows ```python df.write.format("com.vesoft.nebula.connector.NebulaDataSource").option( "type", "vertex").option( + "operateType", "write").option( "spaceName", "basketballplayer").option( "label", "player").option( "vidPolicy", "").option( @@ -261,6 +263,7 @@ df.write.format("com.vesoft.nebula.connector.NebulaDataSource").option( ```python df.write.format("com.vesoft.nebula.connector.NebulaDataSource").option( "type", "vertex").option( + "operateType", "write").option( "spaceName", "basketballplayer").option( "label", "player").option( "vidPolicy", "").option( @@ -278,6 +281,7 @@ df.write.format("com.vesoft.nebula.connector.NebulaDataSource") ```python df.write.format("com.vesoft.nebula.connector.NebulaDataSource") .mode("overwrite") + 
.option("operateType", "write") .option("srcPolicy", "") .option("dstPolicy", "") .option("metaAddress", "metad0:9559") @@ -299,6 +303,7 @@ df.write.format("com.vesoft.nebula.connector.NebulaDataSource") ```python df.write.format("com.vesoft.nebula.connector.NebulaDataSource") .mode("overwrite") + .option("operateType", "write") .option("srcPolicy", "") .option("dstPolicy", "") .option("metaAddress", "metad0:9559") @@ -323,6 +328,7 @@ df.write.format("com.vesoft.nebula.connector.NebulaDataSource") ```scala /** write config */ +val OPERATE_TYPE: String = "operateType" val RATE_LIMIT: String = "rateLimit" val VID_POLICY: String = "vidPolicy" val SRC_POLICY: String = "srcPolicy" @@ -364,6 +370,7 @@ spark = SparkSession.builder.config( df = spark.read.format( "com.vesoft.nebula.connector.NebulaDataSource").option( "type", "vertex").option( + "operateType", "read").option( "spaceName", "basketballplayer").option( "label", "player").option( "returnCols", "name,age").option(