diff --git a/asynchbase/README.md b/asynchbase/README.md index 1a300c9bd2..787d6a8152 100644 --- a/asynchbase/README.md +++ b/asynchbase/README.md @@ -21,9 +21,24 @@ This driver provides a YCSB workload binding for Apache HBase using an alternati ## Quickstart -### 1. Setup Hbase +### 1. Start a HBase Server +You need to start a single node or a cluster to point the client at. Please see [Apache HBase Reference Guide](http://hbase.apache.org/book.html) for more details and instructions. + +### 2. Set up YCSB + +Download the [latest YCSB](https://github.com/brianfrankcooper/YCSB/releases/latest) file. Follow the instructions. + +### 3. Create a HBase table for testing + +For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163): + +``` +hbase(main):001:0> n_splits = 200 # HBase recommends (10 * number of regionservers) +hbase(main):002:0> create 'usertable', 'family', {SPLITS => (1..n_splits).map {|i| "user#{1000+i*(9999-1000)/n_splits}"}} +``` + +*Failing to do so will cause all writes to initially target a single region server*. -Follow directions 1 to 3 from ``hbase098``'s readme. ### 2. Load a Workload @@ -54,6 +69,3 @@ The following options can be configured using CLI (using the `-p` parameter) or * `durable`: When set to false, writes and deletes bypass the WAL for quicker responses. Default is true. * `jointimeout`: A timeout value, in milliseconds, for waiting on operations synchronously before an error is thrown. * `prefetchmeta`: Whether or not to read meta for all regions in the table and connect to the proper region servers before starting operations. Defaults to false. - - -Note: This module includes some Google Guava source files from version 12 that were later removed but are still required by HBase's test modules for setting up the mini cluster during integration testing. 
\ No newline at end of file diff --git a/asynchbase/pom.xml b/asynchbase/pom.xml index 00ada32418..e192766a08 100644 --- a/asynchbase/pom.xml +++ b/asynchbase/pom.xml @@ -32,12 +32,34 @@ LICENSE file. + org.hbase asynchbase ${asynchbase.version} + + + org.slf4j + log4j-over-slf4j + + + + org.slf4j + slf4j-log4j12 + 1.7.25 + + + + log4j + log4j + 1.2.17 + + site.ycsb core @@ -50,14 +72,6 @@ LICENSE file. zookeeper 3.4.5 - - log4j - log4j - - - org.slf4j - slf4j-log4j12 - jline jline @@ -82,43 +96,18 @@ LICENSE file. org.apache.hbase - hbase-testing-util - ${hbase10.version} + hbase-shaded-testing-util + ${hbase14.version} test - - - jdk.tools - jdk.tools - - org.apache.hbase - hbase-client - ${hbase10.version} + hbase-shaded-client + ${hbase14.version} test - - - jdk.tools - jdk.tools - - - - log4j - log4j - 1.2.17 - test - - - - org.slf4j - log4j-over-slf4j - 1.7.7 - test - diff --git a/asynchbase/src/test/java/com/google/common/base/Stopwatch.java b/asynchbase/src/test/java/com/google/common/base/Stopwatch.java deleted file mode 100644 index 4d46924bda..0000000000 --- a/asynchbase/src/test/java/com/google/common/base/Stopwatch.java +++ /dev/null @@ -1,278 +0,0 @@ -/* - * Copyright (C) 2008 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.google.common.base; - -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; -import static java.util.concurrent.TimeUnit.MICROSECONDS; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.NANOSECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; - -import com.google.common.annotations.Beta; -import com.google.common.annotations.GwtCompatible; -import com.google.common.annotations.GwtIncompatible; - -import java.util.concurrent.TimeUnit; - -/** - * An object that measures elapsed time in nanoseconds. It is useful to measure - * elapsed time using this class instead of direct calls to {@link - * System#nanoTime} for a few reasons: - * - * - * - *

Basic usage: - *

- *   Stopwatch stopwatch = Stopwatch.{@link #createStarted createStarted}();
- *   doSomething();
- *   stopwatch.{@link #stop stop}(); // optional
- *
- *   long millis = stopwatch.elapsed(MILLISECONDS);
- *
- *   log.info("that took: " + stopwatch); // formatted string like "12.3 ms"
- * 
- * - *

Stopwatch methods are not idempotent; it is an error to start or stop a - * stopwatch that is already in the desired state. - * - *

When testing code that uses this class, use the {@linkplain - * #Stopwatch(Ticker) alternate constructor} to supply a fake or mock ticker. - * This allows you to - * simulate any valid behavior of the stopwatch. - * - *

Note: This class is not thread-safe. - * - * @author Kevin Bourrillion - * @since 10.0 - */ -@Beta -@GwtCompatible(emulated = true) -public final class Stopwatch { - private final Ticker ticker; - private boolean isRunning; - private long elapsedNanos; - private long startTick; - - /** - * Creates (but does not start) a new stopwatch using {@link System#nanoTime} - * as its time source. - * - * @since 15.0 - */ - public static Stopwatch createUnstarted() { - return new Stopwatch(); - } - - /** - * Creates (but does not start) a new stopwatch, using the specified time - * source. - * - * @since 15.0 - */ - public static Stopwatch createUnstarted(Ticker ticker) { - return new Stopwatch(ticker); - } - - /** - * Creates (and starts) a new stopwatch using {@link System#nanoTime} - * as its time source. - * - * @since 15.0 - */ - public static Stopwatch createStarted() { - return new Stopwatch().start(); - } - - /** - * Creates (and starts) a new stopwatch, using the specified time - * source. - * - * @since 15.0 - */ - public static Stopwatch createStarted(Ticker ticker) { - return new Stopwatch(ticker).start(); - } - - /** - * Creates (but does not start) a new stopwatch using {@link System#nanoTime} - * as its time source. - * - * @deprecated Use {@link Stopwatch#createUnstarted()} instead. - */ - @Deprecated - public Stopwatch() { - this(Ticker.systemTicker()); - } - - /** - * Creates (but does not start) a new stopwatch, using the specified time - * source. - * - * @deprecated Use {@link Stopwatch#createUnstarted(Ticker)} instead. - */ - @Deprecated - public Stopwatch(Ticker ticker) { - this.ticker = checkNotNull(ticker, "ticker"); - } - - /** - * Returns {@code true} if {@link #start()} has been called on this stopwatch, - * and {@link #stop()} has not been called since the last call to {@code - * start()}. - */ - public boolean isRunning() { - return isRunning; - } - - /** - * Starts the stopwatch. 
- * - * @return this {@code Stopwatch} instance - * @throws IllegalStateException if the stopwatch is already running. - */ - public Stopwatch start() { - checkState(!isRunning, "This stopwatch is already running."); - isRunning = true; - startTick = ticker.read(); - return this; - } - - /** - * Stops the stopwatch. Future reads will return the fixed duration that had - * elapsed up to this point. - * - * @return this {@code Stopwatch} instance - * @throws IllegalStateException if the stopwatch is already stopped. - */ - public Stopwatch stop() { - long tick = ticker.read(); - checkState(isRunning, "This stopwatch is already stopped."); - isRunning = false; - elapsedNanos += tick - startTick; - return this; - } - - /** - * Sets the elapsed time for this stopwatch to zero, - * and places it in a stopped state. - * - * @return this {@code Stopwatch} instance - */ - public Stopwatch reset() { - elapsedNanos = 0; - isRunning = false; - return this; - } - - private long elapsedNanos() { - return isRunning ? ticker.read() - startTick + elapsedNanos : elapsedNanos; - } - - /** - * Returns the current elapsed time shown on this stopwatch, expressed - * in the desired time unit, with any fraction rounded down. - * - *

Note that the overhead of measurement can be more than a microsecond, so - * it is generally not useful to specify {@link TimeUnit#NANOSECONDS} - * precision here. - * - * @since 14.0 (since 10.0 as {@code elapsedTime()}) - */ - public long elapsed(TimeUnit desiredUnit) { - return desiredUnit.convert(elapsedNanos(), NANOSECONDS); - } - - /** - * Returns the current elapsed time shown on this stopwatch, expressed - * in the desired time unit, with any fraction rounded down. - * - *

Note that the overhead of measurement can be more than a microsecond, so - * it is generally not useful to specify {@link TimeUnit#NANOSECONDS} - * precision here. - * - * @deprecated Use {@link Stopwatch#elapsed(TimeUnit)} instead. This method is - * scheduled to be removed in Guava release 16.0. - */ - @Deprecated - public long elapsedTime(TimeUnit desiredUnit) { - return elapsed(desiredUnit); - } - - /** - * Returns the current elapsed time shown on this stopwatch, expressed - * in milliseconds, with any fraction rounded down. This is identical to - * {@code elapsed(TimeUnit.MILLISECONDS)}. - * - * @deprecated Use {@code stopwatch.elapsed(MILLISECONDS)} instead. This - * method is scheduled to be removed in Guava release 16.0. - */ - @Deprecated - public long elapsedMillis() { - return elapsed(MILLISECONDS); - } - - /** - * Returns a string representation of the current elapsed time. - */ - @GwtIncompatible("String.format()") - @Override public String toString() { - long nanos = elapsedNanos(); - - TimeUnit unit = chooseUnit(nanos); - double value = (double) nanos / NANOSECONDS.convert(1, unit); - - // Too bad this functionality is not exposed as a regular method call - return String.format("%.4g %s", value, abbreviate(unit)); - } - - private static TimeUnit chooseUnit(long nanos) { - if (SECONDS.convert(nanos, NANOSECONDS) > 0) { - return SECONDS; - } - if (MILLISECONDS.convert(nanos, NANOSECONDS) > 0) { - return MILLISECONDS; - } - if (MICROSECONDS.convert(nanos, NANOSECONDS) > 0) { - return MICROSECONDS; - } - return NANOSECONDS; - } - - private static String abbreviate(TimeUnit unit) { - switch (unit) { - case NANOSECONDS: - return "ns"; - case MICROSECONDS: - return "\u03bcs"; // μs - case MILLISECONDS: - return "ms"; - case SECONDS: - return "s"; - default: - throw new AssertionError(); - } - } -} \ No newline at end of file diff --git a/asynchbase/src/test/java/com/google/common/io/Closeables.java 
b/asynchbase/src/test/java/com/google/common/io/Closeables.java deleted file mode 100644 index 4a92c9c098..0000000000 --- a/asynchbase/src/test/java/com/google/common/io/Closeables.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.common.io; - -import com.google.common.annotations.Beta; -import com.google.common.annotations.VisibleForTesting; - -import java.io.Closeable; -import java.io.IOException; -import java.util.logging.Level; -import java.util.logging.Logger; - -import javax.annotation.Nullable; - -/** - * Utility methods for working with {@link Closeable} objects. - * - * @author Michael Lancaster - * @since 1.0 - */ -@Beta -public final class Closeables { - @VisibleForTesting static final Logger logger - = Logger.getLogger(Closeables.class.getName()); - - private Closeables() {} - - /** - * Closes a {@link Closeable}, with control over whether an - * {@code IOException} may be thrown. This is primarily useful in a - * finally block, where a thrown exception needs to be logged but not - * propagated (otherwise the original exception will be lost). - * - *

If {@code swallowIOException} is true then we never throw - * {@code IOException} but merely log it. - * - *

Example: - * - *

public void useStreamNicely() throws IOException {
-   * SomeStream stream = new SomeStream("foo");
-   * boolean threw = true;
-   * try {
-   *   // Some code which does something with the Stream. May throw a
-   *   // Throwable.
-   *   threw = false; // No throwable thrown.
-   * } finally {
-   *   // Close the stream.
-   *   // If an exception occurs, only rethrow it if (threw==false).
-   *   Closeables.close(stream, threw);
-   * }
-   * 
- * - * @param closeable the {@code Closeable} object to be closed, or null, - * in which case this method does nothing - * @param swallowIOException if true, don't propagate IO exceptions - * thrown by the {@code close} methods - * @throws IOException if {@code swallowIOException} is false and - * {@code close} throws an {@code IOException}. - */ - public static void close(@Nullable Closeable closeable, - boolean swallowIOException) throws IOException { - if (closeable == null) { - return; - } - try { - closeable.close(); - } catch (IOException e) { - if (swallowIOException) { - logger.log(Level.WARNING, - "IOException thrown while closing Closeable.", e); - } else { - throw e; - } - } - } - - /** - * Equivalent to calling {@code close(closeable, true)}, but with no - * IOException in the signature. - * @param closeable the {@code Closeable} object to be closed, or null, in - * which case this method does nothing - */ - public static void closeQuietly(@Nullable Closeable closeable) { - try { - close(closeable, true); - } catch (IOException e) { - logger.log(Level.SEVERE, "IOException should not have been thrown.", e); - } - } -} \ No newline at end of file diff --git a/asynchbase/src/test/java/com/google/common/io/LimitInputStream.java b/asynchbase/src/test/java/com/google/common/io/LimitInputStream.java deleted file mode 100644 index a529f5e127..0000000000 --- a/asynchbase/src/test/java/com/google/common/io/LimitInputStream.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.common.io; - -import com.google.common.annotations.Beta; -import com.google.common.base.Preconditions; - -import java.io.FilterInputStream; -import java.io.IOException; -import java.io.InputStream; - -/** - * An InputStream that limits the number of bytes which can be read. - * - * @author Charles Fry - * @since 1.0 - */ -@Beta -public final class LimitInputStream extends FilterInputStream { - - private long left; - private long mark = -1; - - /** - * Wraps another input stream, limiting the number of bytes which can be read. - * - * @param in the input stream to be wrapped - * @param limit the maximum number of bytes to be read - */ - public LimitInputStream(InputStream in, long limit) { - super(in); - Preconditions.checkNotNull(in); - Preconditions.checkArgument(limit >= 0, "limit must be non-negative"); - left = limit; - } - - @Override public int available() throws IOException { - return (int) Math.min(in.available(), left); - } - - @Override public synchronized void mark(int readlimit) { - in.mark(readlimit); - mark = left; - // it's okay to mark even if mark isn't supported, as reset won't work - } - - @Override public int read() throws IOException { - if (left == 0) { - return -1; - } - - int result = in.read(); - if (result != -1) { - --left; - } - return result; - } - - @Override public int read(byte[] b, int off, int len) throws IOException { - if (left == 0) { - return -1; - } - - len = (int) Math.min(len, left); - int result = in.read(b, off, len); - if (result != -1) { - left -= result; - } - return result; - } - - @Override public synchronized void reset() throws IOException { - if (!in.markSupported()) { - throw new IOException("Mark not supported"); - } - if (mark == -1) { - throw new IOException("Mark not set"); - } - - in.reset(); - left = mark; - } - - @Override public long skip(long n) throws IOException { - n = 
Math.min(n, left); - long skipped = in.skip(n); - left -= skipped; - return skipped; - } -} \ No newline at end of file diff --git a/bin/bindings.properties b/bin/bindings.properties index d11744fb36..2f7e5a6cdd 100755 --- a/bin/bindings.properties +++ b/bin/bindings.properties @@ -48,11 +48,8 @@ foundationdb:site.ycsb.db.foundationdb.FoundationDBClient geode:site.ycsb.db.GeodeClient googlebigtable:site.ycsb.db.GoogleBigtableClient googledatastore:site.ycsb.db.GoogleDatastoreClient -hbase098:site.ycsb.db.HBaseClient -hbase10:site.ycsb.db.HBaseClient10 -hbase12:site.ycsb.db.hbase12.HBaseClient12 hbase14:site.ycsb.db.hbase14.HBaseClient14 -hbase20:site.ycsb.db.hbase20.HBaseClient20 +hbase22:site.ycsb.db.hbase22.HBaseClient22 hypertable:site.ycsb.db.HypertableClient ignite:site.ycsb.db.ignite.IgniteClient ignite-sql:site.ycsb.db.ignite.IgniteSqlClient diff --git a/bin/ycsb b/bin/ycsb index a1734e398d..349a96313f 100755 --- a/bin/ycsb +++ b/bin/ycsb @@ -75,11 +75,8 @@ DATABASES = { "googlebigtable" : "site.ycsb.db.GoogleBigtableClient", "googledatastore" : "site.ycsb.db.GoogleDatastoreClient", "griddb" : "site.ycsb.db.griddb.GridDBClient", - "hbase098" : "site.ycsb.db.HBaseClient", - "hbase10" : "site.ycsb.db.HBaseClient10", - "hbase12" : "site.ycsb.db.hbase12.HBaseClient12", "hbase14" : "site.ycsb.db.hbase14.HBaseClient14", - "hbase20" : "site.ycsb.db.hbase20.HBaseClient20", + "hbase22" : "site.ycsb.db.hbase22.HBaseClient22", "hypertable" : "site.ycsb.db.HypertableClient", "ignite" : "site.ycsb.db.ignite.IgniteClient", "ignite-sql" : "site.ycsb.db.ignite.IgniteSqlClient", @@ -270,16 +267,6 @@ def main(): warn("The 'couchbase' client has been deprecated. If you are using " "Couchbase 4.0+ try using the 'couchbase2' client instead.") - if binding == "hbase098": - warn("The 'hbase098' client has been deprecated because HBase 0.98 " - "is EOM. 
If you are using HBase 1.2+ try using the 'hbase12' " - "client instead.") - - if binding == "hbase10": - warn("The 'hbase10' client has been deprecated because HBase 1.0 " - "is EOM. If you are using HBase 1.2+ try using the 'hbase12' " - "client instead.") - if binding == "arangodb3": warn("The 'arangodb3' client has been deprecated. The binding 'arangodb' " "now covers every ArangoDB version. This alias will be removed " diff --git a/bin/ycsb.bat b/bin/ycsb.bat index a53ccb2388..323f37a832 100755 --- a/bin/ycsb.bat +++ b/bin/ycsb.bat @@ -206,16 +206,6 @@ IF NOT "%BINDING_DIR%" == "couchbase" GOTO notOldCouchbase echo [WARN] The 'couchbase' client is deprecated. If you are using Couchbase 4.0+ try using the 'couchbase2' client instead. :notOldCouchbase -@REM HBase 0.98 deprecation message -IF NOT "%BINDING_DIR%" == "hbase098" GOTO not098HBase -echo [WARN] The 'hbase098' client is deprecated because HBase 0.98 is EOM. If you are using HBase 1.2+ try using the 'hbase12' client instead. -:not098HBase - -@REM HBase 1.0 deprecation message -IF NOT "%BINDING_DIR%" == "hbase10" GOTO not10HBase -echo [WARN] The 'hbase10' client is deprecated because HBase 1.0 is EOM. If you are using HBase 1.2+ try using the 'hbase12' client instead. -:not10HBase - @REM Get the rest of the arguments, skipping the first 2 FOR /F "tokens=2*" %%G IN ("%*") DO ( SET YCSB_ARGS=%%H diff --git a/bin/ycsb.sh b/bin/ycsb.sh index 01b4e04685..05f432e759 100755 --- a/bin/ycsb.sh +++ b/bin/ycsb.sh @@ -232,20 +232,6 @@ if [ "${BINDING_DIR}" = "couchbase" ] ; then Couchbase 4.0+ try using the 'couchbase2' client instead." fi -# HBase 0.98 deprecation message -if [ "${BINDING_DIR}" = "hbase098" ] ; then - echo "[WARN] The 'hbase098' client is deprecated because HBase 0.98 \ -is EOM. If you are using HBase 1.2+ try using the 'hbase12' client \ -instead." 
-fi - -# HBase 1.0 deprecation message -if [ "${BINDING_DIR}" = "hbase10" ] ; then - echo "[WARN] The 'hbase10' client is deprecated because HBase 1.0 \ -is EOM. If you are using HBase 1.2+ try using the 'hbase12' client \ -instead." -fi - # For Cygwin, switch paths to Windows format before running java if $CYGWIN; then [ -n "$JAVA_HOME" ] && JAVA_HOME=$(cygpath --unix "$JAVA_HOME") diff --git a/distribution/pom.xml b/distribution/pom.xml index b477f9032e..ad62f8731d 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -134,21 +134,6 @@ LICENSE file. griddb-binding ${project.version} - - site.ycsb - hbase098-binding - ${project.version} - - - site.ycsb - hbase10-binding - ${project.version} - - - site.ycsb - hbase12-binding - ${project.version} - site.ycsb hbase14-binding @@ -156,7 +141,7 @@ LICENSE file. site.ycsb - hbase20-binding + hbase22-binding ${project.version} diff --git a/googlebigtable/README.md b/googlebigtable/README.md index 81b6cf484a..d33a379d62 100644 --- a/googlebigtable/README.md +++ b/googlebigtable/README.md @@ -17,7 +17,7 @@ LICENSE file. # Google Bigtable Driver for YCSB -This driver provides a YCSB workload binding for Google's hosted Bigtable, the inspiration for a number of key-value stores like HBase and Cassandra. The Bigtable Java client provides both Protobuf based GRPC and HBase client APIs. This binding implements the Protobuf API for testing the native client. To test Bigtable using the HBase API, see the `hbase10` binding. +This driver provides a YCSB workload binding for Google's hosted Bigtable, the inspiration for a number of key-value stores like HBase and Cassandra. The Bigtable Java client provides both Protobuf based GRPC and HBase client APIs. This binding implements the Protobuf API for testing the native client. To test Bigtable using the HBase API, see the `hbase14` binding. 
## Quickstart diff --git a/hbase098/pom.xml b/hbase098/pom.xml deleted file mode 100644 index b35c0beb5a..0000000000 --- a/hbase098/pom.xml +++ /dev/null @@ -1,50 +0,0 @@ - - - - - 4.0.0 - - site.ycsb - binding-parent - 0.18.0-SNAPSHOT - ../binding-parent/ - - - hbase098-binding - HBase 0.98.x DB Binding - - - - org.apache.hbase - hbase-client - ${hbase098.version} - - - jdk.tools - jdk.tools - - - - - site.ycsb - core - ${project.version} - provided - - - diff --git a/hbase098/src/main/java/site/ycsb/db/HBaseClient.java b/hbase098/src/main/java/site/ycsb/db/HBaseClient.java deleted file mode 100644 index 0ce20b9065..0000000000 --- a/hbase098/src/main/java/site/ycsb/db/HBaseClient.java +++ /dev/null @@ -1,483 +0,0 @@ -/** - * Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the License. You - * may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. - */ - -package site.ycsb.db; - -import site.ycsb.ByteArrayByteIterator; -import site.ycsb.ByteIterator; -import site.ycsb.DBException; -import site.ycsb.Status; -import site.ycsb.measurements.Measurements; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.client.*; -import org.apache.hadoop.hbase.filter.PageFilter; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.security.UserGroupInformation; - -import java.io.IOException; -import java.util.*; -import java.util.concurrent.atomic.AtomicInteger; - -import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; -import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; - -/** - * HBase client for YCSB framework. - */ -public class HBaseClient extends site.ycsb.DB { - private static final Configuration CONFIG = HBaseConfiguration.create(); - private static final AtomicInteger THREAD_COUNT = new AtomicInteger(0); - - private boolean debug = false; - - private String tableName = ""; - private static HConnection hConn = null; - private HTableInterface hTable = null; - private String columnFamily = ""; - private byte[] columnFamilyBytes; - private boolean clientSideBuffering = false; - private long writeBufferSize = 1024 * 1024 * 12; - /** - * Whether or not a page filter should be used to limit scan length. - */ - private boolean usePageFilter = true; - - private static final Object TABLE_LOCK = new Object(); - - /** - * Initialize any state for this DB. 
- * Called once per DB instance; there is one DB instance per client thread. - */ - public void init() throws DBException { - if ((getProperties().getProperty("debug") != null) && - (getProperties().getProperty("debug").compareTo("true") == 0)) { - debug = true; - } - - if (getProperties().containsKey("clientbuffering")) { - clientSideBuffering = Boolean.parseBoolean(getProperties().getProperty("clientbuffering")); - } - if (getProperties().containsKey("writebuffersize")) { - writeBufferSize = Long.parseLong(getProperties().getProperty("writebuffersize")); - } - if ("false".equals(getProperties().getProperty("hbase.usepagefilter", "true"))) { - usePageFilter = false; - } - if ("kerberos".equalsIgnoreCase(CONFIG.get("hbase.security.authentication"))) { - CONFIG.set("hadoop.security.authentication", "Kerberos"); - UserGroupInformation.setConfiguration(CONFIG); - } - if ((getProperties().getProperty("principal") != null) && (getProperties().getProperty("keytab") != null)) { - try { - UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"), - getProperties().getProperty("keytab")); - } catch (IOException e) { - System.err.println("Keytab file is not readable or not found"); - throw new DBException(e); - } - } - try { - THREAD_COUNT.getAndIncrement(); - synchronized (THREAD_COUNT) { - if (hConn == null) { - hConn = HConnectionManager.createConnection(CONFIG); - } - } - } catch (IOException e) { - System.err.println("Connection to HBase was not successful"); - throw new DBException(e); - } - columnFamily = getProperties().getProperty("columnfamily"); - if (columnFamily == null) { - System.err.println("Error, must specify a columnfamily for HBase tableName"); - throw new DBException("No columnfamily specified"); - } - columnFamilyBytes = Bytes.toBytes(columnFamily); - - // Terminate right now if tableName does not exist, since the client - // will not propagate this error upstream once the workload - // starts. 
- String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); - try { - HTableInterface ht = hConn.getTable(table); - ht.getTableDescriptor(); - } catch (IOException e) { - throw new DBException(e); - } - } - - /** - * Cleanup any state for this DB. - * Called once per DB instance; there is one DB instance per client thread. - */ - public void cleanup() throws DBException { - // Get the measurements instance as this is the only client that should - // count clean up time like an update since autoflush is off. - Measurements measurements = Measurements.getMeasurements(); - try { - long st = System.nanoTime(); - if (hTable != null) { - hTable.flushCommits(); - } - synchronized (THREAD_COUNT) { - int threadCount = THREAD_COUNT.decrementAndGet(); - if (threadCount <= 0 && hConn != null) { - hConn.close(); - } - } - long en = System.nanoTime(); - measurements.measure("UPDATE", (int) ((en - st) / 1000)); - } catch (IOException e) { - throw new DBException(e); - } - } - - private void getHTable(String table) throws IOException { - synchronized (TABLE_LOCK) { - hTable = hConn.getTable(table); - //2 suggestions from http://ryantwopointoh.blogspot.com/2009/01/performance-of-hbase-importing.html - hTable.setAutoFlush(!clientSideBuffering, true); - hTable.setWriteBufferSize(writeBufferSize); - //return hTable; - } - - } - - /** - * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. - * - * @param table The name of the tableName - * @param key The record key of the record to read. - * @param fields The list of fields to read, or null for all of them - * @param result A HashMap of field/value pairs for the result - * @return Zero on success, a non-zero error code on error - */ - public Status read(String table, String key, Set fields, Map result) { - //if this is a "new" tableName, init HTable object. 
Else, use existing one - if (!this.tableName.equals(table)) { - hTable = null; - try { - getHTable(table); - this.tableName = table; - } catch (IOException e) { - System.err.println("Error accessing HBase tableName: " + e); - return Status.ERROR; - } - } - - Result r; - try { - if (debug) { - System.out.println("Doing read from HBase columnfamily " + columnFamily); - System.out.println("Doing read for key: " + key); - } - Get g = new Get(Bytes.toBytes(key)); - if (fields == null) { - g.addFamily(columnFamilyBytes); - } else { - for (String field : fields) { - g.addColumn(columnFamilyBytes, Bytes.toBytes(field)); - } - } - r = hTable.get(g); - } catch (IOException e) { - System.err.println("Error doing get: " + e); - return Status.ERROR; - } catch (ConcurrentModificationException e) { - //do nothing for now...need to understand HBase concurrency model better - return Status.ERROR; - } - - for (KeyValue kv : r.raw()) { - result.put( - Bytes.toString(kv.getQualifier()), - new ByteArrayByteIterator(kv.getValue())); - if (debug) { - System.out.println("Result for field: " + Bytes.toString(kv.getQualifier()) + - " is: " + Bytes.toString(kv.getValue())); - } - - } - return Status.OK; - } - - /** - * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored - * in a HashMap. - * - * @param table The name of the tableName - * @param startkey The record key of the first record to read. - * @param recordcount The number of records to read - * @param fields The list of fields to read, or null for all of them - * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record - * @return Zero on success, a non-zero error code on error - */ - public Status scan(String table, String startkey, int recordcount, Set fields, - Vector> result) { - //if this is a "new" tableName, init HTable object. 
Else, use existing one - if (!this.tableName.equals(table)) { - hTable = null; - try { - getHTable(table); - this.tableName = table; - } catch (IOException e) { - System.err.println("Error accessing HBase tableName: " + e); - return Status.ERROR; - } - } - - Scan s = new Scan(Bytes.toBytes(startkey)); - //HBase has no record limit. Here, assume recordcount is small enough to bring back in one call. - //We get back recordcount records - s.setCaching(recordcount); - if (this.usePageFilter) { - s.setFilter(new PageFilter(recordcount)); - } - - //add specified fields or else all fields - if (fields == null) { - s.addFamily(columnFamilyBytes); - } else { - for (String field : fields) { - s.addColumn(columnFamilyBytes, Bytes.toBytes(field)); - } - } - - //get results - try (ResultScanner scanner = hTable.getScanner(s)) { - int numResults = 0; - for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { - //get row key - String key = Bytes.toString(rr.getRow()); - if (debug) { - System.out.println("Got scan result for key: " + key); - } - - HashMap rowResult = new HashMap<>(); - - for (KeyValue kv : rr.raw()) { - rowResult.put( - Bytes.toString(kv.getQualifier()), - new ByteArrayByteIterator(kv.getValue())); - } - //add rowResult to result vector - result.add(rowResult); - numResults++; - - // PageFilter does not guarantee that the number of results is <= pageSize, so this - // break is required. - //if hit recordcount, bail out - if (numResults >= recordcount) { - break; - } - } //done with row - - } catch (IOException e) { - if (debug) { - System.out.println("Error in getting/parsing scan result: " + e); - } - return Status.ERROR; - } - - return Status.OK; - } - - /** - * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the - * record with the specified record key, overwriting any existing values with the same field name. 
- * - * @param table The name of the tableName - * @param key The record key of the record to write - * @param values A HashMap of field/value pairs to update in the record - * @return Zero on success, a non-zero error code on error - */ - public Status update(String table, String key, Map values) { - //if this is a "new" tableName, init HTable object. Else, use existing one - if (!this.tableName.equals(table)) { - hTable = null; - try { - getHTable(table); - this.tableName = table; - } catch (IOException e) { - System.err.println("Error accessing HBase tableName: " + e); - return Status.ERROR; - } - } - - - if (debug) { - System.out.println("Setting up put for key: " + key); - } - Put p = new Put(Bytes.toBytes(key)); - for (Map.Entry entry : values.entrySet()) { - byte[] value = entry.getValue().toArray(); - if (debug) { - System.out.println("Adding field/value " + entry.getKey() + "/" + - Bytes.toStringBinary(value) + " to put request"); - } - p.add(columnFamilyBytes, Bytes.toBytes(entry.getKey()), value); - } - - try { - hTable.put(p); - } catch (IOException e) { - if (debug) { - System.err.println("Error doing put: " + e); - } - return Status.ERROR; - } catch (ConcurrentModificationException e) { - //do nothing for now...hope this is rare - return Status.ERROR; - } - - return Status.OK; - } - - /** - * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the - * record with the specified record key. - * - * @param table The name of the tableName - * @param key The record key of the record to insert. - * @param values A HashMap of field/value pairs to insert in the record - * @return Zero on success, a non-zero error code on error - */ - public Status insert(String table, String key, Map values) { - return update(table, key, values); - } - - /** - * Delete a record from the database. - * - * @param table The name of the tableName - * @param key The record key of the record to delete. 
- * @return Zero on success, a non-zero error code on error - */ - public Status delete(String table, String key) { - //if this is a "new" tableName, init HTable object. Else, use existing one - if (!this.tableName.equals(table)) { - hTable = null; - try { - getHTable(table); - this.tableName = table; - } catch (IOException e) { - System.err.println("Error accessing HBase tableName: " + e); - return Status.ERROR; - } - } - - if (debug) { - System.out.println("Doing delete for key: " + key); - } - - Delete d = new Delete(Bytes.toBytes(key)); - try { - hTable.delete(d); - } catch (IOException e) { - if (debug) { - System.err.println("Error doing delete: " + e); - } - return Status.ERROR; - } - - return Status.OK; - } - - public static void main(String[] args) { - if (args.length != 3) { - System.out.println("Please specify a threadcount, columnfamily and operation count"); - System.exit(0); - } - - final int keyspace = 10000; //120000000; - - final int threadcount = Integer.parseInt(args[0]); - - final String columnfamily = args[1]; - - - final int opcount = Integer.parseInt(args[2]) / threadcount; - - Vector allthreads = new Vector<>(); - - for (int i = 0; i < threadcount; i++) { - Thread t = new Thread() { - public void run() { - try { - Random random = new Random(); - - HBaseClient cli = new HBaseClient(); - - Properties props = new Properties(); - props.setProperty("columnfamily", columnfamily); - props.setProperty("debug", "true"); - cli.setProperties(props); - - cli.init(); - - long accum = 0; - - for (int i = 0; i < opcount; i++) { - int keynum = random.nextInt(keyspace); - String key = "user" + keynum; - long st = System.currentTimeMillis(); - Status result; - Vector> scanResults = new Vector<>(); - Set scanFields = new HashSet(); - result = cli.scan("table1", "user2", 20, null, scanResults); - - long en = System.currentTimeMillis(); - - accum += (en - st); - - if (!result.equals(Status.OK)) { - System.out.println("Error " + result + " for " + key); - } - - 
if (i % 10 == 0) { - System.out.println(i + " operations, average latency: " + (((double) accum) / ((double) i))); - } - } - } catch (Exception e) { - e.printStackTrace(); - } - } - }; - allthreads.add(t); - } - - long st = System.currentTimeMillis(); - for (Thread t : allthreads) { - t.start(); - } - - for (Thread t : allthreads) { - try { - t.join(); - } catch (InterruptedException ignored) { - System.err.println("interrupted"); - Thread.currentThread().interrupt(); - } - } - long en = System.currentTimeMillis(); - - System.out.println("Throughput: " + ((1000.0) * (((double) (opcount * threadcount)) / ((double) (en - st)))) - + " ops/sec"); - } -} diff --git a/hbase098/src/main/java/site/ycsb/db/package-info.java b/hbase098/src/main/java/site/ycsb/db/package-info.java deleted file mode 100644 index 0ba2b55f39..0000000000 --- a/hbase098/src/main/java/site/ycsb/db/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) 2017, Yahoo!, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. See accompanying LICENSE file. - */ - -/** - * The YCSB binding for HBase - * 0.98.X. - */ -package site.ycsb.db; diff --git a/hbase10/README.md b/hbase10/README.md deleted file mode 100644 index 8fdc7e58ad..0000000000 --- a/hbase10/README.md +++ /dev/null @@ -1,110 +0,0 @@ - - -# HBase (1.0.x) Driver for YCSB -This driver is a binding for the YCSB facilities to operate against a HBase 1.0.x Server cluster or Google's hosted Bigtable. 
-To run against an HBase 0.98.x cluster, use the `hbase098` binding. - -See `hbase098/README.md` for a quickstart to setup HBase for load testing and common configuration details. - -## Configuration Options -In addition to those options available for the `hbase098` binding, the following options are available for the `hbase10` binding: - -* `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput but data cannot be recovered in the event of a crash. The default is true. - -## Bigtable - -Google's Bigtable service provides an implementation of the HBase API for migrating existing applications. Users can perform load tests against Bigtable using this binding. - -### 1. Setup a Bigtable Cluster - -Login to the Google Cloud Console and follow the [Creating Cluster](https://cloud.google.com/bigtable/docs/creating-cluster) steps. Make a note of your cluster name, zone and project ID. - -### 2. Launch the Bigtable Shell - -From the Cloud Console, launch a shell and follow the [Quickstart](https://cloud.google.com/bigtable/docs/quickstart) up to step 4 where you launch the HBase shell. - -### 3. Create a Table - -For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163): - -``` -hbase(main):001:0> n_splits = 200 # HBase recommends (10 * number of regionservers) -hbase(main):002:0> create 'usertable', 'cf', {SPLITS => (1..n_splits).map {|i| "user#{1000+i*(9999-1000)/n_splits}"}} -``` - -Make a note of the column family, in this example it's `cf``. - -### 4. 
Download the Bigtable Client Jar with required dependencies: - -``` -mvn -N dependency:copy -Dartifact=com.google.cloud.bigtable:bigtable-hbase-1.x-hadoop:1.0.0 -DoutputDirectory=target/bigtable-deps -mvn -N dependency:copy -Dartifact=io.dropwizard.metrics:metrics-core:3.1.2 -DoutputDirectory=target/bigtable-deps -``` - -Download the latest `bigtable-hbase-1.x-hadoop` jar from [Maven](http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22com.google.cloud.bigtable%22%20AND%20a%3A%22bigtable-hbase-1.x-hadoop%22) to your host. - -### 5. Download JSON Credentials - -Follow these instructions for [Generating a JSON key](https://cloud.google.com/bigtable/docs/installing-hbase-shell#service-account) and save it to your host. - -### 6. Create or Edit hbase-site.xml - -If you have an existing HBase configuration directory with an `hbase-site.xml` file, edit the file as per below. If not, create a directory called `conf` under the `hbase10` directory. Create a file in the conf directory named `hbase-site.xml`. Provide the following settings in the XML file, making sure to replace the bracketed examples with the proper values from your Cloud console. - -``` - - - hbase.client.connection.impl - com.google.cloud.bigtable.hbase1_x.BigtableConnection - - - google.bigtable.project.id - [YOUR-PROJECT-ID] - - - google.bigtable.instance.id - [YOUR-INSTANCE-ID] - - - google.bigtable.auth.service.account.enable - true - - - google.bigtable.auth.json.keyfile - [PATH-TO-YOUR-KEY-FILE] - - -``` - -If you have an existing HBase config directory, make sure to add it to the class path via `-cp :`. - -### 7. Execute a Workload - -Switch to the root of the YCSB repo and choose the workload you want to run and `load` it first. With the CLI you must provide the column family, cluster properties and the ALPN jar to load. - -``` -bin/ycsb load hbase10 -p columnfamily=cf -cp 'target/bigtable-deps/*' -P workloads/workloada - -``` - -The `load` step only executes inserts into the datastore. 
After loading data, run the same workload to mix reads with writes. - -``` -bin/ycsb run hbase10 -p columnfamily=cf -cp 'target/bigtable-deps/* -P workloads/workloada - -``` diff --git a/hbase10/src/test/java/site/ycsb/db/HBaseClient10Test.java b/hbase10/src/test/java/site/ycsb/db/HBaseClient10Test.java deleted file mode 100644 index ebfcc27355..0000000000 --- a/hbase10/src/test/java/site/ycsb/db/HBaseClient10Test.java +++ /dev/null @@ -1,214 +0,0 @@ -/** - * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the License. You - * may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. 
- */ - -package site.ycsb.db; - -import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; -import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.Assume.assumeTrue; - -import site.ycsb.ByteIterator; -import site.ycsb.Status; -import site.ycsb.StringByteIterator; -import site.ycsb.measurements.Measurements; -import site.ycsb.workloads.CoreWorkload; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Map; -import java.util.List; -import java.util.Properties; -import java.util.Vector; - -/** - * Integration tests for the YCSB HBase client 1.0, using an HBase minicluster. - */ -public class HBaseClient10Test { - - private final static String COLUMN_FAMILY = "cf"; - - private static HBaseTestingUtility testingUtil; - private HBaseClient10 client; - private Table table = null; - private String tableName; - - private static boolean isWindows() { - final String os = System.getProperty("os.name"); - return os.startsWith("Windows"); - } - - /** - * Creates a mini-cluster for use in these tests. - * - * This is a heavy-weight operation, so invoked only once for the test class. 
- */ - @BeforeClass - public static void setUpClass() throws Exception { - // Minicluster setup fails on Windows with an UnsatisfiedLinkError. - // Skip if windows. - assumeTrue(!isWindows()); - testingUtil = HBaseTestingUtility.createLocalHTU(); - testingUtil.startMiniCluster(); - } - - /** - * Tears down mini-cluster. - */ - @AfterClass - public static void tearDownClass() throws Exception { - if (testingUtil != null) { - testingUtil.shutdownMiniCluster(); - } - } - - /** - * Sets up the mini-cluster for testing. - * - * We re-create the table for each test. - */ - @Before - public void setUp() throws Exception { - client = new HBaseClient10(); - client.setConfiguration(new Configuration(testingUtil.getConfiguration())); - - Properties p = new Properties(); - p.setProperty("columnfamily", COLUMN_FAMILY); - - Measurements.setProperties(p); - final CoreWorkload workload = new CoreWorkload(); - workload.init(p); - - tableName = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); - table = testingUtil.createTable(TableName.valueOf(tableName), Bytes.toBytes(COLUMN_FAMILY)); - - client.setProperties(p); - client.init(); - } - - @After - public void tearDown() throws Exception { - table.close(); - testingUtil.deleteTable(tableName); - } - - @Test - public void testRead() throws Exception { - final String rowKey = "row1"; - final Put p = new Put(Bytes.toBytes(rowKey)); - p.addColumn(Bytes.toBytes(COLUMN_FAMILY), - Bytes.toBytes("column1"), Bytes.toBytes("value1")); - p.addColumn(Bytes.toBytes(COLUMN_FAMILY), - Bytes.toBytes("column2"), Bytes.toBytes("value2")); - table.put(p); - - final HashMap result = new HashMap(); - final Status status = client.read(tableName, rowKey, null, result); - assertEquals(Status.OK, status); - assertEquals(2, result.size()); - assertEquals("value1", result.get("column1").toString()); - assertEquals("value2", result.get("column2").toString()); - } - - @Test - public void testReadMissingRow() throws Exception { - final HashMap 
result = new HashMap(); - final Status status = client.read(tableName, "Missing row", null, result); - assertEquals(Status.NOT_FOUND, status); - assertEquals(0, result.size()); - } - - @Test - public void testScan() throws Exception { - // Fill with data - final String colStr = "row_number"; - final byte[] col = Bytes.toBytes(colStr); - final int n = 10; - final List puts = new ArrayList(n); - for(int i = 0; i < n; i++) { - final byte[] key = Bytes.toBytes(String.format("%05d", i)); - final byte[] value = java.nio.ByteBuffer.allocate(4).putInt(i).array(); - final Put p = new Put(key); - p.addColumn(Bytes.toBytes(COLUMN_FAMILY), col, value); - puts.add(p); - } - table.put(puts); - - // Test - final Vector> result = - new Vector>(); - - // Scan 5 records, skipping the first - client.scan(tableName, "00001", 5, null, result); - - assertEquals(5, result.size()); - for(int i = 0; i < 5; i++) { - final Map row = result.get(i); - assertEquals(1, row.size()); - assertTrue(row.containsKey(colStr)); - final byte[] bytes = row.get(colStr).toArray(); - final ByteBuffer buf = ByteBuffer.wrap(bytes); - final int rowNum = buf.getInt(); - assertEquals(i + 1, rowNum); - } - } - - @Test - public void testUpdate() throws Exception{ - final String key = "key"; - final Map input = new HashMap(); - input.put("column1", "value1"); - input.put("column2", "value2"); - final Status status = client.insert(tableName, key, StringByteIterator.getByteIteratorMap(input)); - assertEquals(Status.OK, status); - - // Verify result - final Get get = new Get(Bytes.toBytes(key)); - final Result result = this.table.get(get); - assertFalse(result.isEmpty()); - assertEquals(2, result.size()); - for(final java.util.Map.Entry entry : input.entrySet()) { - assertEquals(entry.getValue(), - new String(result.getValue(Bytes.toBytes(COLUMN_FAMILY), - Bytes.toBytes(entry.getKey())))); - } - } - - @Test - @Ignore("Not yet implemented") - public void testDelete() { - fail("Not yet implemented"); - } -} - diff --git 
a/hbase12/README.md b/hbase12/README.md deleted file mode 100644 index 74bee71ff1..0000000000 --- a/hbase12/README.md +++ /dev/null @@ -1,27 +0,0 @@ - - -# HBase (1.2+) Driver for YCSB -This driver is a binding for the YCSB facilities to operate against a HBase 1.2+ Server cluster, using a shaded client that tries to avoid leaking third party libraries. - -See `hbase098/README.md` for a quickstart to setup HBase for load testing and common configuration details. - -## Configuration Options -In addition to those options available for the `hbase098` binding, the following options are available for the `hbase12` binding: - -* `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput but data cannot be recovered in the event of a crash. The default is true. We can set it to flase by option '-p durability=SKIP_WAL'. - diff --git a/hbase12/pom.xml b/hbase12/pom.xml deleted file mode 100644 index f2bea40a25..0000000000 --- a/hbase12/pom.xml +++ /dev/null @@ -1,87 +0,0 @@ - - - - - 4.0.0 - - site.ycsb - binding-parent - 0.18.0-SNAPSHOT - ../binding-parent/ - - - hbase12-binding - HBase 1.2 DB Binding - - - true - true - true - - - true - - - - site.ycsb - hbase10-binding - ${project.version} - - - - org.apache.hbase - hbase-client - - - - - site.ycsb - core - ${project.version} - provided - - - org.apache.hbase - hbase-shaded-client - ${hbase12.version} - - - junit - junit - 4.12 - test - - - - diff --git a/hbase12/src/main/java/site/ycsb/db/hbase12/HBaseClient12.java b/hbase12/src/main/java/site/ycsb/db/hbase12/HBaseClient12.java deleted file mode 100644 index 12ac84e572..0000000000 --- a/hbase12/src/main/java/site/ycsb/db/hbase12/HBaseClient12.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the License. 
You - * may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. - */ - -package site.ycsb.db.hbase12; - -/** - * HBase 1.2 client for YCSB framework. - * - * A modified version of HBaseClient (which targets HBase v1.2) utilizing the - * shaded client. - * - * It should run equivalent to following the hbase098 binding README. - * - */ -public class HBaseClient12 extends site.ycsb.db.HBaseClient10 { -} diff --git a/hbase12/src/main/java/site/ycsb/db/hbase12/package-info.java b/hbase12/src/main/java/site/ycsb/db/hbase12/package-info.java deleted file mode 100644 index 1363a52715..0000000000 --- a/hbase12/src/main/java/site/ycsb/db/hbase12/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2014, Yahoo!, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the License. You - * may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. - */ - -/** - * The YCSB binding for HBase - * using the HBase 1.2+ shaded API. 
- */ -package site.ycsb.db.hbase12; - diff --git a/hbase12/src/test/java/site/ycsb/db/hbase12/HBaseClient12Test.java b/hbase12/src/test/java/site/ycsb/db/hbase12/HBaseClient12Test.java deleted file mode 100644 index f8bd7c4702..0000000000 --- a/hbase12/src/test/java/site/ycsb/db/hbase12/HBaseClient12Test.java +++ /dev/null @@ -1,213 +0,0 @@ -/** - * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the License. You - * may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. - */ - -package site.ycsb.db.hbase12; - -import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; -import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.Assume.assumeTrue; - -import site.ycsb.ByteIterator; -import site.ycsb.Status; -import site.ycsb.StringByteIterator; -import site.ycsb.measurements.Measurements; -import site.ycsb.workloads.CoreWorkload; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import 
org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Properties; -import java.util.Vector; - -/** - * Integration tests for the YCSB HBase client 1.2, using an HBase minicluster. - */ -public class HBaseClient12Test { - - private final static String COLUMN_FAMILY = "cf"; - - private static HBaseTestingUtility testingUtil; - private HBaseClient12 client; - private Table table = null; - private String tableName; - - private static boolean isWindows() { - final String os = System.getProperty("os.name"); - return os.startsWith("Windows"); - } - - /** - * Creates a mini-cluster for use in these tests. - * - * This is a heavy-weight operation, so invoked only once for the test class. - */ - @BeforeClass - public static void setUpClass() throws Exception { - // Minicluster setup fails on Windows with an UnsatisfiedLinkError. - // Skip if windows. - assumeTrue(!isWindows()); - testingUtil = HBaseTestingUtility.createLocalHTU(); - testingUtil.startMiniCluster(); - } - - /** - * Tears down mini-cluster. - */ - @AfterClass - public static void tearDownClass() throws Exception { - if (testingUtil != null) { - testingUtil.shutdownMiniCluster(); - } - } - - /** - * Sets up the mini-cluster for testing. - * - * We re-create the table for each test. 
- */ - @Before - public void setUp() throws Exception { - client = new HBaseClient12(); - client.setConfiguration(new Configuration(testingUtil.getConfiguration())); - - Properties p = new Properties(); - p.setProperty("columnfamily", COLUMN_FAMILY); - - Measurements.setProperties(p); - final CoreWorkload workload = new CoreWorkload(); - workload.init(p); - - tableName = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); - table = testingUtil.createTable(TableName.valueOf(tableName), Bytes.toBytes(COLUMN_FAMILY)); - - client.setProperties(p); - client.init(); - } - - @After - public void tearDown() throws Exception { - table.close(); - testingUtil.deleteTable(tableName); - } - - @Test - public void testRead() throws Exception { - final String rowKey = "row1"; - final Put p = new Put(Bytes.toBytes(rowKey)); - p.addColumn(Bytes.toBytes(COLUMN_FAMILY), - Bytes.toBytes("column1"), Bytes.toBytes("value1")); - p.addColumn(Bytes.toBytes(COLUMN_FAMILY), - Bytes.toBytes("column2"), Bytes.toBytes("value2")); - table.put(p); - - final HashMap result = new HashMap(); - final Status status = client.read(tableName, rowKey, null, result); - assertEquals(Status.OK, status); - assertEquals(2, result.size()); - assertEquals("value1", result.get("column1").toString()); - assertEquals("value2", result.get("column2").toString()); - } - - @Test - public void testReadMissingRow() throws Exception { - final HashMap result = new HashMap(); - final Status status = client.read(tableName, "Missing row", null, result); - assertEquals(Status.NOT_FOUND, status); - assertEquals(0, result.size()); - } - - @Test - public void testScan() throws Exception { - // Fill with data - final String colStr = "row_number"; - final byte[] col = Bytes.toBytes(colStr); - final int n = 10; - final List puts = new ArrayList(n); - for(int i = 0; i < n; i++) { - final byte[] key = Bytes.toBytes(String.format("%05d", i)); - final byte[] value = java.nio.ByteBuffer.allocate(4).putInt(i).array(); - final 
Put p = new Put(key); - p.addColumn(Bytes.toBytes(COLUMN_FAMILY), col, value); - puts.add(p); - } - table.put(puts); - - // Test - final Vector> result = - new Vector>(); - - // Scan 5 records, skipping the first - client.scan(tableName, "00001", 5, null, result); - - assertEquals(5, result.size()); - for(int i = 0; i < 5; i++) { - final HashMap row = result.get(i); - assertEquals(1, row.size()); - assertTrue(row.containsKey(colStr)); - final byte[] bytes = row.get(colStr).toArray(); - final ByteBuffer buf = ByteBuffer.wrap(bytes); - final int rowNum = buf.getInt(); - assertEquals(i + 1, rowNum); - } - } - - @Test - public void testUpdate() throws Exception{ - final String key = "key"; - final HashMap input = new HashMap(); - input.put("column1", "value1"); - input.put("column2", "value2"); - final Status status = client.insert(tableName, key, StringByteIterator.getByteIteratorMap(input)); - assertEquals(Status.OK, status); - - // Verify result - final Get get = new Get(Bytes.toBytes(key)); - final Result result = this.table.get(get); - assertFalse(result.isEmpty()); - assertEquals(2, result.size()); - for(final java.util.Map.Entry entry : input.entrySet()) { - assertEquals(entry.getValue(), - new String(result.getValue(Bytes.toBytes(COLUMN_FAMILY), - Bytes.toBytes(entry.getKey())))); - } - } - - @Test - @Ignore("Not yet implemented") - public void testDelete() { - fail("Not yet implemented"); - } -} - diff --git a/hbase12/src/test/resources/hbase-site.xml b/hbase12/src/test/resources/hbase-site.xml deleted file mode 100644 index a8b29e451f..0000000000 --- a/hbase12/src/test/resources/hbase-site.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - hbase.master.info.port - -1 - The port for the hbase master web UI - Set to -1 if you do not want the info server to run. - - - - hbase.regionserver.info.port - -1 - The port for the hbase regionserver web UI - Set to -1 if you do not want the info server to run. 
- - - diff --git a/hbase12/src/test/resources/log4j.properties b/hbase12/src/test/resources/log4j.properties deleted file mode 100644 index a9df32e044..0000000000 --- a/hbase12/src/test/resources/log4j.properties +++ /dev/null @@ -1,28 +0,0 @@ -# -# Copyright (c) 2015 YCSB contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. See accompanying -# LICENSE file. -# - -# Root logger option -log4j.rootLogger=WARN, stderr - -log4j.appender.stderr=org.apache.log4j.ConsoleAppender -log4j.appender.stderr.target=System.err -log4j.appender.stderr.layout=org.apache.log4j.PatternLayout -log4j.appender.stderr.layout.conversionPattern=%d{yyyy/MM/dd HH:mm:ss} %-5p %c %x - %m%n - -# Suppress messages from ZKTableStateManager: Creates a large number of table -# state change messages. -log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKTableStateManager=ERROR diff --git a/hbase14/README.md b/hbase14/README.md index 01ab0ed2b7..271cdce5c3 100644 --- a/hbase14/README.md +++ b/hbase14/README.md @@ -18,10 +18,142 @@ LICENSE file. # HBase (1.4+) Driver for YCSB This driver is a binding for the YCSB facilities to operate against a HBase 1.4+ Server cluster, using a shaded client that tries to avoid leaking third party libraries. -See `hbase098/README.md` for a quickstart to setup HBase for load testing and common configuration details. +# Testing HBase +## 1. Start a HBase Server +You need to start a single node or a cluster to point the client at. 
Please see [Apache HBase Reference Guide](http://hbase.apache.org/book.html) for more details and instructions. + +## 2. Set up YCSB + +Download the [latest YCSB](https://github.com/brianfrankcooper/YCSB/releases/latest) file. Follow the instructions. + +## 3. Create a HBase table for testing + +For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163): + +``` +hbase(main):001:0> n_splits = 200 # HBase recommends (10 * number of regionservers) +hbase(main):002:0> create 'usertable', 'family', {SPLITS => (1..n_splits).map {|i| "user#{1000+i*(9999-1000)/n_splits}"}} +``` + +*Failing to do so will cause all writes to initially target a single region server*. + +## 4. Run the Workload +Before you can actually run the workload, you need to "load" the data first. + +You should specify a HBase config directory (or any other directory containing your hbase-site.xml) and a table name and a column family (-cp is used to set java classpath and -p is used to set various properties). + +``` +bin/ycsb load hbase14 -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family +``` + +Then, you can run the workload: + +``` +bin/ycsb run hbase14 -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family +``` + +Please see the general instructions in the `doc` folder if you are not sure how it all works. You can apply additional properties (as seen in the next section) like this: + +``` +bin/ycsb run hbase14 -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family -p clientbuffering=true +``` ## Configuration Options -In addition to those options available for the `hbase098` binding, the following options are available for the `hbase14` binding: +The following options can be configured using `-p`. +* `columnfamily`: The HBase column family to target. +* `debug` : If true, debugging logs are activated. The default is false. 
+* `hbase.usepagefilter` : If true, HBase + [PageFilter](https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/PageFilter.html)s + are used to limit the number of records consumed in a scan operation. The default is true. +* `principal`: If testing needs to be done against a secure HBase cluster using Kerberos Keytab, + this property can be used to pass the principal in the keytab file. +* `keytab`: The Kerberos keytab file name and location can be passed through this property. +* `clientbuffering`: Whether or not to use client side buffering and batching of write operations. This can significantly improve performance and defaults to true. +* `writebuffersize`: The maximum amount, in bytes, of data to buffer on the client side before a flush is forced. The default is 12MB. Only used when `clientbuffering` is true. * `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput but data cannot be recovered in the event of a crash. The default is true. +Additional HBase settings should be provided in the `hbase-site.xml` file located in your `/HBASE-HOME-DIR/conf` directory. Typically this will be `/etc/hbase/conf`. + +## Bigtable + +Google's Bigtable service provides an implementation of the HBase API for migrating existing applications. Users can perform load tests against Bigtable using this binding. + +### 1. Setup a Bigtable Cluster + +Login to the Google Cloud Console and follow the [Creating Cluster](https://cloud.google.com/bigtable/docs/creating-cluster) steps. Make a note of your cluster name, zone and project ID. + +### 2. Launch the Bigtable Shell + +From the Cloud Console, launch a shell and follow the [Quickstart](https://cloud.google.com/bigtable/docs/quickstart) up to step 4 where you launch the HBase shell. + +### 3. 
Create a Table + +For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163): + +``` +hbase(main):001:0> n_splits = 200 # HBase recommends (10 * number of regionservers) +hbase(main):002:0> create 'usertable', 'cf', {SPLITS => (1..n_splits).map {|i| "user#{1000+i*(9999-1000)/n_splits}"}} +``` + +Make a note of the column family, in this example it's `cf`. + +### 4. Download the Bigtable Client Jar with required dependencies: + +``` +mvn -N dependency:copy -Dartifact=com.google.cloud.bigtable:bigtable-hbase-1.x-hadoop:1.0.0 -DoutputDirectory=target/bigtable-deps +mvn -N dependency:copy -Dartifact=io.dropwizard.metrics:metrics-core:3.1.2 -DoutputDirectory=target/bigtable-deps +``` + +Download the latest `bigtable-hbase-1.x-hadoop` jar from [Maven](http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22com.google.cloud.bigtable%22%20AND%20a%3A%22bigtable-hbase-1.x-hadoop%22) to your host. + +### 5. Download JSON Credentials + +Follow these instructions for [Generating a JSON key](https://cloud.google.com/bigtable/docs/installing-hbase-shell#service-account) and save it to your host. + +### 6. Create or Edit hbase-site.xml + +If you have an existing HBase configuration directory with an `hbase-site.xml` file, edit the file as per below. If not, create a directory called `conf` under the `hbase14` directory. Create a file in the conf directory named `hbase-site.xml`. Provide the following settings in the XML file, making sure to replace the bracketed examples with the proper values from your Cloud console.
+ +``` + + + hbase.client.connection.impl + com.google.cloud.bigtable.hbase1_x.BigtableConnection + + + google.bigtable.project.id + [YOUR-PROJECT-ID] + + + google.bigtable.instance.id + [YOUR-INSTANCE-ID] + + + google.bigtable.auth.service.account.enable + true + + + google.bigtable.auth.json.keyfile + [PATH-TO-YOUR-KEY-FILE] + + +``` + +If you have an existing HBase config directory, make sure to add it to the class path via `-cp :`. + +### 7. Execute a Workload + +Switch to the root of the YCSB repo and choose the workload you want to run and `load` it first. With the CLI you must provide the column family, cluster properties and the ALPN jar to load. + +``` +bin/ycsb load hbase14 -p columnfamily=cf -cp 'target/bigtable-deps/*' -P workloads/workloada + +``` + +The `load` step only executes inserts into the datastore. After loading data, run the same workload to mix reads with writes. + +``` +bin/ycsb run hbase14 -p columnfamily=cf -cp 'target/bigtable-deps/*' -P workloads/workloada + +``` diff --git a/hbase14/pom.xml b/hbase14/pom.xml index 5058406e1e..eb1701e7ca 100644 --- a/hbase14/pom.xml +++ b/hbase14/pom.xml @@ -31,25 +31,8 @@ LICENSE file. true - - true - - site.ycsb - hbase10-binding - ${project.version} - - - - org.apache.hbase - hbase-client - - - site.ycsb core
4.12 test - diff --git a/hbase14/src/main/java/site/ycsb/db/hbase14/HBaseClient14.java b/hbase14/src/main/java/site/ycsb/db/hbase14/HBaseClient14.java index e27a10031d..917040a1f4 100644 --- a/hbase14/src/main/java/site/ycsb/db/hbase14/HBaseClient14.java +++ b/hbase14/src/main/java/site/ycsb/db/hbase14/HBaseClient14.java @@ -15,14 +15,516 @@ package site.ycsb.db.hbase14; +import site.ycsb.ByteArrayByteIterator; +import site.ycsb.ByteIterator; +import site.ycsb.DBException; +import site.ycsb.Status; +import site.ycsb.measurements.Measurements; + +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.BufferedMutator; +import org.apache.hadoop.hbase.client.BufferedMutatorParams; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.PageFilter; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.IOException; +import java.util.ConcurrentModificationException; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.Vector; +import java.util.concurrent.atomic.AtomicInteger; + +import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; +import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; + /** - * HBase 1.4 client for YCSB framework. 
- * - * A modified version of HBaseClient (which targets HBase v1.4) utilizing the - * shaded client. - * - * It should run equivalent to following the hbase098 binding README. + * HBase 1 client for YCSB framework. * + * Intended for use with HBase's shaded client. */ -public class HBaseClient14 extends site.ycsb.db.HBaseClient10 { +public class HBaseClient14 extends site.ycsb.DB { + private static final AtomicInteger THREAD_COUNT = new AtomicInteger(0); + + private Configuration config = HBaseConfiguration.create(); + + private boolean debug = false; + + private String tableName = ""; + + /** + * A Cluster Connection instance that is shared by all running ycsb threads. + * Needs to be initialized late so we pick up command-line configs if any. + * To ensure one instance only in a multi-threaded context, guard access + * with a 'lock' object. + * @See #CONNECTION_LOCK. + */ + private static Connection connection = null; + + // Depending on the value of clientSideBuffering, either bufferedMutator + // (clientSideBuffering) or currentTable (!clientSideBuffering) will be used. + private Table currentTable = null; + private BufferedMutator bufferedMutator = null; + + private String columnFamily = ""; + private byte[] columnFamilyBytes; + + /** + * Durability to use for puts and deletes. + */ + private Durability durability = Durability.USE_DEFAULT; + + /** Whether or not a page filter should be used to limit scan length. */ + private boolean usePageFilter = true; + + /** + * If true, buffer mutations on the client. This is the default behavior for + * HBaseClient. For measuring insert/update/delete latencies, client side + * buffering should be disabled. + */ + private boolean clientSideBuffering = false; + private long writeBufferSize = 1024 * 1024 * 12; + + /** + * Initialize any state for this DB. Called once per DB instance; there is one + * DB instance per client thread. 
+ */ + @Override + public void init() throws DBException { + if ("true" + .equals(getProperties().getProperty("clientbuffering", "false"))) { + this.clientSideBuffering = true; + } + if (getProperties().containsKey("writebuffersize")) { + writeBufferSize = + Long.parseLong(getProperties().getProperty("writebuffersize")); + } + + if (getProperties().getProperty("durability") != null) { + this.durability = + Durability.valueOf(getProperties().getProperty("durability")); + } + + if ("kerberos".equalsIgnoreCase(config.get("hbase.security.authentication"))) { + config.set("hadoop.security.authentication", "Kerberos"); + UserGroupInformation.setConfiguration(config); + } + + if ((getProperties().getProperty("principal")!=null) + && (getProperties().getProperty("keytab")!=null)) { + try { + UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"), + getProperties().getProperty("keytab")); + } catch (IOException e) { + System.err.println("Keytab file is not readable or not found"); + throw new DBException(e); + } + } + + String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); + try { + THREAD_COUNT.getAndIncrement(); + synchronized (THREAD_COUNT) { + if (connection == null) { + // Initialize if not set up already. + connection = ConnectionFactory.createConnection(config); + + // Terminate right now if table does not exist, since the client + // will not propagate this error upstream once the workload + // starts. 
+ final TableName tName = TableName.valueOf(table); + try (Admin admin = connection.getAdmin()) { + if (!admin.tableExists(tName)) { + throw new DBException("Table " + tName + " does not exists"); + } + } + } + } + } catch (java.io.IOException e) { + throw new DBException(e); + } + + if ((getProperties().getProperty("debug") != null) + && (getProperties().getProperty("debug").compareTo("true") == 0)) { + debug = true; + } + + if ("false" + .equals(getProperties().getProperty("hbase.usepagefilter", "true"))) { + usePageFilter = false; + } + + columnFamily = getProperties().getProperty("columnfamily"); + if (columnFamily == null) { + System.err.println("Error, must specify a columnfamily for HBase table"); + throw new DBException("No columnfamily specified"); + } + columnFamilyBytes = Bytes.toBytes(columnFamily); + } + + /** + * Cleanup any state for this DB. Called once per DB instance; there is one DB + * instance per client thread. + */ + @Override + public void cleanup() throws DBException { + // Get the measurements instance as this is the only client that should + // count clean up time like an update if client-side buffering is + // enabled. + Measurements measurements = Measurements.getMeasurements(); + try { + long st = System.nanoTime(); + if (bufferedMutator != null) { + bufferedMutator.close(); + } + if (currentTable != null) { + currentTable.close(); + } + long en = System.nanoTime(); + final String type = clientSideBuffering ? "UPDATE" : "CLEANUP"; + measurements.measure(type, (int) ((en - st) / 1000)); + int threadCount = THREAD_COUNT.decrementAndGet(); + if (threadCount <= 0) { + // Means we are done so ok to shut down the Connection. 
+ synchronized (THREAD_COUNT) { + if (connection != null) { + connection.close(); + connection = null; + } + } + } + } catch (IOException e) { + throw new DBException(e); + } + } + + public void getHTable(String table) throws IOException { + final TableName tName = TableName.valueOf(table); + this.currentTable = connection.getTable(tName); + if (clientSideBuffering) { + final BufferedMutatorParams p = new BufferedMutatorParams(tName); + p.writeBufferSize(writeBufferSize); + this.bufferedMutator = connection.getBufferedMutator(p); + } + } + + /** + * Read a record from the database. Each field/value pair from the result will + * be stored in a HashMap. + * + * @param table + * The name of the table + * @param key + * The record key of the record to read. + * @param fields + * The list of fields to read, or null for all of them + * @param result + * A HashMap of field/value pairs for the result + * @return Zero on success, a non-zero error code on error + */ + public Status read(String table, String key, Set fields, + Map result) { + // if this is a "new" table, init HTable object. 
Else, use existing one + if (!tableName.equals(table)) { + currentTable = null; + try { + getHTable(table); + tableName = table; + } catch (IOException e) { + System.err.println("Error accessing HBase table: " + e); + return Status.ERROR; + } + } + + Result r = null; + try { + if (debug) { + System.out + .println("Doing read from HBase columnfamily " + columnFamily); + System.out.println("Doing read for key: " + key); + } + Get g = new Get(Bytes.toBytes(key)); + if (fields == null) { + g.addFamily(columnFamilyBytes); + } else { + for (String field : fields) { + g.addColumn(columnFamilyBytes, Bytes.toBytes(field)); + } + } + r = currentTable.get(g); + } catch (IOException e) { + if (debug) { + System.err.println("Error doing get: " + e); + } + return Status.ERROR; + } catch (ConcurrentModificationException e) { + // do nothing for now...need to understand HBase concurrency model better + return Status.ERROR; + } + + if (r.isEmpty()) { + return Status.NOT_FOUND; + } + + while (r.advance()) { + final Cell c = r.current(); + result.put(Bytes.toString(CellUtil.cloneQualifier(c)), + new ByteArrayByteIterator(CellUtil.cloneValue(c))); + if (debug) { + System.out.println( + "Result for field: " + Bytes.toString(CellUtil.cloneQualifier(c)) + + " is: " + Bytes.toString(CellUtil.cloneValue(c))); + } + } + return Status.OK; + } + + /** + * Perform a range scan for a set of records in the database. Each field/value + * pair from the result will be stored in a HashMap. + * + * @param table + * The name of the table + * @param startkey + * The record key of the first record to read. 
+ * @param recordcount + * The number of records to read + * @param fields + * The list of fields to read, or null for all of them + * @param result + * A Vector of HashMaps, where each HashMap is a set field/value + * pairs for one record + * @return Zero on success, a non-zero error code on error + */ + @Override + public Status scan(String table, String startkey, int recordcount, + Set fields, Vector> result) { + // if this is a "new" table, init HTable object. Else, use existing one + if (!tableName.equals(table)) { + currentTable = null; + try { + getHTable(table); + tableName = table; + } catch (IOException e) { + System.err.println("Error accessing HBase table: " + e); + return Status.ERROR; + } + } + + Scan s = new Scan(Bytes.toBytes(startkey)); + // HBase has no record limit. Here, assume recordcount is small enough to + // bring back in one call. + // We get back recordcount records + s.setCaching(recordcount); + if (this.usePageFilter) { + s.setFilter(new PageFilter(recordcount)); + } + + // add specified fields or else all fields + if (fields == null) { + s.addFamily(columnFamilyBytes); + } else { + for (String field : fields) { + s.addColumn(columnFamilyBytes, Bytes.toBytes(field)); + } + } + + // get results + ResultScanner scanner = null; + try { + scanner = currentTable.getScanner(s); + int numResults = 0; + for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { + // get row key + String key = Bytes.toString(rr.getRow()); + + if (debug) { + System.out.println("Got scan result for key: " + key); + } + + HashMap rowResult = + new HashMap(); + + while (rr.advance()) { + final Cell cell = rr.current(); + rowResult.put(Bytes.toString(CellUtil.cloneQualifier(cell)), + new ByteArrayByteIterator(CellUtil.cloneValue(cell))); + } + + // add rowResult to result vector + result.add(rowResult); + numResults++; + + // PageFilter does not guarantee that the number of results is <= + // pageSize, so this + // break is required. 
+ if (numResults >= recordcount) {// if hit recordcount, bail out + break; + } + } // done with row + } catch (IOException e) { + if (debug) { + System.out.println("Error in getting/parsing scan result: " + e); + } + return Status.ERROR; + } finally { + if (scanner != null) { + scanner.close(); + } + } + + return Status.OK; + } + + /** + * Update a record in the database. Any field/value pairs in the specified + * values HashMap will be written into the record with the specified record + * key, overwriting any existing values with the same field name. + * + * @param table + * The name of the table + * @param key + * The record key of the record to write + * @param values + * A HashMap of field/value pairs to update in the record + * @return Zero on success, a non-zero error code on error + */ + @Override + public Status update(String table, String key, + Map values) { + // if this is a "new" table, init HTable object. Else, use existing one + if (!tableName.equals(table)) { + currentTable = null; + try { + getHTable(table); + tableName = table; + } catch (IOException e) { + System.err.println("Error accessing HBase table: " + e); + return Status.ERROR; + } + } + + if (debug) { + System.out.println("Setting up put for key: " + key); + } + Put p = new Put(Bytes.toBytes(key)); + p.setDurability(durability); + for (Map.Entry entry : values.entrySet()) { + byte[] value = entry.getValue().toArray(); + if (debug) { + System.out.println("Adding field/value " + entry.getKey() + "/" + + Bytes.toStringBinary(value) + " to put request"); + } + p.addColumn(columnFamilyBytes, Bytes.toBytes(entry.getKey()), value); + } + + try { + if (clientSideBuffering) { + // removed Preconditions.checkNotNull, which throws NPE, in favor of NPE on next line + bufferedMutator.mutate(p); + } else { + currentTable.put(p); + } + } catch (IOException e) { + if (debug) { + System.err.println("Error doing put: " + e); + } + return Status.ERROR; + } catch (ConcurrentModificationException e) { + // do 
nothing for now...hope this is rare + return Status.ERROR; + } + + return Status.OK; + } + + /** + * Insert a record in the database. Any field/value pairs in the specified + * values HashMap will be written into the record with the specified record + * key. + * + * @param table + * The name of the table + * @param key + * The record key of the record to insert. + * @param values + * A HashMap of field/value pairs to insert in the record + * @return Zero on success, a non-zero error code on error + */ + @Override + public Status insert(String table, String key, + Map values) { + return update(table, key, values); + } + + /** + * Delete a record from the database. + * + * @param table + * The name of the table + * @param key + * The record key of the record to delete. + * @return Zero on success, a non-zero error code on error + */ + @Override + public Status delete(String table, String key) { + // if this is a "new" table, init HTable object. Else, use existing one + if (!tableName.equals(table)) { + currentTable = null; + try { + getHTable(table); + tableName = table; + } catch (IOException e) { + System.err.println("Error accessing HBase table: " + e); + return Status.ERROR; + } + } + + if (debug) { + System.out.println("Doing delete for key: " + key); + } + + final Delete d = new Delete(Bytes.toBytes(key)); + d.setDurability(durability); + try { + if (clientSideBuffering) { + // removed Preconditions.checkNotNull, which throws NPE, in favor of NPE on next line + bufferedMutator.mutate(d); + } else { + currentTable.delete(d); + } + } catch (IOException e) { + if (debug) { + System.err.println("Error doing delete: " + e); + } + return Status.ERROR; + } + + return Status.OK; + } + + // Only non-private for testing. 
+ void setConfiguration(final Configuration newConfig) { + this.config = newConfig; + } } + +/* + * For customized vim control set autoindent set si set shiftwidth=4 + */ diff --git a/hbase20/README.md b/hbase20/README.md deleted file mode 100644 index 27e183b359..0000000000 --- a/hbase20/README.md +++ /dev/null @@ -1,27 +0,0 @@ - - -# HBase (2.0+) Driver for YCSB -This driver is a binding for the YCSB facilities to operate against a HBase 2.0+ Server cluster, using a shaded client that tries to avoid leaking third party libraries. - -See `hbase098/README.md` for a quickstart to setup HBase for load testing and common configuration details. - -## Configuration Options -In addition to those options available for the `hbase098` binding, the following options are available for the `hbase20` binding: - -* `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput but data cannot be recovered in the event of a crash. The default is true. - diff --git a/hbase20/pom.xml b/hbase20/pom.xml deleted file mode 100644 index a0dd77f45a..0000000000 --- a/hbase20/pom.xml +++ /dev/null @@ -1,85 +0,0 @@ - - - - - 4.0.0 - - site.ycsb - binding-parent - 0.18.0-SNAPSHOT - ../binding-parent/ - - - hbase20-binding - HBase 2.0 DB Binding - - - - true - - true - - - - site.ycsb - hbase10-binding - ${project.version} - - - - org.apache.hbase - hbase-client - - - - - site.ycsb - core - ${project.version} - provided - - - org.apache.hbase - hbase-shaded-client - ${hbase20.version} - - - junit - junit - 4.12 - test - - - - diff --git a/hbase20/src/main/java/site/ycsb/db/hbase20/HBaseClient20.java b/hbase20/src/main/java/site/ycsb/db/hbase20/HBaseClient20.java deleted file mode 100644 index 77b6eb8598..0000000000 --- a/hbase20/src/main/java/site/ycsb/db/hbase20/HBaseClient20.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the 
License. You - * may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. - */ - -package site.ycsb.db.hbase20; - -/** - * HBase 2.0 client for YCSB framework. - * - * A modified version of HBaseClient (which targets HBase v2.0) utilizing the - * shaded client. - * - * It should run equivalent to following the hbase098 binding README. - * - */ -public class HBaseClient20 extends site.ycsb.db.HBaseClient10 { -} diff --git a/hbase20/src/main/java/site/ycsb/db/hbase20/package-info.java b/hbase20/src/main/java/site/ycsb/db/hbase20/package-info.java deleted file mode 100644 index 027bb432c3..0000000000 --- a/hbase20/src/main/java/site/ycsb/db/hbase20/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2014, Yahoo!, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the License. You - * may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. - */ - -/** - * The YCSB binding for HBase - * using the HBase 2.0+ shaded API. 
- */ -package site.ycsb.db.hbase20; - diff --git a/hbase20/src/test/resources/hbase-site.xml b/hbase20/src/test/resources/hbase-site.xml deleted file mode 100644 index a8b29e451f..0000000000 --- a/hbase20/src/test/resources/hbase-site.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - hbase.master.info.port - -1 - The port for the hbase master web UI - Set to -1 if you do not want the info server to run. - - - - hbase.regionserver.info.port - -1 - The port for the hbase regionserver web UI - Set to -1 if you do not want the info server to run. - - - diff --git a/hbase20/src/test/resources/log4j.properties b/hbase20/src/test/resources/log4j.properties deleted file mode 100644 index a9df32e044..0000000000 --- a/hbase20/src/test/resources/log4j.properties +++ /dev/null @@ -1,28 +0,0 @@ -# -# Copyright (c) 2015 YCSB contributors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. See accompanying -# LICENSE file. -# - -# Root logger option -log4j.rootLogger=WARN, stderr - -log4j.appender.stderr=org.apache.log4j.ConsoleAppender -log4j.appender.stderr.target=System.err -log4j.appender.stderr.layout=org.apache.log4j.PatternLayout -log4j.appender.stderr.layout.conversionPattern=%d{yyyy/MM/dd HH:mm:ss} %-5p %c %x - %m%n - -# Suppress messages from ZKTableStateManager: Creates a large number of table -# state change messages. 
-log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKTableStateManager=ERROR diff --git a/hbase098/README.md b/hbase22/README.md similarity index 74% rename from hbase098/README.md rename to hbase22/README.md index cfc8cdbf2f..b531fc440c 100644 --- a/hbase098/README.md +++ b/hbase22/README.md @@ -1,5 +1,5 @@ -# HBase (0.98.x) Driver for YCSB -This driver is a binding for the YCSB facilities to operate against a HBase 0.98.x Server cluster. -To run against an HBase >= 1.0 cluster, use the `hbase10` binding. +# HBase (2.1+) Driver for YCSB +This driver is a binding for the YCSB facilities to operate against a HBase 2 cluster, using a shaded client that tries to avoid leaking third party libraries. -## Quickstart - -### 1. Start a HBase Server +# 1. Start a HBase Server You need to start a single node or a cluster to point the client at. Please see [Apache HBase Reference Guide](http://hbase.apache.org/book.html) for more details and instructions. -### 2. Set up YCSB -You need to clone the repository and compile everything. +# 2. Set up YCSB -``` -git clone git://github.com/brianfrankcooper/YCSB.git -cd YCSB -mvn clean package -``` +Download the [latest YCSB](https://github.com/brianfrankcooper/YCSB/releases/latest) file. Follow the instructions. -### 3. Create a HBase table for testing +# 3. Create a HBase table for testing For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163): @@ -44,28 +36,28 @@ hbase(main):002:0> create 'usertable', 'family', {SPLITS => (1..n_splits).map {| *Failing to do so will cause all writes to initially target a single region server*. -### 4. Run the Workload +# 4. Run the Workload Before you can actually run the workload, you need to "load" the data first. You should specify a HBase config directory(or any other directory containing your hbase-site.xml) and a table name and a column family(-cp is used to set java classpath and -p is used to set various properties). 
``` -bin/ycsb load hbase -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family +bin/ycsb load hbase22 -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family ``` Then, you can run the workload: ``` -bin/ycsb run hbase -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family +bin/ycsb run hbase22 -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family ``` Please see the general instructions in the `doc` folder if you are not sure how it all works. You can apply additional properties (as seen in the next section) like this: ``` -bin/ycsb run hbase -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family -p clientbuffering=true +bin/ycsb run hbase22 -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family -p clientbuffering=true ``` -## Configuration Options +# Configuration Options Following options can be configurable using `-p`. * `columnfamily`: The HBase column family to target. @@ -73,10 +65,12 @@ Following options can be configurable using `-p`. * `hbase.usepagefilter` : If true, HBase [PageFilter](https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/PageFilter.html)s are used to limit the number of records consumed in a scan operation. The default is true. -* `principal`: If testing need to be done against a secure HBase cluster using Kerberos Keytab, +* `principal`: If testing need to be done against a secure HBase cluster using Kerberos Keytab, this property can be used to pass the principal in the keytab file. * `keytab`: The Kerberos keytab file name and location can be passed through this property. * `clientbuffering`: Whether or not to use client side buffering and batching of write operations. This can significantly improve performance and defaults to true.
* `writebuffersize`: The maximum amount, in bytes, of data to buffer on the client side before a flush is forced. The default is 12MB. Only used when `clientbuffering` is true. +* `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput but data cannot be recovered in the event of a crash. The default is true. Additional HBase settings should be provided in the `hbase-site.xml` file located in your `/HBASE-HOME-DIR/conf` directory. Typically this will be `/etc/hbase/conf`. + diff --git a/hbase10/pom.xml b/hbase22/pom.xml similarity index 72% rename from hbase10/pom.xml rename to hbase22/pom.xml index 753dd67914..ea2e731474 100644 --- a/hbase10/pom.xml +++ b/hbase22/pom.xml @@ -25,32 +25,25 @@ LICENSE file. ../binding-parent/ - hbase10-binding - HBase 1.0 DB Binding + hbase22-binding + HBase 2.2 DB Binding + true - true - true - - org.apache.hbase - hbase-client - ${hbase10.version} - - - jdk.tools - jdk.tools - - - site.ycsb core ${project.version} provided + + org.apache.hbase + hbase-shaded-client + ${hbase22.version} + junit junit @@ -59,15 +52,15 @@ LICENSE file. org.apache.hbase - hbase-testing-util - ${hbase10.version} + hbase-shaded-testing-util + ${hbase22.version} test - - - jdk.tools - jdk.tools - - + + + + org.slf4j + slf4j-log4j12 + 1.7.25 diff --git a/hbase10/src/main/java/site/ycsb/db/HBaseClient10.java b/hbase22/src/main/java/site/ycsb/db/hbase22/HBaseClient22.java similarity index 98% rename from hbase10/src/main/java/site/ycsb/db/HBaseClient10.java rename to hbase22/src/main/java/site/ycsb/db/hbase22/HBaseClient22.java index 9a93aa61dd..976a44aa41 100644 --- a/hbase10/src/main/java/site/ycsb/db/HBaseClient10.java +++ b/hbase22/src/main/java/site/ycsb/db/hbase22/HBaseClient22.java @@ -13,7 +13,7 @@ * LICENSE file. 
*/ -package site.ycsb.db; +package site.ycsb.db.hbase22; import site.ycsb.ByteArrayByteIterator; import site.ycsb.ByteIterator; @@ -55,15 +55,11 @@ import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; /** - * HBase 1.0 client for YCSB framework. + * HBase 2 client for YCSB framework. * - * A modified version of HBaseClient (which targets HBase v0.9) utilizing the - * HBase 1.0.0 API. - * - * This client also adds toggleable client-side buffering and configurable write - * durability. + * Intended for use with HBase's shaded client. */ -public class HBaseClient10 extends site.ycsb.DB { +public class HBaseClient22 extends site.ycsb.DB { private static final AtomicInteger THREAD_COUNT = new AtomicInteger(0); private Configuration config = HBaseConfiguration.create(); diff --git a/hbase10/src/main/java/site/ycsb/db/package-info.java b/hbase22/src/main/java/site/ycsb/db/hbase22/package-info.java similarity index 92% rename from hbase10/src/main/java/site/ycsb/db/package-info.java rename to hbase22/src/main/java/site/ycsb/db/hbase22/package-info.java index a6a3768fbd..e4d1dca571 100644 --- a/hbase10/src/main/java/site/ycsb/db/package-info.java +++ b/hbase22/src/main/java/site/ycsb/db/hbase22/package-info.java @@ -17,7 +17,7 @@ /** * The YCSB binding for HBase - * using the HBase 1.0.0 API. + * using the HBase 2 shaded API. */ -package site.ycsb.db; +package site.ycsb.db.hbase22; diff --git a/hbase20/src/test/java/site/ycsb/db/hbase20/HBaseClient20Test.java b/hbase22/src/test/java/site/ycsb/db/hbase22/HBaseClient22Test.java similarity index 96% rename from hbase20/src/test/java/site/ycsb/db/hbase20/HBaseClient20Test.java rename to hbase22/src/test/java/site/ycsb/db/hbase22/HBaseClient22Test.java index c1cdd569c2..d1ca69b3e7 100644 --- a/hbase20/src/test/java/site/ycsb/db/hbase20/HBaseClient20Test.java +++ b/hbase22/src/test/java/site/ycsb/db/hbase22/HBaseClient22Test.java @@ -13,7 +13,7 @@ * LICENSE file. 
*/ -package site.ycsb.db.hbase20; +package site.ycsb.db.hbase22; import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; @@ -52,14 +52,14 @@ import java.util.Vector; /** - * Integration tests for the YCSB HBase client 2.0, using an HBase minicluster. + * Integration tests for the YCSB HBase 2 client using an HBase minicluster. */ -public class HBaseClient20Test { +public class HBaseClient22Test { private final static String COLUMN_FAMILY = "cf"; private static HBaseTestingUtility testingUtil; - private HBaseClient20 client; + private HBaseClient22 client; private Table table = null; private String tableName; @@ -99,7 +99,7 @@ public static void tearDownClass() throws Exception { */ @Before public void setUp() throws Exception { - client = new HBaseClient20(); + client = new HBaseClient22(); client.setConfiguration(new Configuration(testingUtil.getConfiguration())); Properties p = new Properties(); @@ -119,7 +119,7 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { table.close(); - testingUtil.deleteTable(tableName); + testingUtil.deleteTable(TableName.valueOf(tableName)); } @Test diff --git a/hbase10/src/test/resources/hbase-site.xml b/hbase22/src/test/resources/hbase-site.xml similarity index 100% rename from hbase10/src/test/resources/hbase-site.xml rename to hbase22/src/test/resources/hbase-site.xml diff --git a/hbase10/src/test/resources/log4j.properties b/hbase22/src/test/resources/log4j.properties similarity index 100% rename from hbase10/src/test/resources/log4j.properties rename to hbase22/src/test/resources/log4j.properties diff --git a/pom.xml b/pom.xml index 6d1ba1af99..c86448e5e0 100644 --- a/pom.xml +++ b/pom.xml @@ -127,11 +127,8 @@ LICENSE file. 1.2.0 1.4.0 4.0.0 - 0.98.14-hadoop2 - 1.0.2 - 1.2.5 - 1.4.2 - 2.0.0 + 1.4.12 + 2.2.3 0.9.5.6 2.7.6 7.2.2.Final @@ -181,11 +178,8 @@ LICENSE file. 
googlebigtable googledatastore griddb - hbase098 - hbase10 - hbase12 hbase14 - hbase20 + hbase22 hypertable ignite infinispan