Stopwatch methods are not idempotent; it is an error to start or stop a
- * stopwatch that is already in the desired state.
- *
- *
When testing code that uses this class, use the {@linkplain
- * #Stopwatch(Ticker) alternate constructor} to supply a fake or mock ticker.
- * This allows you to
- * simulate any valid behavior of the stopwatch.
- *
- *
Note that the overhead of measurement can be more than a microsecond, so
- * it is generally not useful to specify {@link TimeUnit#NANOSECONDS}
- * precision here.
- *
- * @since 14.0 (since 10.0 as {@code elapsedTime()})
- */
- public long elapsed(TimeUnit desiredUnit) {
- return desiredUnit.convert(elapsedNanos(), NANOSECONDS);
- }
-
- /**
- * Returns the current elapsed time shown on this stopwatch, expressed
- * in the desired time unit, with any fraction rounded down.
- *
- *
Note that the overhead of measurement can be more than a microsecond, so
- * it is generally not useful to specify {@link TimeUnit#NANOSECONDS}
- * precision here.
- *
- * @deprecated Use {@link Stopwatch#elapsed(TimeUnit)} instead. This method is
- * scheduled to be removed in Guava release 16.0.
- */
- @Deprecated
- public long elapsedTime(TimeUnit desiredUnit) {
- return elapsed(desiredUnit);
- }
-
- /**
- * Returns the current elapsed time shown on this stopwatch, expressed
- * in milliseconds, with any fraction rounded down. This is identical to
- * {@code elapsed(TimeUnit.MILLISECONDS)}.
- *
- * @deprecated Use {@code stopwatch.elapsed(MILLISECONDS)} instead. This
- * method is scheduled to be removed in Guava release 16.0.
- */
- @Deprecated
- public long elapsedMillis() {
- return elapsed(MILLISECONDS);
- }
-
- /**
- * Returns a string representation of the current elapsed time.
- */
- @GwtIncompatible("String.format()")
- @Override public String toString() {
- long nanos = elapsedNanos();
-
- TimeUnit unit = chooseUnit(nanos);
- double value = (double) nanos / NANOSECONDS.convert(1, unit);
-
- // Too bad this functionality is not exposed as a regular method call
- return String.format("%.4g %s", value, abbreviate(unit));
- }
-
- private static TimeUnit chooseUnit(long nanos) {
- if (SECONDS.convert(nanos, NANOSECONDS) > 0) {
- return SECONDS;
- }
- if (MILLISECONDS.convert(nanos, NANOSECONDS) > 0) {
- return MILLISECONDS;
- }
- if (MICROSECONDS.convert(nanos, NANOSECONDS) > 0) {
- return MICROSECONDS;
- }
- return NANOSECONDS;
- }
-
- private static String abbreviate(TimeUnit unit) {
- switch (unit) {
- case NANOSECONDS:
- return "ns";
- case MICROSECONDS:
- return "\u03bcs"; // μs
- case MILLISECONDS:
- return "ms";
- case SECONDS:
- return "s";
- default:
- throw new AssertionError();
- }
- }
-}
\ No newline at end of file
diff --git a/asynchbase/src/test/java/com/google/common/io/Closeables.java b/asynchbase/src/test/java/com/google/common/io/Closeables.java
deleted file mode 100644
index 4a92c9c098..0000000000
--- a/asynchbase/src/test/java/com/google/common/io/Closeables.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2007 The Guava Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.google.common.io;
-
-import com.google.common.annotations.Beta;
-import com.google.common.annotations.VisibleForTesting;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import javax.annotation.Nullable;
-
-/**
- * Utility methods for working with {@link Closeable} objects.
- *
- * @author Michael Lancaster
- * @since 1.0
- */
-@Beta
-public final class Closeables {
- @VisibleForTesting static final Logger logger
- = Logger.getLogger(Closeables.class.getName());
-
- private Closeables() {}
-
- /**
- * Closes a {@link Closeable}, with control over whether an
- * {@code IOException} may be thrown. This is primarily useful in a
- * finally block, where a thrown exception needs to be logged but not
- * propagated (otherwise the original exception will be lost).
- *
- *
If {@code swallowIOException} is true then we never throw
- * {@code IOException} but merely log it.
- *
- *
diff --git a/googlebigtable/README.md b/googlebigtable/README.md
index 81b6cf484a..d33a379d62 100644
--- a/googlebigtable/README.md
+++ b/googlebigtable/README.md
@@ -17,7 +17,7 @@ LICENSE file.
# Google Bigtable Driver for YCSB
-This driver provides a YCSB workload binding for Google's hosted Bigtable, the inspiration for a number of key-value stores like HBase and Cassandra. The Bigtable Java client provides both Protobuf based GRPC and HBase client APIs. This binding implements the Protobuf API for testing the native client. To test Bigtable using the HBase API, see the `hbase10` binding.
+This driver provides a YCSB workload binding for Google's hosted Bigtable, the inspiration for a number of key-value stores like HBase and Cassandra. The Bigtable Java client provides both Protobuf based GRPC and HBase client APIs. This binding implements the Protobuf API for testing the native client. To test Bigtable using the HBase API, see the `hbase14` binding.
## Quickstart
diff --git a/hbase098/pom.xml b/hbase098/pom.xml
deleted file mode 100644
index b35c0beb5a..0000000000
--- a/hbase098/pom.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-
-
-
-
- 4.0.0
-
- site.ycsb
- binding-parent
- 0.18.0-SNAPSHOT
- ../binding-parent/
-
-
- hbase098-binding
- HBase 0.98.x DB Binding
-
-
-
- org.apache.hbase
- hbase-client
- ${hbase098.version}
-
-
- jdk.tools
- jdk.tools
-
-
-
-
- site.ycsb
- core
- ${project.version}
- provided
-
-
-
diff --git a/hbase098/src/main/java/site/ycsb/db/HBaseClient.java b/hbase098/src/main/java/site/ycsb/db/HBaseClient.java
deleted file mode 100644
index 0ce20b9065..0000000000
--- a/hbase098/src/main/java/site/ycsb/db/HBaseClient.java
+++ /dev/null
@@ -1,483 +0,0 @@
-/**
- * Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you
- * may not use this file except in compliance with the License. You
- * may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License. See accompanying
- * LICENSE file.
- */
-
-package site.ycsb.db;
-
-import site.ycsb.ByteArrayByteIterator;
-import site.ycsb.ByteIterator;
-import site.ycsb.DBException;
-import site.ycsb.Status;
-import site.ycsb.measurements.Measurements;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.*;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY;
-import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT;
-
-/**
- * HBase client for YCSB framework.
- */
-public class HBaseClient extends site.ycsb.DB {
- private static final Configuration CONFIG = HBaseConfiguration.create();
- private static final AtomicInteger THREAD_COUNT = new AtomicInteger(0);
-
- private boolean debug = false;
-
- private String tableName = "";
- private static HConnection hConn = null;
- private HTableInterface hTable = null;
- private String columnFamily = "";
- private byte[] columnFamilyBytes;
- private boolean clientSideBuffering = false;
- private long writeBufferSize = 1024 * 1024 * 12;
- /**
- * Whether or not a page filter should be used to limit scan length.
- */
- private boolean usePageFilter = true;
-
- private static final Object TABLE_LOCK = new Object();
-
- /**
- * Initialize any state for this DB.
- * Called once per DB instance; there is one DB instance per client thread.
- */
- public void init() throws DBException {
- if ((getProperties().getProperty("debug") != null) &&
- (getProperties().getProperty("debug").compareTo("true") == 0)) {
- debug = true;
- }
-
- if (getProperties().containsKey("clientbuffering")) {
- clientSideBuffering = Boolean.parseBoolean(getProperties().getProperty("clientbuffering"));
- }
- if (getProperties().containsKey("writebuffersize")) {
- writeBufferSize = Long.parseLong(getProperties().getProperty("writebuffersize"));
- }
- if ("false".equals(getProperties().getProperty("hbase.usepagefilter", "true"))) {
- usePageFilter = false;
- }
- if ("kerberos".equalsIgnoreCase(CONFIG.get("hbase.security.authentication"))) {
- CONFIG.set("hadoop.security.authentication", "Kerberos");
- UserGroupInformation.setConfiguration(CONFIG);
- }
- if ((getProperties().getProperty("principal") != null) && (getProperties().getProperty("keytab") != null)) {
- try {
- UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"),
- getProperties().getProperty("keytab"));
- } catch (IOException e) {
- System.err.println("Keytab file is not readable or not found");
- throw new DBException(e);
- }
- }
- try {
- THREAD_COUNT.getAndIncrement();
- synchronized (THREAD_COUNT) {
- if (hConn == null) {
- hConn = HConnectionManager.createConnection(CONFIG);
- }
- }
- } catch (IOException e) {
- System.err.println("Connection to HBase was not successful");
- throw new DBException(e);
- }
- columnFamily = getProperties().getProperty("columnfamily");
- if (columnFamily == null) {
- System.err.println("Error, must specify a columnfamily for HBase tableName");
- throw new DBException("No columnfamily specified");
- }
- columnFamilyBytes = Bytes.toBytes(columnFamily);
-
- // Terminate right now if tableName does not exist, since the client
- // will not propagate this error upstream once the workload
- // starts.
- String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
- try {
- HTableInterface ht = hConn.getTable(table);
- ht.getTableDescriptor();
- } catch (IOException e) {
- throw new DBException(e);
- }
- }
-
- /**
- * Cleanup any state for this DB.
- * Called once per DB instance; there is one DB instance per client thread.
- */
- public void cleanup() throws DBException {
- // Get the measurements instance as this is the only client that should
- // count clean up time like an update since autoflush is off.
- Measurements measurements = Measurements.getMeasurements();
- try {
- long st = System.nanoTime();
- if (hTable != null) {
- hTable.flushCommits();
- }
- synchronized (THREAD_COUNT) {
- int threadCount = THREAD_COUNT.decrementAndGet();
- if (threadCount <= 0 && hConn != null) {
- hConn.close();
- }
- }
- long en = System.nanoTime();
- measurements.measure("UPDATE", (int) ((en - st) / 1000));
- } catch (IOException e) {
- throw new DBException(e);
- }
- }
-
- private void getHTable(String table) throws IOException {
- synchronized (TABLE_LOCK) {
- hTable = hConn.getTable(table);
- //2 suggestions from http://ryantwopointoh.blogspot.com/2009/01/performance-of-hbase-importing.html
- hTable.setAutoFlush(!clientSideBuffering, true);
- hTable.setWriteBufferSize(writeBufferSize);
- //return hTable;
- }
-
- }
-
- /**
- * Read a record from the database. Each field/value pair from the result will be stored in a HashMap.
- *
- * @param table The name of the tableName
- * @param key The record key of the record to read.
- * @param fields The list of fields to read, or null for all of them
- * @param result A HashMap of field/value pairs for the result
- * @return Zero on success, a non-zero error code on error
- */
- public Status read(String table, String key, Set fields, Map result) {
- //if this is a "new" tableName, init HTable object. Else, use existing one
- if (!this.tableName.equals(table)) {
- hTable = null;
- try {
- getHTable(table);
- this.tableName = table;
- } catch (IOException e) {
- System.err.println("Error accessing HBase tableName: " + e);
- return Status.ERROR;
- }
- }
-
- Result r;
- try {
- if (debug) {
- System.out.println("Doing read from HBase columnfamily " + columnFamily);
- System.out.println("Doing read for key: " + key);
- }
- Get g = new Get(Bytes.toBytes(key));
- if (fields == null) {
- g.addFamily(columnFamilyBytes);
- } else {
- for (String field : fields) {
- g.addColumn(columnFamilyBytes, Bytes.toBytes(field));
- }
- }
- r = hTable.get(g);
- } catch (IOException e) {
- System.err.println("Error doing get: " + e);
- return Status.ERROR;
- } catch (ConcurrentModificationException e) {
- //do nothing for now...need to understand HBase concurrency model better
- return Status.ERROR;
- }
-
- for (KeyValue kv : r.raw()) {
- result.put(
- Bytes.toString(kv.getQualifier()),
- new ByteArrayByteIterator(kv.getValue()));
- if (debug) {
- System.out.println("Result for field: " + Bytes.toString(kv.getQualifier()) +
- " is: " + Bytes.toString(kv.getValue()));
- }
-
- }
- return Status.OK;
- }
-
- /**
- * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored
- * in a HashMap.
- *
- * @param table The name of the tableName
- * @param startkey The record key of the first record to read.
- * @param recordcount The number of records to read
- * @param fields The list of fields to read, or null for all of them
- * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
- * @return Zero on success, a non-zero error code on error
- */
- public Status scan(String table, String startkey, int recordcount, Set fields,
- Vector> result) {
- //if this is a "new" tableName, init HTable object. Else, use existing one
- if (!this.tableName.equals(table)) {
- hTable = null;
- try {
- getHTable(table);
- this.tableName = table;
- } catch (IOException e) {
- System.err.println("Error accessing HBase tableName: " + e);
- return Status.ERROR;
- }
- }
-
- Scan s = new Scan(Bytes.toBytes(startkey));
- //HBase has no record limit. Here, assume recordcount is small enough to bring back in one call.
- //We get back recordcount records
- s.setCaching(recordcount);
- if (this.usePageFilter) {
- s.setFilter(new PageFilter(recordcount));
- }
-
- //add specified fields or else all fields
- if (fields == null) {
- s.addFamily(columnFamilyBytes);
- } else {
- for (String field : fields) {
- s.addColumn(columnFamilyBytes, Bytes.toBytes(field));
- }
- }
-
- //get results
- try (ResultScanner scanner = hTable.getScanner(s)) {
- int numResults = 0;
- for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
- //get row key
- String key = Bytes.toString(rr.getRow());
- if (debug) {
- System.out.println("Got scan result for key: " + key);
- }
-
- HashMap rowResult = new HashMap<>();
-
- for (KeyValue kv : rr.raw()) {
- rowResult.put(
- Bytes.toString(kv.getQualifier()),
- new ByteArrayByteIterator(kv.getValue()));
- }
- //add rowResult to result vector
- result.add(rowResult);
- numResults++;
-
- // PageFilter does not guarantee that the number of results is <= pageSize, so this
- // break is required.
- //if hit recordcount, bail out
- if (numResults >= recordcount) {
- break;
- }
- } //done with row
-
- } catch (IOException e) {
- if (debug) {
- System.out.println("Error in getting/parsing scan result: " + e);
- }
- return Status.ERROR;
- }
-
- return Status.OK;
- }
-
- /**
- * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the
- * record with the specified record key, overwriting any existing values with the same field name.
- *
- * @param table The name of the tableName
- * @param key The record key of the record to write
- * @param values A HashMap of field/value pairs to update in the record
- * @return Zero on success, a non-zero error code on error
- */
- public Status update(String table, String key, Map values) {
- //if this is a "new" tableName, init HTable object. Else, use existing one
- if (!this.tableName.equals(table)) {
- hTable = null;
- try {
- getHTable(table);
- this.tableName = table;
- } catch (IOException e) {
- System.err.println("Error accessing HBase tableName: " + e);
- return Status.ERROR;
- }
- }
-
-
- if (debug) {
- System.out.println("Setting up put for key: " + key);
- }
- Put p = new Put(Bytes.toBytes(key));
- for (Map.Entry entry : values.entrySet()) {
- byte[] value = entry.getValue().toArray();
- if (debug) {
- System.out.println("Adding field/value " + entry.getKey() + "/" +
- Bytes.toStringBinary(value) + " to put request");
- }
- p.add(columnFamilyBytes, Bytes.toBytes(entry.getKey()), value);
- }
-
- try {
- hTable.put(p);
- } catch (IOException e) {
- if (debug) {
- System.err.println("Error doing put: " + e);
- }
- return Status.ERROR;
- } catch (ConcurrentModificationException e) {
- //do nothing for now...hope this is rare
- return Status.ERROR;
- }
-
- return Status.OK;
- }
-
- /**
- * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the
- * record with the specified record key.
- *
- * @param table The name of the tableName
- * @param key The record key of the record to insert.
- * @param values A HashMap of field/value pairs to insert in the record
- * @return Zero on success, a non-zero error code on error
- */
- public Status insert(String table, String key, Map values) {
- return update(table, key, values);
- }
-
- /**
- * Delete a record from the database.
- *
- * @param table The name of the tableName
- * @param key The record key of the record to delete.
- * @return Zero on success, a non-zero error code on error
- */
- public Status delete(String table, String key) {
- //if this is a "new" tableName, init HTable object. Else, use existing one
- if (!this.tableName.equals(table)) {
- hTable = null;
- try {
- getHTable(table);
- this.tableName = table;
- } catch (IOException e) {
- System.err.println("Error accessing HBase tableName: " + e);
- return Status.ERROR;
- }
- }
-
- if (debug) {
- System.out.println("Doing delete for key: " + key);
- }
-
- Delete d = new Delete(Bytes.toBytes(key));
- try {
- hTable.delete(d);
- } catch (IOException e) {
- if (debug) {
- System.err.println("Error doing delete: " + e);
- }
- return Status.ERROR;
- }
-
- return Status.OK;
- }
-
- public static void main(String[] args) {
- if (args.length != 3) {
- System.out.println("Please specify a threadcount, columnfamily and operation count");
- System.exit(0);
- }
-
- final int keyspace = 10000; //120000000;
-
- final int threadcount = Integer.parseInt(args[0]);
-
- final String columnfamily = args[1];
-
-
- final int opcount = Integer.parseInt(args[2]) / threadcount;
-
- Vector allthreads = new Vector<>();
-
- for (int i = 0; i < threadcount; i++) {
- Thread t = new Thread() {
- public void run() {
- try {
- Random random = new Random();
-
- HBaseClient cli = new HBaseClient();
-
- Properties props = new Properties();
- props.setProperty("columnfamily", columnfamily);
- props.setProperty("debug", "true");
- cli.setProperties(props);
-
- cli.init();
-
- long accum = 0;
-
- for (int i = 0; i < opcount; i++) {
- int keynum = random.nextInt(keyspace);
- String key = "user" + keynum;
- long st = System.currentTimeMillis();
- Status result;
- Vector> scanResults = new Vector<>();
- Set scanFields = new HashSet();
- result = cli.scan("table1", "user2", 20, null, scanResults);
-
- long en = System.currentTimeMillis();
-
- accum += (en - st);
-
- if (!result.equals(Status.OK)) {
- System.out.println("Error " + result + " for " + key);
- }
-
- if (i % 10 == 0) {
- System.out.println(i + " operations, average latency: " + (((double) accum) / ((double) i)));
- }
- }
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
- };
- allthreads.add(t);
- }
-
- long st = System.currentTimeMillis();
- for (Thread t : allthreads) {
- t.start();
- }
-
- for (Thread t : allthreads) {
- try {
- t.join();
- } catch (InterruptedException ignored) {
- System.err.println("interrupted");
- Thread.currentThread().interrupt();
- }
- }
- long en = System.currentTimeMillis();
-
- System.out.println("Throughput: " + ((1000.0) * (((double) (opcount * threadcount)) / ((double) (en - st))))
- + " ops/sec");
- }
-}
diff --git a/hbase098/src/main/java/site/ycsb/db/package-info.java b/hbase098/src/main/java/site/ycsb/db/package-info.java
deleted file mode 100644
index 0ba2b55f39..0000000000
--- a/hbase098/src/main/java/site/ycsb/db/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2017, Yahoo!, Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License. See accompanying LICENSE file.
- */
-
-/**
- * The YCSB binding for HBase
- * 0.98.X.
- */
-package site.ycsb.db;
diff --git a/hbase10/README.md b/hbase10/README.md
deleted file mode 100644
index 8fdc7e58ad..0000000000
--- a/hbase10/README.md
+++ /dev/null
@@ -1,110 +0,0 @@
-
-
-# HBase (1.0.x) Driver for YCSB
-This driver is a binding for the YCSB facilities to operate against a HBase 1.0.x Server cluster or Google's hosted Bigtable.
-To run against an HBase 0.98.x cluster, use the `hbase098` binding.
-
-See `hbase098/README.md` for a quickstart to setup HBase for load testing and common configuration details.
-
-## Configuration Options
-In addition to those options available for the `hbase098` binding, the following options are available for the `hbase10` binding:
-
-* `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput but data cannot be recovered in the event of a crash. The default is true.
-
-## Bigtable
-
-Google's Bigtable service provides an implementation of the HBase API for migrating existing applications. Users can perform load tests against Bigtable using this binding.
-
-### 1. Setup a Bigtable Cluster
-
-Login to the Google Cloud Console and follow the [Creating Cluster](https://cloud.google.com/bigtable/docs/creating-cluster) steps. Make a note of your cluster name, zone and project ID.
-
-### 2. Launch the Bigtable Shell
-
-From the Cloud Console, launch a shell and follow the [Quickstart](https://cloud.google.com/bigtable/docs/quickstart) up to step 4 where you launch the HBase shell.
-
-### 3. Create a Table
-
-For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163):
-
-```
-hbase(main):001:0> n_splits = 200 # HBase recommends (10 * number of regionservers)
-hbase(main):002:0> create 'usertable', 'cf', {SPLITS => (1..n_splits).map {|i| "user#{1000+i*(9999-1000)/n_splits}"}}
-```
-
-Make a note of the column family, in this example it's `cf``.
-
-### 4. Download the Bigtable Client Jar with required dependencies:
-
-```
-mvn -N dependency:copy -Dartifact=com.google.cloud.bigtable:bigtable-hbase-1.x-hadoop:1.0.0 -DoutputDirectory=target/bigtable-deps
-mvn -N dependency:copy -Dartifact=io.dropwizard.metrics:metrics-core:3.1.2 -DoutputDirectory=target/bigtable-deps
-```
-
-Download the latest `bigtable-hbase-1.x-hadoop` jar from [Maven](http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22com.google.cloud.bigtable%22%20AND%20a%3A%22bigtable-hbase-1.x-hadoop%22) to your host.
-
-### 5. Download JSON Credentials
-
-Follow these instructions for [Generating a JSON key](https://cloud.google.com/bigtable/docs/installing-hbase-shell#service-account) and save it to your host.
-
-### 6. Create or Edit hbase-site.xml
-
-If you have an existing HBase configuration directory with an `hbase-site.xml` file, edit the file as per below. If not, create a directory called `conf` under the `hbase10` directory. Create a file in the conf directory named `hbase-site.xml`. Provide the following settings in the XML file, making sure to replace the bracketed examples with the proper values from your Cloud console.
-
-```
-
-
- hbase.client.connection.impl
- com.google.cloud.bigtable.hbase1_x.BigtableConnection
-
-
- google.bigtable.project.id
- [YOUR-PROJECT-ID]
-
-
- google.bigtable.instance.id
- [YOUR-INSTANCE-ID]
-
-
- google.bigtable.auth.service.account.enable
- true
-
-
- google.bigtable.auth.json.keyfile
- [PATH-TO-YOUR-KEY-FILE]
-
-
-```
-
-If you have an existing HBase config directory, make sure to add it to the class path via `-cp :`.
-
-### 7. Execute a Workload
-
-Switch to the root of the YCSB repo and choose the workload you want to run and `load` it first. With the CLI you must provide the column family, cluster properties and the ALPN jar to load.
-
-```
-bin/ycsb load hbase10 -p columnfamily=cf -cp 'target/bigtable-deps/*' -P workloads/workloada
-
-```
-
-The `load` step only executes inserts into the datastore. After loading data, run the same workload to mix reads with writes.
-
-```
-bin/ycsb run hbase10 -p columnfamily=cf -cp 'target/bigtable-deps/* -P workloads/workloada
-
-```
diff --git a/hbase10/src/test/java/site/ycsb/db/HBaseClient10Test.java b/hbase10/src/test/java/site/ycsb/db/HBaseClient10Test.java
deleted file mode 100644
index ebfcc27355..0000000000
--- a/hbase10/src/test/java/site/ycsb/db/HBaseClient10Test.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License"); you
- * may not use this file except in compliance with the License. You
- * may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License. See accompanying
- * LICENSE file.
- */
-
-package site.ycsb.db;
-
-import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY;
-import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
-
-import site.ycsb.ByteIterator;
-import site.ycsb.Status;
-import site.ycsb.StringByteIterator;
-import site.ycsb.measurements.Measurements;
-import site.ycsb.workloads.CoreWorkload;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.List;
-import java.util.Properties;
-import java.util.Vector;
-
-/**
- * Integration tests for the YCSB HBase client 1.0, using an HBase minicluster.
- */
-public class HBaseClient10Test {
-
- private final static String COLUMN_FAMILY = "cf";
-
- private static HBaseTestingUtility testingUtil;
- private HBaseClient10 client;
- private Table table = null;
- private String tableName;
-
- private static boolean isWindows() {
- final String os = System.getProperty("os.name");
- return os.startsWith("Windows");
- }
-
- /**
- * Creates a mini-cluster for use in these tests.
- *
- * This is a heavy-weight operation, so invoked only once for the test class.
- */
- @BeforeClass
- public static void setUpClass() throws Exception {
- // Minicluster setup fails on Windows with an UnsatisfiedLinkError.
- // Skip if windows.
- assumeTrue(!isWindows());
- testingUtil = HBaseTestingUtility.createLocalHTU();
- testingUtil.startMiniCluster();
- }
-
- /**
- * Tears down mini-cluster.
- */
- @AfterClass
- public static void tearDownClass() throws Exception {
- if (testingUtil != null) {
- testingUtil.shutdownMiniCluster();
- }
- }
-
- /**
- * Sets up the mini-cluster for testing.
- *
- * We re-create the table for each test.
- */
- @Before
- public void setUp() throws Exception {
- client = new HBaseClient10();
- client.setConfiguration(new Configuration(testingUtil.getConfiguration()));
-
- Properties p = new Properties();
- p.setProperty("columnfamily", COLUMN_FAMILY);
-
- Measurements.setProperties(p);
- final CoreWorkload workload = new CoreWorkload();
- workload.init(p);
-
- tableName = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
- table = testingUtil.createTable(TableName.valueOf(tableName), Bytes.toBytes(COLUMN_FAMILY));
-
- client.setProperties(p);
- client.init();
- }
-
- @After
- public void tearDown() throws Exception {
- table.close();
- testingUtil.deleteTable(tableName);
- }
-
- @Test
- public void testRead() throws Exception {
- final String rowKey = "row1";
- final Put p = new Put(Bytes.toBytes(rowKey));
- p.addColumn(Bytes.toBytes(COLUMN_FAMILY),
- Bytes.toBytes("column1"), Bytes.toBytes("value1"));
- p.addColumn(Bytes.toBytes(COLUMN_FAMILY),
- Bytes.toBytes("column2"), Bytes.toBytes("value2"));
- table.put(p);
-
- final HashMap result = new HashMap();
- final Status status = client.read(tableName, rowKey, null, result);
- assertEquals(Status.OK, status);
- assertEquals(2, result.size());
- assertEquals("value1", result.get("column1").toString());
- assertEquals("value2", result.get("column2").toString());
- }
-
- @Test
- public void testReadMissingRow() throws Exception {
- final HashMap result = new HashMap();
- final Status status = client.read(tableName, "Missing row", null, result);
- assertEquals(Status.NOT_FOUND, status);
- assertEquals(0, result.size());
- }
-
- @Test
- public void testScan() throws Exception {
- // Fill with data
- final String colStr = "row_number";
- final byte[] col = Bytes.toBytes(colStr);
- final int n = 10;
- final List puts = new ArrayList(n);
- for(int i = 0; i < n; i++) {
- final byte[] key = Bytes.toBytes(String.format("%05d", i));
- final byte[] value = java.nio.ByteBuffer.allocate(4).putInt(i).array();
- final Put p = new Put(key);
- p.addColumn(Bytes.toBytes(COLUMN_FAMILY), col, value);
- puts.add(p);
- }
- table.put(puts);
-
- // Test
- final Vector> result =
- new Vector>();
-
- // Scan 5 records, skipping the first
- client.scan(tableName, "00001", 5, null, result);
-
- assertEquals(5, result.size());
- for(int i = 0; i < 5; i++) {
- final Map row = result.get(i);
- assertEquals(1, row.size());
- assertTrue(row.containsKey(colStr));
- final byte[] bytes = row.get(colStr).toArray();
- final ByteBuffer buf = ByteBuffer.wrap(bytes);
- final int rowNum = buf.getInt();
- assertEquals(i + 1, rowNum);
- }
- }
-
- @Test
- public void testUpdate() throws Exception{
- final String key = "key";
- final Map input = new HashMap();
- input.put("column1", "value1");
- input.put("column2", "value2");
- final Status status = client.insert(tableName, key, StringByteIterator.getByteIteratorMap(input));
- assertEquals(Status.OK, status);
-
- // Verify result
- final Get get = new Get(Bytes.toBytes(key));
- final Result result = this.table.get(get);
- assertFalse(result.isEmpty());
- assertEquals(2, result.size());
- for(final java.util.Map.Entry entry : input.entrySet()) {
- assertEquals(entry.getValue(),
- new String(result.getValue(Bytes.toBytes(COLUMN_FAMILY),
- Bytes.toBytes(entry.getKey()))));
- }
- }
-
- @Test
- @Ignore("Not yet implemented")
- public void testDelete() {
- fail("Not yet implemented");
- }
-}
-
diff --git a/hbase12/README.md b/hbase12/README.md
deleted file mode 100644
index 74bee71ff1..0000000000
--- a/hbase12/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-
-
-# HBase (1.2+) Driver for YCSB
-This driver is a binding for the YCSB facilities to operate against a HBase 1.2+ Server cluster, using a shaded client that tries to avoid leaking third party libraries.
-
-See `hbase098/README.md` for a quickstart to setup HBase for load testing and common configuration details.
-
-## Configuration Options
-In addition to those options available for the `hbase098` binding, the following options are available for the `hbase12` binding:
-
-* `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput but data cannot be recovered in the event of a crash. The default is true. We can set it to flase by option '-p durability=SKIP_WAL'.
-
diff --git a/hbase12/pom.xml b/hbase12/pom.xml
deleted file mode 100644
index f2bea40a25..0000000000
--- a/hbase12/pom.xml
+++ /dev/null
@@ -1,87 +0,0 @@
-
-
-
-
- 4.0.0
-
- site.ycsb
- binding-parent
- 0.18.0-SNAPSHOT
- ../binding-parent/
-
-
- hbase12-binding
- HBase 1.2 DB Binding
-
-
- true
- true
- true
-
-
- true
-
-
-
- site.ycsb
- hbase10-binding
- ${project.version}
-
-
-
- org.apache.hbase
- hbase-client
-
-
-
-
- site.ycsb
- core
- ${project.version}
- provided
-
-
- org.apache.hbase
- hbase-shaded-client
- ${hbase12.version}
-
-
- junit
- junit
- 4.12
- test
-
-
-
-
diff --git a/hbase12/src/main/java/site/ycsb/db/hbase12/HBaseClient12.java b/hbase12/src/main/java/site/ycsb/db/hbase12/HBaseClient12.java
deleted file mode 100644
index 12ac84e572..0000000000
--- a/hbase12/src/main/java/site/ycsb/db/hbase12/HBaseClient12.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License"); you
- * may not use this file except in compliance with the License. You
- * may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License. See accompanying
- * LICENSE file.
- */
-
-package site.ycsb.db.hbase12;
-
-/**
- * HBase 1.2 client for YCSB framework.
- *
- * A modified version of HBaseClient (which targets HBase v1.2) utilizing the
- * shaded client.
- *
- * It should run equivalent to following the hbase098 binding README.
- *
- */
-public class HBaseClient12 extends site.ycsb.db.HBaseClient10 {
-}
diff --git a/hbase12/src/main/java/site/ycsb/db/hbase12/package-info.java b/hbase12/src/main/java/site/ycsb/db/hbase12/package-info.java
deleted file mode 100644
index 1363a52715..0000000000
--- a/hbase12/src/main/java/site/ycsb/db/hbase12/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you
- * may not use this file except in compliance with the License. You
- * may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License. See accompanying
- * LICENSE file.
- */
-
-/**
- * The YCSB binding for HBase
- * using the HBase 1.2+ shaded API.
- */
-package site.ycsb.db.hbase12;
-
diff --git a/hbase12/src/test/java/site/ycsb/db/hbase12/HBaseClient12Test.java b/hbase12/src/test/java/site/ycsb/db/hbase12/HBaseClient12Test.java
deleted file mode 100644
index f8bd7c4702..0000000000
--- a/hbase12/src/test/java/site/ycsb/db/hbase12/HBaseClient12Test.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License"); you
- * may not use this file except in compliance with the License. You
- * may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License. See accompanying
- * LICENSE file.
- */
-
-package site.ycsb.db.hbase12;
-
-import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY;
-import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
-
-import site.ycsb.ByteIterator;
-import site.ycsb.Status;
-import site.ycsb.StringByteIterator;
-import site.ycsb.measurements.Measurements;
-import site.ycsb.workloads.CoreWorkload;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Properties;
-import java.util.Vector;
-
-/**
- * Integration tests for the YCSB HBase client 1.2, using an HBase minicluster.
- */
-public class HBaseClient12Test {
-
- private final static String COLUMN_FAMILY = "cf";
-
- private static HBaseTestingUtility testingUtil;
- private HBaseClient12 client;
- private Table table = null;
- private String tableName;
-
- private static boolean isWindows() {
- final String os = System.getProperty("os.name");
- return os.startsWith("Windows");
- }
-
- /**
- * Creates a mini-cluster for use in these tests.
- *
- * This is a heavy-weight operation, so invoked only once for the test class.
- */
- @BeforeClass
- public static void setUpClass() throws Exception {
- // Minicluster setup fails on Windows with an UnsatisfiedLinkError.
- // Skip if windows.
- assumeTrue(!isWindows());
- testingUtil = HBaseTestingUtility.createLocalHTU();
- testingUtil.startMiniCluster();
- }
-
- /**
- * Tears down mini-cluster.
- */
- @AfterClass
- public static void tearDownClass() throws Exception {
- if (testingUtil != null) {
- testingUtil.shutdownMiniCluster();
- }
- }
-
- /**
- * Sets up the mini-cluster for testing.
- *
- * We re-create the table for each test.
- */
- @Before
- public void setUp() throws Exception {
- client = new HBaseClient12();
- client.setConfiguration(new Configuration(testingUtil.getConfiguration()));
-
- Properties p = new Properties();
- p.setProperty("columnfamily", COLUMN_FAMILY);
-
- Measurements.setProperties(p);
- final CoreWorkload workload = new CoreWorkload();
- workload.init(p);
-
- tableName = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
- table = testingUtil.createTable(TableName.valueOf(tableName), Bytes.toBytes(COLUMN_FAMILY));
-
- client.setProperties(p);
- client.init();
- }
-
- @After
- public void tearDown() throws Exception {
- table.close();
- testingUtil.deleteTable(tableName);
- }
-
- @Test
- public void testRead() throws Exception {
- final String rowKey = "row1";
- final Put p = new Put(Bytes.toBytes(rowKey));
- p.addColumn(Bytes.toBytes(COLUMN_FAMILY),
- Bytes.toBytes("column1"), Bytes.toBytes("value1"));
- p.addColumn(Bytes.toBytes(COLUMN_FAMILY),
- Bytes.toBytes("column2"), Bytes.toBytes("value2"));
- table.put(p);
-
- final HashMap result = new HashMap();
- final Status status = client.read(tableName, rowKey, null, result);
- assertEquals(Status.OK, status);
- assertEquals(2, result.size());
- assertEquals("value1", result.get("column1").toString());
- assertEquals("value2", result.get("column2").toString());
- }
-
- @Test
- public void testReadMissingRow() throws Exception {
- final HashMap result = new HashMap();
- final Status status = client.read(tableName, "Missing row", null, result);
- assertEquals(Status.NOT_FOUND, status);
- assertEquals(0, result.size());
- }
-
- @Test
- public void testScan() throws Exception {
- // Fill with data
- final String colStr = "row_number";
- final byte[] col = Bytes.toBytes(colStr);
- final int n = 10;
- final List puts = new ArrayList(n);
- for(int i = 0; i < n; i++) {
- final byte[] key = Bytes.toBytes(String.format("%05d", i));
- final byte[] value = java.nio.ByteBuffer.allocate(4).putInt(i).array();
- final Put p = new Put(key);
- p.addColumn(Bytes.toBytes(COLUMN_FAMILY), col, value);
- puts.add(p);
- }
- table.put(puts);
-
- // Test
- final Vector> result =
- new Vector>();
-
- // Scan 5 records, skipping the first
- client.scan(tableName, "00001", 5, null, result);
-
- assertEquals(5, result.size());
- for(int i = 0; i < 5; i++) {
- final HashMap row = result.get(i);
- assertEquals(1, row.size());
- assertTrue(row.containsKey(colStr));
- final byte[] bytes = row.get(colStr).toArray();
- final ByteBuffer buf = ByteBuffer.wrap(bytes);
- final int rowNum = buf.getInt();
- assertEquals(i + 1, rowNum);
- }
- }
-
- @Test
- public void testUpdate() throws Exception{
- final String key = "key";
- final HashMap input = new HashMap();
- input.put("column1", "value1");
- input.put("column2", "value2");
- final Status status = client.insert(tableName, key, StringByteIterator.getByteIteratorMap(input));
- assertEquals(Status.OK, status);
-
- // Verify result
- final Get get = new Get(Bytes.toBytes(key));
- final Result result = this.table.get(get);
- assertFalse(result.isEmpty());
- assertEquals(2, result.size());
- for(final java.util.Map.Entry entry : input.entrySet()) {
- assertEquals(entry.getValue(),
- new String(result.getValue(Bytes.toBytes(COLUMN_FAMILY),
- Bytes.toBytes(entry.getKey()))));
- }
- }
-
- @Test
- @Ignore("Not yet implemented")
- public void testDelete() {
- fail("Not yet implemented");
- }
-}
-
diff --git a/hbase12/src/test/resources/hbase-site.xml b/hbase12/src/test/resources/hbase-site.xml
deleted file mode 100644
index a8b29e451f..0000000000
--- a/hbase12/src/test/resources/hbase-site.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
-
-
- hbase.master.info.port
- -1
- The port for the hbase master web UI
- Set to -1 if you do not want the info server to run.
-
-
-
- hbase.regionserver.info.port
- -1
- The port for the hbase regionserver web UI
- Set to -1 if you do not want the info server to run.
-
-
-
diff --git a/hbase12/src/test/resources/log4j.properties b/hbase12/src/test/resources/log4j.properties
deleted file mode 100644
index a9df32e044..0000000000
--- a/hbase12/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (c) 2015 YCSB contributors. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you
-# may not use this file except in compliance with the License. You
-# may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing
-# permissions and limitations under the License. See accompanying
-# LICENSE file.
-#
-
-# Root logger option
-log4j.rootLogger=WARN, stderr
-
-log4j.appender.stderr=org.apache.log4j.ConsoleAppender
-log4j.appender.stderr.target=System.err
-log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
-log4j.appender.stderr.layout.conversionPattern=%d{yyyy/MM/dd HH:mm:ss} %-5p %c %x - %m%n
-
-# Suppress messages from ZKTableStateManager: Creates a large number of table
-# state change messages.
-log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKTableStateManager=ERROR
diff --git a/hbase14/README.md b/hbase14/README.md
index 01ab0ed2b7..271cdce5c3 100644
--- a/hbase14/README.md
+++ b/hbase14/README.md
@@ -18,10 +18,142 @@ LICENSE file.
# HBase (1.4+) Driver for YCSB
This driver is a binding for the YCSB facilities to operate against a HBase 1.4+ Server cluster, using a shaded client that tries to avoid leaking third party libraries.
-See `hbase098/README.md` for a quickstart to setup HBase for load testing and common configuration details.
+# Testing HBase
+## 1. Start a HBase Server
+You need to start a single node or a cluster to point the client at. Please see [Apache HBase Reference Guide](http://hbase.apache.org/book.html) for more details and instructions.
+
+## 2. Set up YCSB
+
+Download the [latest YCSB](https://github.com/brianfrankcooper/YCSB/releases/latest) file. Follow the instructions.
+
+## 3. Create a HBase table for testing
+
+For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163):
+
+```
+hbase(main):001:0> n_splits = 200 # HBase recommends (10 * number of regionservers)
+hbase(main):002:0> create 'usertable', 'family', {SPLITS => (1..n_splits).map {|i| "user#{1000+i*(9999-1000)/n_splits}"}}
+```
+
+*Failing to do so will cause all writes to initially target a single region server*.
+
+## 4. Run the Workload
+Before you can actually run the workload, you need to "load" the data first.
+
+You should specify a HBase config directory (or any other directory containing your hbase-site.xml), a table name, and a column family (-cp is used to set the java classpath and -p is used to set various properties).
+
+```
+bin/ycsb load hbase14 -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family
+```
+
+Then, you can run the workload:
+
+```
+bin/ycsb run hbase14 -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family
+```
+
+Please see the general instructions in the `doc` folder if you are not sure how it all works. You can apply additional properties (as seen in the next section) like this:
+
+```
+bin/ycsb run hbase14 -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family -p clientbuffering=true
+```
## Configuration Options
-In addition to those options available for the `hbase098` binding, the following options are available for the `hbase14` binding:
+The following options can be configured using `-p`.
+* `columnfamily`: The HBase column family to target.
+* `debug` : If true, debugging logs are activated. The default is false.
+* `hbase.usepagefilter` : If true, HBase
+ [PageFilter](https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/PageFilter.html)s
+ are used to limit the number of records consumed in a scan operation. The default is true.
+* `principal`: If testing need to be done against a secure HBase cluster using Kerberos Keytab,
+ this property can be used to pass the principal in the keytab file.
+* `keytab`: The Kerberos keytab file name and location can be passed through this property.
+* `clientbuffering`: Whether or not to use client side buffering and batching of write operations. This can significantly improve performance and defaults to true.
+* `writebuffersize`: The maximum amount, in bytes, of data to buffer on the client side before a flush is forced. The default is 12MB. Only used when `clientbuffering` is true.
* `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput but data cannot be recovered in the event of a crash. The default is true.
+Additional HBase settings should be provided in the `hbase-site.xml` file located in your `/HBASE-HOME-DIR/conf` directory. Typically this will be `/etc/hbase/conf`.
+
+## Bigtable
+
+Google's Bigtable service provides an implementation of the HBase API for migrating existing applications. Users can perform load tests against Bigtable using this binding.
+
+### 1. Setup a Bigtable Cluster
+
+Login to the Google Cloud Console and follow the [Creating Cluster](https://cloud.google.com/bigtable/docs/creating-cluster) steps. Make a note of your cluster name, zone and project ID.
+
+### 2. Launch the Bigtable Shell
+
+From the Cloud Console, launch a shell and follow the [Quickstart](https://cloud.google.com/bigtable/docs/quickstart) up to step 4 where you launch the HBase shell.
+
+### 3. Create a Table
+
+For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163):
+
+```
+hbase(main):001:0> n_splits = 200 # HBase recommends (10 * number of regionservers)
+hbase(main):002:0> create 'usertable', 'cf', {SPLITS => (1..n_splits).map {|i| "user#{1000+i*(9999-1000)/n_splits}"}}
+```
+
+Make a note of the column family; in this example it's `cf`.
+
+### 4. Download the Bigtable Client Jar with required dependencies:
+
+```
+mvn -N dependency:copy -Dartifact=com.google.cloud.bigtable:bigtable-hbase-1.x-hadoop:1.0.0 -DoutputDirectory=target/bigtable-deps
+mvn -N dependency:copy -Dartifact=io.dropwizard.metrics:metrics-core:3.1.2 -DoutputDirectory=target/bigtable-deps
+```
+
+Download the latest `bigtable-hbase-1.x-hadoop` jar from [Maven](http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22com.google.cloud.bigtable%22%20AND%20a%3A%22bigtable-hbase-1.x-hadoop%22) to your host.
+
+### 5. Download JSON Credentials
+
+Follow these instructions for [Generating a JSON key](https://cloud.google.com/bigtable/docs/installing-hbase-shell#service-account) and save it to your host.
+
+### 6. Create or Edit hbase-site.xml
+
+If you have an existing HBase configuration directory with an `hbase-site.xml` file, edit the file as per below. If not, create a directory called `conf` under the `hbase14` directory. Create a file in the conf directory named `hbase-site.xml`. Provide the following settings in the XML file, making sure to replace the bracketed examples with the proper values from your Cloud console.
+
+```
+
+
+ hbase.client.connection.impl
+ com.google.cloud.bigtable.hbase1_x.BigtableConnection
+
+
+ google.bigtable.project.id
+ [YOUR-PROJECT-ID]
+
+
+ google.bigtable.instance.id
+ [YOUR-INSTANCE-ID]
+
+
+ google.bigtable.auth.service.account.enable
+ true
+
+
+ google.bigtable.auth.json.keyfile
+ [PATH-TO-YOUR-KEY-FILE]
+
+
+```
+
+If you have an existing HBase config directory, make sure to add it to the class path via `-cp <hbase-config-dir>:target/bigtable-deps/*`.
+
+### 7. Execute a Workload
+
+Switch to the root of the YCSB repo and choose the workload you want to run and `load` it first. With the CLI you must provide the column family, cluster properties and the ALPN jar to load.
+
+```
+bin/ycsb load hbase14 -p columnfamily=cf -cp 'target/bigtable-deps/*' -P workloads/workloada
+
+```
+
+The `load` step only executes inserts into the datastore. After loading data, run the same workload to mix reads with writes.
+
+```
+bin/ycsb run hbase14 -p columnfamily=cf -cp 'target/bigtable-deps/*' -P workloads/workloada
+
+```
diff --git a/hbase14/pom.xml b/hbase14/pom.xml
index 5058406e1e..eb1701e7ca 100644
--- a/hbase14/pom.xml
+++ b/hbase14/pom.xml
@@ -31,25 +31,8 @@ LICENSE file.
true
-
- true
-
- site.ycsb
- hbase10-binding
- ${project.version}
-
-
-
- org.apache.hbase
- hbase-client
-
-
-
site.ycsb
core
@@ -67,19 +50,11 @@ LICENSE file.
4.12
test
-
diff --git a/hbase14/src/main/java/site/ycsb/db/hbase14/HBaseClient14.java b/hbase14/src/main/java/site/ycsb/db/hbase14/HBaseClient14.java
index e27a10031d..917040a1f4 100644
--- a/hbase14/src/main/java/site/ycsb/db/hbase14/HBaseClient14.java
+++ b/hbase14/src/main/java/site/ycsb/db/hbase14/HBaseClient14.java
@@ -15,14 +15,516 @@
package site.ycsb.db.hbase14;
+import site.ycsb.ByteArrayByteIterator;
+import site.ycsb.ByteIterator;
+import site.ycsb.DBException;
+import site.ycsb.Status;
+import site.ycsb.measurements.Measurements;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.BufferedMutatorParams;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.IOException;
+import java.util.ConcurrentModificationException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.Vector;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY;
+import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT;
+
/**
- * HBase 1.4 client for YCSB framework.
- *
- * A modified version of HBaseClient (which targets HBase v1.4) utilizing the
- * shaded client.
- *
- * It should run equivalent to following the hbase098 binding README.
+ * HBase 1 client for YCSB framework.
*
+ * Intended for use with HBase's shaded client.
*/
-public class HBaseClient14 extends site.ycsb.db.HBaseClient10 {
+public class HBaseClient14 extends site.ycsb.DB {
+ private static final AtomicInteger THREAD_COUNT = new AtomicInteger(0);
+
+ private Configuration config = HBaseConfiguration.create();
+
+ private boolean debug = false;
+
+ private String tableName = "";
+
+ /**
+ * A Cluster Connection instance that is shared by all running ycsb threads.
+ * Needs to be initialized late so we pick up command-line configs if any.
+ * To ensure one instance only in a multi-threaded context, guard access
+ * with a 'lock' object.
+ * Access is guarded by synchronizing on {@link #THREAD_COUNT}.
+ */
+ private static Connection connection = null;
+
+ // Depending on the value of clientSideBuffering, either bufferedMutator
+ // (clientSideBuffering) or currentTable (!clientSideBuffering) will be used.
+ private Table currentTable = null;
+ private BufferedMutator bufferedMutator = null;
+
+ private String columnFamily = "";
+ private byte[] columnFamilyBytes;
+
+ /**
+ * Durability to use for puts and deletes.
+ */
+ private Durability durability = Durability.USE_DEFAULT;
+
+ /** Whether or not a page filter should be used to limit scan length. */
+ private boolean usePageFilter = true;
+
+ /**
+ * If true, buffer mutations on the client. This is the default behavior for
+ * HBaseClient. For measuring insert/update/delete latencies, client side
+ * buffering should be disabled.
+ */
+ private boolean clientSideBuffering = false;
+ private long writeBufferSize = 1024 * 1024 * 12;
+
+ /**
+ * Initialize any state for this DB. Called once per DB instance; there is one
+ * DB instance per client thread.
+ */
+ @Override
+ public void init() throws DBException {
+ if ("true"
+ .equals(getProperties().getProperty("clientbuffering", "false"))) {
+ this.clientSideBuffering = true;
+ }
+ if (getProperties().containsKey("writebuffersize")) {
+ writeBufferSize =
+ Long.parseLong(getProperties().getProperty("writebuffersize"));
+ }
+
+ if (getProperties().getProperty("durability") != null) {
+ this.durability =
+ Durability.valueOf(getProperties().getProperty("durability"));
+ }
+
+ if ("kerberos".equalsIgnoreCase(config.get("hbase.security.authentication"))) {
+ config.set("hadoop.security.authentication", "Kerberos");
+ UserGroupInformation.setConfiguration(config);
+ }
+
+ if ((getProperties().getProperty("principal")!=null)
+ && (getProperties().getProperty("keytab")!=null)) {
+ try {
+ UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"),
+ getProperties().getProperty("keytab"));
+ } catch (IOException e) {
+ System.err.println("Keytab file is not readable or not found");
+ throw new DBException(e);
+ }
+ }
+
+ String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
+ try {
+ THREAD_COUNT.getAndIncrement();
+ synchronized (THREAD_COUNT) {
+ if (connection == null) {
+ // Initialize if not set up already.
+ connection = ConnectionFactory.createConnection(config);
+
+ // Terminate right now if table does not exist, since the client
+ // will not propagate this error upstream once the workload
+ // starts.
+ final TableName tName = TableName.valueOf(table);
+ try (Admin admin = connection.getAdmin()) {
+ if (!admin.tableExists(tName)) {
+ throw new DBException("Table " + tName + " does not exists");
+ }
+ }
+ }
+ }
+ } catch (java.io.IOException e) {
+ throw new DBException(e);
+ }
+
+ if ((getProperties().getProperty("debug") != null)
+ && (getProperties().getProperty("debug").compareTo("true") == 0)) {
+ debug = true;
+ }
+
+ if ("false"
+ .equals(getProperties().getProperty("hbase.usepagefilter", "true"))) {
+ usePageFilter = false;
+ }
+
+ columnFamily = getProperties().getProperty("columnfamily");
+ if (columnFamily == null) {
+ System.err.println("Error, must specify a columnfamily for HBase table");
+ throw new DBException("No columnfamily specified");
+ }
+ columnFamilyBytes = Bytes.toBytes(columnFamily);
+ }
+
+ /**
+ * Cleanup any state for this DB. Called once per DB instance; there is one DB
+ * instance per client thread.
+ */
+ @Override
+ public void cleanup() throws DBException {
+ // Get the measurements instance as this is the only client that should
+ // count clean up time like an update if client-side buffering is
+ // enabled.
+ Measurements measurements = Measurements.getMeasurements();
+ try {
+ long st = System.nanoTime();
+ if (bufferedMutator != null) {
+ bufferedMutator.close();
+ }
+ if (currentTable != null) {
+ currentTable.close();
+ }
+ long en = System.nanoTime();
+ final String type = clientSideBuffering ? "UPDATE" : "CLEANUP";
+ measurements.measure(type, (int) ((en - st) / 1000));
+ int threadCount = THREAD_COUNT.decrementAndGet();
+ if (threadCount <= 0) {
+ // Means we are done so ok to shut down the Connection.
+ synchronized (THREAD_COUNT) {
+ if (connection != null) {
+ connection.close();
+ connection = null;
+ }
+ }
+ }
+ } catch (IOException e) {
+ throw new DBException(e);
+ }
+ }
+
+ public void getHTable(String table) throws IOException {
+ final TableName tName = TableName.valueOf(table);
+ this.currentTable = connection.getTable(tName);
+ if (clientSideBuffering) {
+ final BufferedMutatorParams p = new BufferedMutatorParams(tName);
+ p.writeBufferSize(writeBufferSize);
+ this.bufferedMutator = connection.getBufferedMutator(p);
+ }
+ }
+
+ /**
+ * Read a record from the database. Each field/value pair from the result will
+ * be stored in a HashMap.
+ *
+ * @param table
+ * The name of the table
+ * @param key
+ * The record key of the record to read.
+ * @param fields
+ * The list of fields to read, or null for all of them
+ * @param result
+ * A HashMap of field/value pairs for the result
+ * @return Zero on success, a non-zero error code on error
+ */
+ public Status read(String table, String key, Set fields,
+ Map result) {
+ // if this is a "new" table, init HTable object. Else, use existing one
+ if (!tableName.equals(table)) {
+ currentTable = null;
+ try {
+ getHTable(table);
+ tableName = table;
+ } catch (IOException e) {
+ System.err.println("Error accessing HBase table: " + e);
+ return Status.ERROR;
+ }
+ }
+
+ Result r = null;
+ try {
+ if (debug) {
+ System.out
+ .println("Doing read from HBase columnfamily " + columnFamily);
+ System.out.println("Doing read for key: " + key);
+ }
+ Get g = new Get(Bytes.toBytes(key));
+ if (fields == null) {
+ g.addFamily(columnFamilyBytes);
+ } else {
+ for (String field : fields) {
+ g.addColumn(columnFamilyBytes, Bytes.toBytes(field));
+ }
+ }
+ r = currentTable.get(g);
+ } catch (IOException e) {
+ if (debug) {
+ System.err.println("Error doing get: " + e);
+ }
+ return Status.ERROR;
+ } catch (ConcurrentModificationException e) {
+ // do nothing for now...need to understand HBase concurrency model better
+ return Status.ERROR;
+ }
+
+ if (r.isEmpty()) {
+ return Status.NOT_FOUND;
+ }
+
+ while (r.advance()) {
+ final Cell c = r.current();
+ result.put(Bytes.toString(CellUtil.cloneQualifier(c)),
+ new ByteArrayByteIterator(CellUtil.cloneValue(c)));
+ if (debug) {
+ System.out.println(
+ "Result for field: " + Bytes.toString(CellUtil.cloneQualifier(c))
+ + " is: " + Bytes.toString(CellUtil.cloneValue(c)));
+ }
+ }
+ return Status.OK;
+ }
+
+ /**
+ * Perform a range scan for a set of records in the database. Each field/value
+ * pair from the result will be stored in a HashMap.
+ *
+ * @param table
+ * The name of the table
+ * @param startkey
+ * The record key of the first record to read.
+ * @param recordcount
+ * The number of records to read
+ * @param fields
+ * The list of fields to read, or null for all of them
+ * @param result
+ * A Vector of HashMaps, where each HashMap is a set field/value
+ * pairs for one record
+ * @return Zero on success, a non-zero error code on error
+ */
+ @Override
+ public Status scan(String table, String startkey, int recordcount,
+ Set fields, Vector> result) {
+ // if this is a "new" table, init HTable object. Else, use existing one
+ if (!tableName.equals(table)) {
+ currentTable = null;
+ try {
+ getHTable(table);
+ tableName = table;
+ } catch (IOException e) {
+ System.err.println("Error accessing HBase table: " + e);
+ return Status.ERROR;
+ }
+ }
+
+ Scan s = new Scan(Bytes.toBytes(startkey));
+ // HBase has no record limit. Here, assume recordcount is small enough to
+ // bring back in one call.
+ // We get back recordcount records
+ s.setCaching(recordcount);
+ if (this.usePageFilter) {
+ s.setFilter(new PageFilter(recordcount));
+ }
+
+ // add specified fields or else all fields
+ if (fields == null) {
+ s.addFamily(columnFamilyBytes);
+ } else {
+ for (String field : fields) {
+ s.addColumn(columnFamilyBytes, Bytes.toBytes(field));
+ }
+ }
+
+ // get results
+ ResultScanner scanner = null;
+ try {
+ scanner = currentTable.getScanner(s);
+ int numResults = 0;
+ for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
+ // get row key
+ String key = Bytes.toString(rr.getRow());
+
+ if (debug) {
+ System.out.println("Got scan result for key: " + key);
+ }
+
+ HashMap rowResult =
+ new HashMap();
+
+ while (rr.advance()) {
+ final Cell cell = rr.current();
+ rowResult.put(Bytes.toString(CellUtil.cloneQualifier(cell)),
+ new ByteArrayByteIterator(CellUtil.cloneValue(cell)));
+ }
+
+ // add rowResult to result vector
+ result.add(rowResult);
+ numResults++;
+
+ // PageFilter does not guarantee that the number of results is <=
+ // pageSize, so this
+ // break is required.
+ if (numResults >= recordcount) {// if hit recordcount, bail out
+ break;
+ }
+ } // done with row
+ } catch (IOException e) {
+ if (debug) {
+ System.out.println("Error in getting/parsing scan result: " + e);
+ }
+ return Status.ERROR;
+ } finally {
+ if (scanner != null) {
+ scanner.close();
+ }
+ }
+
+ return Status.OK;
+ }
+
+ /**
+ * Update a record in the database. Any field/value pairs in the specified
+ * values HashMap will be written into the record with the specified record
+ * key, overwriting any existing values with the same field name.
+ *
+ * @param table
+ * The name of the table
+ * @param key
+ * The record key of the record to write
+ * @param values
+ * A HashMap of field/value pairs to update in the record
+ * @return Zero on success, a non-zero error code on error
+ */
+ @Override
+ public Status update(String table, String key,
+ Map values) {
+ // if this is a "new" table, init HTable object. Else, use existing one
+ if (!tableName.equals(table)) {
+ currentTable = null;
+ try {
+ getHTable(table);
+ tableName = table;
+ } catch (IOException e) {
+ System.err.println("Error accessing HBase table: " + e);
+ return Status.ERROR;
+ }
+ }
+
+ if (debug) {
+ System.out.println("Setting up put for key: " + key);
+ }
+ Put p = new Put(Bytes.toBytes(key));
+ p.setDurability(durability);
+ for (Map.Entry entry : values.entrySet()) {
+ byte[] value = entry.getValue().toArray();
+ if (debug) {
+ System.out.println("Adding field/value " + entry.getKey() + "/"
+ + Bytes.toStringBinary(value) + " to put request");
+ }
+ p.addColumn(columnFamilyBytes, Bytes.toBytes(entry.getKey()), value);
+ }
+
+ try {
+ if (clientSideBuffering) {
+ // removed Preconditions.checkNotNull, which throws NPE, in favor of NPE on next line
+ bufferedMutator.mutate(p);
+ } else {
+ currentTable.put(p);
+ }
+ } catch (IOException e) {
+ if (debug) {
+ System.err.println("Error doing put: " + e);
+ }
+ return Status.ERROR;
+ } catch (ConcurrentModificationException e) {
+ // do nothing for now...hope this is rare
+ return Status.ERROR;
+ }
+
+ return Status.OK;
+ }
+
+ /**
+ * Insert a record in the database. Any field/value pairs in the specified
+ * values HashMap will be written into the record with the specified record
+ * key.
+ *
+ * @param table
+ * The name of the table
+ * @param key
+ * The record key of the record to insert.
+ * @param values
+ * A HashMap of field/value pairs to insert in the record
+ * @return Zero on success, a non-zero error code on error
+ */
+ @Override
+ public Status insert(String table, String key,
+ Map values) {
+ return update(table, key, values);
+ }
+
+ /**
+ * Delete a record from the database.
+ *
+ * @param table
+ * The name of the table
+ * @param key
+ * The record key of the record to delete.
+ * @return Zero on success, a non-zero error code on error
+ */
+ @Override
+ public Status delete(String table, String key) {
+ // if this is a "new" table, init HTable object. Else, use existing one
+ if (!tableName.equals(table)) {
+ currentTable = null;
+ try {
+ getHTable(table);
+ tableName = table;
+ } catch (IOException e) {
+ System.err.println("Error accessing HBase table: " + e);
+ return Status.ERROR;
+ }
+ }
+
+ if (debug) {
+ System.out.println("Doing delete for key: " + key);
+ }
+
+ final Delete d = new Delete(Bytes.toBytes(key));
+ d.setDurability(durability);
+ try {
+ if (clientSideBuffering) {
+ // removed Preconditions.checkNotNull, which throws NPE, in favor of NPE on next line
+ bufferedMutator.mutate(d);
+ } else {
+ currentTable.delete(d);
+ }
+ } catch (IOException e) {
+ if (debug) {
+ System.err.println("Error doing delete: " + e);
+ }
+ return Status.ERROR;
+ }
+
+ return Status.OK;
+ }
+
+ // Only non-private for testing.
+ void setConfiguration(final Configuration newConfig) {
+ this.config = newConfig;
+ }
}
+
+/*
+ * For customized vim control set autoindent set si set shiftwidth=4
+ */
diff --git a/hbase20/README.md b/hbase20/README.md
deleted file mode 100644
index 27e183b359..0000000000
--- a/hbase20/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-
-
-# HBase (2.0+) Driver for YCSB
-This driver is a binding for the YCSB facilities to operate against a HBase 2.0+ Server cluster, using a shaded client that tries to avoid leaking third party libraries.
-
-See `hbase098/README.md` for a quickstart to setup HBase for load testing and common configuration details.
-
-## Configuration Options
-In addition to those options available for the `hbase098` binding, the following options are available for the `hbase20` binding:
-
-* `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput but data cannot be recovered in the event of a crash. The default is true.
-
diff --git a/hbase20/pom.xml b/hbase20/pom.xml
deleted file mode 100644
index a0dd77f45a..0000000000
--- a/hbase20/pom.xml
+++ /dev/null
@@ -1,85 +0,0 @@
-
-
-
-
- 4.0.0
-
- site.ycsb
- binding-parent
- 0.18.0-SNAPSHOT
- ../binding-parent/
-
-
- hbase20-binding
- HBase 2.0 DB Binding
-
-
-
- true
-
- true
-
-
-
- site.ycsb
- hbase10-binding
- ${project.version}
-
-
-
- org.apache.hbase
- hbase-client
-
-
-
-
- site.ycsb
- core
- ${project.version}
- provided
-
-
- org.apache.hbase
- hbase-shaded-client
- ${hbase20.version}
-
-
- junit
- junit
- 4.12
- test
-
-
-
-
diff --git a/hbase20/src/main/java/site/ycsb/db/hbase20/HBaseClient20.java b/hbase20/src/main/java/site/ycsb/db/hbase20/HBaseClient20.java
deleted file mode 100644
index 77b6eb8598..0000000000
--- a/hbase20/src/main/java/site/ycsb/db/hbase20/HBaseClient20.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License"); you
- * may not use this file except in compliance with the License. You
- * may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License. See accompanying
- * LICENSE file.
- */
-
-package site.ycsb.db.hbase20;
-
-/**
- * HBase 2.0 client for YCSB framework.
- *
- * A modified version of HBaseClient (which targets HBase v2.0) utilizing the
- * shaded client.
- *
- * It should run equivalent to following the hbase098 binding README.
- *
- */
-public class HBaseClient20 extends site.ycsb.db.HBaseClient10 {
-}
diff --git a/hbase20/src/main/java/site/ycsb/db/hbase20/package-info.java b/hbase20/src/main/java/site/ycsb/db/hbase20/package-info.java
deleted file mode 100644
index 027bb432c3..0000000000
--- a/hbase20/src/main/java/site/ycsb/db/hbase20/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you
- * may not use this file except in compliance with the License. You
- * may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License. See accompanying
- * LICENSE file.
- */
-
-/**
- * The YCSB binding for HBase
- * using the HBase 2.0+ shaded API.
- */
-package site.ycsb.db.hbase20;
-
diff --git a/hbase20/src/test/resources/hbase-site.xml b/hbase20/src/test/resources/hbase-site.xml
deleted file mode 100644
index a8b29e451f..0000000000
--- a/hbase20/src/test/resources/hbase-site.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
-
-
- hbase.master.info.port
- -1
- The port for the hbase master web UI
- Set to -1 if you do not want the info server to run.
-
-
-
- hbase.regionserver.info.port
- -1
- The port for the hbase regionserver web UI
- Set to -1 if you do not want the info server to run.
-
-
-
diff --git a/hbase20/src/test/resources/log4j.properties b/hbase20/src/test/resources/log4j.properties
deleted file mode 100644
index a9df32e044..0000000000
--- a/hbase20/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright (c) 2015 YCSB contributors. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you
-# may not use this file except in compliance with the License. You
-# may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing
-# permissions and limitations under the License. See accompanying
-# LICENSE file.
-#
-
-# Root logger option
-log4j.rootLogger=WARN, stderr
-
-log4j.appender.stderr=org.apache.log4j.ConsoleAppender
-log4j.appender.stderr.target=System.err
-log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
-log4j.appender.stderr.layout.conversionPattern=%d{yyyy/MM/dd HH:mm:ss} %-5p %c %x - %m%n
-
-# Suppress messages from ZKTableStateManager: Creates a large number of table
-# state change messages.
-log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKTableStateManager=ERROR
diff --git a/hbase098/README.md b/hbase22/README.md
similarity index 74%
rename from hbase098/README.md
rename to hbase22/README.md
index cfc8cdbf2f..b531fc440c 100644
--- a/hbase098/README.md
+++ b/hbase22/README.md
@@ -1,5 +1,5 @@
-# HBase (0.98.x) Driver for YCSB
-This driver is a binding for the YCSB facilities to operate against a HBase 0.98.x Server cluster.
-To run against an HBase >= 1.0 cluster, use the `hbase10` binding.
+# HBase (2.2+) Driver for YCSB
+This driver is a binding for the YCSB facilities to operate against a HBase 2 cluster, using a shaded client that tries to avoid leaking third party libraries.
-## Quickstart
-
-### 1. Start a HBase Server
+# 1. Start a HBase Server
You need to start a single node or a cluster to point the client at. Please see [Apache HBase Reference Guide](http://hbase.apache.org/book.html) for more details and instructions.
-### 2. Set up YCSB
-You need to clone the repository and compile everything.
+# 2. Set up YCSB
-```
-git clone git://github.com/brianfrankcooper/YCSB.git
-cd YCSB
-mvn clean package
-```
+Download the [latest YCSB](https://github.com/brianfrankcooper/YCSB/releases/latest) file. Follow the instructions.
-### 3. Create a HBase table for testing
+# 3. Create a HBase table for testing
For best results, use the pre-splitting strategy recommended in [HBASE-4163](https://issues.apache.org/jira/browse/HBASE-4163):
@@ -44,28 +36,28 @@ hbase(main):002:0> create 'usertable', 'family', {SPLITS => (1..n_splits).map {|
*Failing to do so will cause all writes to initially target a single region server*.
-### 4. Run the Workload
+# 4. Run the Workload
Before you can actually run the workload, you need to "load" the data first.
You should specify a HBase config directory(or any other directory containing your hbase-site.xml) and a table name and a column family(-cp is used to set java classpath and -p is used to set various properties).
```
-bin/ycsb load hbase -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family
+bin/ycsb load hbase22 -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family
```
Then, you can run the workload:
```
-bin/ycsb run hbase -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family
+bin/ycsb run hbase22 -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family
```
Please see the general instructions in the `doc` folder if you are not sure how it all works. You can apply additional properties (as seen in the next section) like this:
```
-bin/ycsb run hbase -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family -p clientbuffering=true
+bin/ycsb run hbase22 -P workloads/workloada -cp /HBASE-HOME-DIR/conf -p table=usertable -p columnfamily=family -p clientbuffering=true
```
-## Configuration Options
+# Configuration Options
Following options can be configurable using `-p`.
* `columnfamily`: The HBase column family to target.
@@ -73,10 +65,12 @@ Following options can be configurable using `-p`.
* `hbase.usepagefilter` : If true, HBase
[PageFilter](https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/filter/PageFilter.html)s
are used to limit the number of records consumed in a scan operation. The default is true.
-* `principal`: If testing need to be done against a secure HBase cluster using Kerberos Keytab,
+* `principal`: If testing needs to be done against a secure HBase cluster using Kerberos Keytab,
this property can be used to pass the principal in the keytab file.
* `keytab`: The Kerberos keytab file name and location can be passed through this property.
* `clientbuffering`: Whether or not to use client side buffering and batching of write operations. This can significantly improve performance and defaults to true.
* `writebuffersize`: The maximum amount, in bytes, of data to buffer on the client side before a flush is forced. The default is 12MB. Only used when `clientbuffering` is true.
+* `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput but data cannot be recovered in the event of a crash. The default is true.
Additional HBase settings should be provided in the `hbase-site.xml` file located in your `/HBASE-HOME-DIR/conf` directory. Typically this will be `/etc/hbase/conf`.
+
diff --git a/hbase10/pom.xml b/hbase22/pom.xml
similarity index 72%
rename from hbase10/pom.xml
rename to hbase22/pom.xml
index 753dd67914..ea2e731474 100644
--- a/hbase10/pom.xml
+++ b/hbase22/pom.xml
@@ -25,32 +25,25 @@ LICENSE file.
../binding-parent/
- hbase10-binding
- HBase 1.0 DB Binding
+ hbase22-binding
+ HBase 2.2 DB Binding
+
true
- true
- true
-
- org.apache.hbase
- hbase-client
- ${hbase10.version}
-
-
- jdk.tools
- jdk.tools
-
-
-
site.ycsb
core
${project.version}
provided
+
+ org.apache.hbase
+ hbase-shaded-client
+ ${hbase22.version}
+
junit
junit
@@ -59,15 +52,15 @@ LICENSE file.
org.apache.hbase
- hbase-testing-util
- ${hbase10.version}
+ hbase-shaded-testing-util
+ ${hbase22.version}
test
-
-
- jdk.tools
- jdk.tools
-
-
+
+
+
+ org.slf4j
+ slf4j-log4j12
+ 1.7.25
diff --git a/hbase10/src/main/java/site/ycsb/db/HBaseClient10.java b/hbase22/src/main/java/site/ycsb/db/hbase22/HBaseClient22.java
similarity index 98%
rename from hbase10/src/main/java/site/ycsb/db/HBaseClient10.java
rename to hbase22/src/main/java/site/ycsb/db/hbase22/HBaseClient22.java
index 9a93aa61dd..976a44aa41 100644
--- a/hbase10/src/main/java/site/ycsb/db/HBaseClient10.java
+++ b/hbase22/src/main/java/site/ycsb/db/hbase22/HBaseClient22.java
@@ -13,7 +13,7 @@
* LICENSE file.
*/
-package site.ycsb.db;
+package site.ycsb.db.hbase22;
import site.ycsb.ByteArrayByteIterator;
import site.ycsb.ByteIterator;
@@ -55,15 +55,11 @@
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT;
/**
- * HBase 1.0 client for YCSB framework.
+ * HBase 2 client for YCSB framework.
*
- * A modified version of HBaseClient (which targets HBase v0.9) utilizing the
- * HBase 1.0.0 API.
- *
- * This client also adds toggleable client-side buffering and configurable write
- * durability.
+ * Intended for use with HBase's shaded client.
*/
-public class HBaseClient10 extends site.ycsb.DB {
+public class HBaseClient22 extends site.ycsb.DB {
private static final AtomicInteger THREAD_COUNT = new AtomicInteger(0);
private Configuration config = HBaseConfiguration.create();
diff --git a/hbase10/src/main/java/site/ycsb/db/package-info.java b/hbase22/src/main/java/site/ycsb/db/hbase22/package-info.java
similarity index 92%
rename from hbase10/src/main/java/site/ycsb/db/package-info.java
rename to hbase22/src/main/java/site/ycsb/db/hbase22/package-info.java
index a6a3768fbd..e4d1dca571 100644
--- a/hbase10/src/main/java/site/ycsb/db/package-info.java
+++ b/hbase22/src/main/java/site/ycsb/db/hbase22/package-info.java
@@ -17,7 +17,7 @@
/**
* The YCSB binding for HBase
- * using the HBase 1.0.0 API.
+ * using the HBase 2 shaded API.
*/
-package site.ycsb.db;
+package site.ycsb.db.hbase22;
diff --git a/hbase20/src/test/java/site/ycsb/db/hbase20/HBaseClient20Test.java b/hbase22/src/test/java/site/ycsb/db/hbase22/HBaseClient22Test.java
similarity index 96%
rename from hbase20/src/test/java/site/ycsb/db/hbase20/HBaseClient20Test.java
rename to hbase22/src/test/java/site/ycsb/db/hbase22/HBaseClient22Test.java
index c1cdd569c2..d1ca69b3e7 100644
--- a/hbase20/src/test/java/site/ycsb/db/hbase20/HBaseClient20Test.java
+++ b/hbase22/src/test/java/site/ycsb/db/hbase22/HBaseClient22Test.java
@@ -13,7 +13,7 @@
* LICENSE file.
*/
-package site.ycsb.db.hbase20;
+package site.ycsb.db.hbase22;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT;
@@ -52,14 +52,14 @@
import java.util.Vector;
/**
- * Integration tests for the YCSB HBase client 2.0, using an HBase minicluster.
+ * Integration tests for the YCSB HBase 2 client using an HBase minicluster.
*/
-public class HBaseClient20Test {
+public class HBaseClient22Test {
private final static String COLUMN_FAMILY = "cf";
private static HBaseTestingUtility testingUtil;
- private HBaseClient20 client;
+ private HBaseClient22 client;
private Table table = null;
private String tableName;
@@ -99,7 +99,7 @@ public static void tearDownClass() throws Exception {
*/
@Before
public void setUp() throws Exception {
- client = new HBaseClient20();
+ client = new HBaseClient22();
client.setConfiguration(new Configuration(testingUtil.getConfiguration()));
Properties p = new Properties();
@@ -119,7 +119,7 @@ public void setUp() throws Exception {
@After
public void tearDown() throws Exception {
table.close();
- testingUtil.deleteTable(tableName);
+ testingUtil.deleteTable(TableName.valueOf(tableName));
}
@Test
diff --git a/hbase10/src/test/resources/hbase-site.xml b/hbase22/src/test/resources/hbase-site.xml
similarity index 100%
rename from hbase10/src/test/resources/hbase-site.xml
rename to hbase22/src/test/resources/hbase-site.xml
diff --git a/hbase10/src/test/resources/log4j.properties b/hbase22/src/test/resources/log4j.properties
similarity index 100%
rename from hbase10/src/test/resources/log4j.properties
rename to hbase22/src/test/resources/log4j.properties
diff --git a/pom.xml b/pom.xml
index 6d1ba1af99..c86448e5e0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -127,11 +127,8 @@ LICENSE file.
1.2.0
1.4.0
4.0.0
- 0.98.14-hadoop2
- 1.0.2
- 1.2.5
- 1.4.2
- 2.0.0
+ 1.4.12
+ 2.2.3
0.9.5.6
2.7.6
7.2.2.Final
@@ -181,11 +178,8 @@ LICENSE file.
googlebigtable
googledatastore
griddb
- hbase098
- hbase10
- hbase12
hbase14
- hbase20
+ hbase22
hypertable
ignite
infinispan