diff --git a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala
index 4edf8fa13a205..613683ca40501 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala
@@ -141,8 +141,12 @@ class JavaStreamingContext(val ssc: StreamingContext) {
    */
  def this(path: String, hadoopConf: Configuration) = this(new StreamingContext(path, hadoopConf))
 
+
+  @deprecated("use sparkContext", "0.9.0")
+  val sc: JavaSparkContext = sparkContext
+
   /** The underlying SparkContext */
-  val sc: JavaSparkContext = new JavaSparkContext(ssc.sc)
+  val sparkContext = new JavaSparkContext(ssc.sc)
 
   /**
    * Create a input stream from network source hostname:port. Data is received using
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/util/RateLimitedOutputStream.scala b/streaming/src/main/scala/org/apache/spark/streaming/util/RateLimitedOutputStream.scala
index b9c0596378b4f..179fd7593982c 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/util/RateLimitedOutputStream.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/util/RateLimitedOutputStream.scala
@@ -22,6 +22,7 @@ import scala.annotation.tailrec
 import java.io.OutputStream
 import java.util.concurrent.TimeUnit._
 
+private[streaming]
 class RateLimitedOutputStream(out: OutputStream, bytesPerSec: Int) extends OutputStream {
   val SYNC_INTERVAL = NANOSECONDS.convert(10, SECONDS)
   val CHUNK_SIZE = 8192
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextHelper.scala b/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextHelper.scala
index 5b6c048a39620..07021ebb5802a 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextHelper.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextHelper.scala
@@ -22,6 +22,7 @@ import org.apache.spark.SparkContext._
 import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap}
 import scala.collection.JavaConversions.mapAsScalaMap
 
+private[streaming]
 object RawTextHelper {
 
   /**
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextSender.scala b/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextSender.scala
index 463617a713b22..684b38e8b3102 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextSender.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextSender.scala
@@ -33,6 +33,7 @@ import org.apache.spark.util.IntParam
  * A helper program that sends blocks of Kryo-serialized text strings out on a socket at a
  * specified rate. Used to feed data into RawInputDStream.
  */
+private[streaming]
 object RawTextSender extends Logging {
   def main(args: Array[String]) {
     if (args.length != 4) {
diff --git a/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java b/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
index 8b7d7709bf2c5..4fbbce9b8b90e 100644
--- a/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
+++ b/streaming/src/test/java/org/apache/spark/streaming/JavaAPISuite.java
@@ -297,9 +297,9 @@ public void testQueueStream() {
         Arrays.asList(7,8,9));
 
     JavaSparkContext jsc = new JavaSparkContext(ssc.ssc().sc());
-    JavaRDD<Integer> rdd1 = ssc.sc().parallelize(Arrays.asList(1, 2, 3));
-    JavaRDD<Integer> rdd2 = ssc.sc().parallelize(Arrays.asList(4, 5, 6));
-    JavaRDD<Integer> rdd3 = ssc.sc().parallelize(Arrays.asList(7,8,9));
+    JavaRDD<Integer> rdd1 = ssc.sparkContext().parallelize(Arrays.asList(1, 2, 3));
+    JavaRDD<Integer> rdd2 = ssc.sparkContext().parallelize(Arrays.asList(4, 5, 6));
+    JavaRDD<Integer> rdd3 = ssc.sparkContext().parallelize(Arrays.asList(7,8,9));
 
     LinkedList<JavaRDD<Integer>> rdds = Lists.newLinkedList();
     rdds.add(rdd1);
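
Migration note (not part of the patch): the JavaAPISuite hunk above shows the intended replacement, ssc.sparkContext() instead of the now-deprecated ssc.sc(). A minimal, self-contained sketch for external callers follows; the class name, master URL, app name, and batch duration are illustrative assumptions, not taken from this diff.

    import java.util.Arrays;

    import org.apache.spark.api.java.JavaRDD;
    import org.apache.spark.streaming.Duration;
    import org.apache.spark.streaming.api.java.JavaStreamingContext;

    public class SparkContextMigration {
      public static void main(String[] args) {
        // Hypothetical local setup; any master URL, app name, and batch
        // duration would do here.
        JavaStreamingContext ssc =
            new JavaStreamingContext("local[2]", "migration-example", new Duration(1000));
        // Before this patch: ssc.sc().parallelize(...). The sc accessor still
        // compiles after the patch but emits a deprecation warning.
        JavaRDD<Integer> rdd = ssc.sparkContext().parallelize(Arrays.asList(1, 2, 3));
        System.out.println(rdd.count());
        ssc.stop();
      }
    }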