Removing examples in Hadoop and RDD class
pwendell committed Jul 29, 2014
1 parent 64d405f commit 8407308
Showing 4 changed files with 17 additions and 13 deletions.
core/src/main/scala/org/apache/spark/CacheManager.scala (0 additions, 3 deletions)

@@ -45,9 +45,6 @@ private[spark] class CacheManager(blockManager: BlockManager) extends Logging {
       case Some(blockResult) =>
         // Partition is already materialized, so just return its values
         context.taskMetrics.inputMetrics = Some(blockResult.inputMetrics)
-        if (blockResult.inputMetrics.bytesRead > 0) {
-          rdd.inputBytes += blockResult.inputMetrics.bytesRead
-        }
         new InterruptibleIterator(context, blockResult.data.asInstanceOf[Iterator[T]])
 
       case None =>
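The deleted branch stops adding cached-read sizes to the per-RDD inputBytes accumulator (itself removed from RDD.scala below); the byte count is still recorded on the task's TaskMetrics one line above. A minimal sketch of how those metrics remain observable from a listener, assuming the Spark 1.x listener API; the class name InputBytesListener is ours:

    import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}

    // Illustrative listener: reads the same InputMetrics that the
    // surviving line above stores on context.taskMetrics.
    class InputBytesListener extends SparkListener {
      override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
        // inputMetrics is an Option[InputMetrics]; also guard against a
        // null TaskMetrics on failed tasks.
        for (metrics <- Option(taskEnd.taskMetrics); in <- metrics.inputMetrics) {
          println(s"task read ${in.bytesRead} bytes")
        }
      }
    }

    // Register on the driver before running jobs:
    // sc.addSparkListener(new InputBytesListener)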
core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala (0 additions, 4 deletions)

@@ -119,9 +119,6 @@ class HadoopRDD[K, V](
       minPartitions)
   }
 
-  private val accName = s"rdd-$id.input.bytes.hadoop"
-  val hadoopInputBytes = sc.accumulator(0L, accName)(SparkContext.LongAccumulatorParam)
-
   protected val jobConfCacheKey = "rdd_%d_job_conf".format(id)
 
   protected val inputFormatCacheKey = "rdd_%d_input_format".format(id)

@@ -208,7 +205,6 @@ class HadoopRDD[K, V](
          * always at record boundaries, so tasks may need to read into other splits to complete
          * a record. */
         inputMetrics.bytesRead = split.inputSplit.value.getLength()
-        hadoopInputBytes += split.inputSplit.value.getLength()
       } catch {
         case e: java.io.IOException =>
           logWarning("Unable to get input size to set InputMetrics for task", e)
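Both hunks remove the prototype hadoopInputBytes accumulator in favor of the InputMetrics line kept above. For reference, a short sketch of the named-accumulator API the deleted lines used, under the assumption of a local Spark 1.x context; the RDD id in the name and the byte values are illustrative only:

    import org.apache.spark.{SparkConf, SparkContext}

    val conf = new SparkConf().setAppName("acc-sketch").setMaster("local[2]")
    val sc = new SparkContext(conf)

    // A named Long accumulator, mirroring the deleted
    // "rdd-$id.input.bytes.hadoop" naming scheme (id hard-coded here);
    // named accumulators can be surfaced in the web UI.
    val bytesAcc = sc.accumulator(0L, "rdd-0.input.bytes.hadoop")(SparkContext.LongAccumulatorParam)

    sc.parallelize(Seq(512L, 512L)).foreach(n => bytesAcc += n) // tasks add
    println(bytesAcc.value) // the driver reads the total: 1024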
core/src/main/scala/org/apache/spark/rdd/RDD.scala (0 additions, 6 deletions)

@@ -1270,10 +1270,4 @@ abstract class RDD[T: ClassTag](
   def toJavaRDD() : JavaRDD[T] = {
     new JavaRDD(this)(elementClassTag)
   }
-
-  // =======================================================================
-  // Common metrics
-  // =======================================================================
-  // Input bytes if this RDD was read from persisted data or a filesystem
-  val inputBytes = sc.accumulator(0L, s"rdd-$id.input.bytes.persisted")
 }
File in org.apache.spark.scheduler (17 additions, 0 deletions)

@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.spark.scheduler
 
 import org.apache.spark.annotation.DeveloperApi
