Commit

Merge branch 'master' into SPARK-6263
Lewuathe committed May 11, 2015
2 parents 62a9c7e + d70a076 commit d254be7
Showing 26 changed files with 295 additions and 53 deletions.
4 changes: 2 additions & 2 deletions R/pkg/R/RDD.R
@@ -67,8 +67,8 @@ setMethod("initialize", "RDD", function(.Object, jrdd, serializedMode,
})

setMethod("show", "RDD",
- function(.Object) {
-   cat(paste(callJMethod(.Object@jrdd, "toString"), "\n", sep=""))
+ function(object) {
+   cat(paste(callJMethod(getJRDD(object), "toString"), "\n", sep=""))
})

setMethod("initialize", "PipelinedRDD", function(.Object, prev, func, jrdd_val) {
19 changes: 18 additions & 1 deletion docs/mllib-dimensionality-reduction.md
@@ -137,7 +137,7 @@ statistical method to find a rotation such that the first coordinate has the largest variance
possible, and each succeeding coordinate in turn has the largest variance possible. The columns of
the rotation matrix are called principal components. PCA is used widely in dimensionality reduction.
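
For reference (a standard textbook formulation, not taken from this commit): given a centered data
matrix `\(A \in \mathbb{R}^{n \times d}\)`, the principal components are the top eigenvectors of the
sample covariance matrix

`\[
\frac{1}{n-1} A^T A = V \Lambda V^T ,
\]`

and projecting onto the first `\(k\)` columns of `\(V\)` (i.e. computing `\(A V_{:,1:k}\)`) yields the
`\(k\)`-dimensional representation that retains the most variance.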

- MLlib supports PCA for tall-and-skinny matrices stored in row-oriented format.
+ MLlib supports PCA for tall-and-skinny matrices stored in row-oriented format, as well as for any `Vector`s via the `PCA` feature transformer.

<div class="codetabs">
<div data-lang="scala" markdown="1">
@@ -157,6 +157,23 @@ val pc: Matrix = mat.computePrincipalComponents(10) // Principal components are stored in a local dense matrix.
// Project the rows to the linear space spanned by the top 10 principal components.
val projected: RowMatrix = mat.multiply(pc)
{% endhighlight %}

The following code demonstrates how to compute principal components on source vectors
and use them to project the vectors into a low-dimensional space while keeping associated labels:

{% highlight scala %}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.feature.PCA

val data: RDD[LabeledPoint] = ...

// Compute the top 10 principal components.
val pca = new PCA(10).fit(data.map(_.features))

// Project vectors to the linear space spanned by the top 10 principal components, keeping the label
val projected = data.map(p => p.copy(features = pca.transform(p.features)))
{% endhighlight %}
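
For reference, this projection agrees with the `RowMatrix`-based projection shown earlier, since both
paths compute the principal components with `computePrincipalComponents`. A quick sketch of the
equivalence, reusing `data` and `projected` from the snippet above:

{% highlight scala %}
import org.apache.spark.mllib.linalg.distributed.RowMatrix

// Project the same feature vectors through the RowMatrix API.
val mat = new RowMatrix(data.map(_.features))
val viaRowMatrix = mat.multiply(mat.computePrincipalComponents(10)).rows

// `viaRowMatrix` holds the same set of projected vectors as `projected.map(_.features)`.
{% endhighlight %}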

</div>

<div data-lang="java" markdown="1">
55 changes: 54 additions & 1 deletion docs/mllib-feature-extraction.md
Expand Up @@ -507,7 +507,6 @@ v_N

The example below demonstrates how to load a simple vectors file, extract a set of vectors, and then transform those vectors using a transforming vector value.


<div class="codetabs">
<div data-lang="scala">
{% highlight scala %}
@@ -531,3 +530,57 @@ val transformedData2 = parsedData.map(x => transformer.transform(x))
</div>


## PCA

`PCA` is a feature transformer that projects vectors into a low-dimensional space using principal component analysis.
Details can be found in the [dimensionality reduction](mllib-dimensionality-reduction.html) guide.

### Example

The following code demonstrates how to compute principal components on source vectors,
project the vectors into a low-dimensional space while keeping the associated labels,
and train a [linear regression](mllib-linear-methods.html) model on both the original and the projected data:

<div class="codetabs">
<div data-lang="scala">
{% highlight scala %}
import org.apache.spark.mllib.regression.LinearRegressionWithSGD
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.feature.PCA

// Load and parse the data: each line holds a label, a comma, and space-separated features.
val data = sc.textFile("data/mllib/ridge-data/lpsa.data").map { line =>
val parts = line.split(',')
LabeledPoint(parts(0).toDouble, Vectors.dense(parts(1).split(' ').map(_.toDouble)))
}.cache()

// Split the data into training (60%) and test (40%) sets.
val splits = data.randomSplit(Array(0.6, 0.4), seed = 11L)
val training = splits(0).cache()
val test = splits(1)

// Fit a PCA model that keeps half as many components as there are original features,
// then project the training and test sets into that lower-dimensional space.
val pca = new PCA(training.first().features.size / 2).fit(data.map(_.features))
val training_pca = training.map(p => p.copy(features = pca.transform(p.features)))
val test_pca = test.map(p => p.copy(features = pca.transform(p.features)))

// Train a linear regression model on the original features and another on the projected features.
val numIterations = 100
val model = LinearRegressionWithSGD.train(training, numIterations)
val model_pca = LinearRegressionWithSGD.train(training_pca, numIterations)

val valuesAndPreds = test.map { point =>
val score = model.predict(point.features)
(score, point.label)
}

val valuesAndPreds_pca = test_pca.map { point =>
val score = model_pca.predict(point.features)
(score, point.label)
}

// Compare the mean squared error of the two models on the test set.
val MSE = valuesAndPreds.map { case (v, p) => math.pow(v - p, 2) }.mean()
val MSE_pca = valuesAndPreds_pca.map { case (v, p) => math.pow(v - p, 2) }.mean()

println("Mean Squared Error = " + MSE)
println("PCA Mean Squared Error = " + MSE_pca)
{% endhighlight %}
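
The same mean squared errors could also be computed with MLlib's `RegressionMetrics` helper; a brief
sketch, reusing `valuesAndPreds` and `valuesAndPreds_pca` from above:

{% highlight scala %}
import org.apache.spark.mllib.evaluation.RegressionMetrics

// RegressionMetrics expects an RDD of (prediction, observation) pairs.
val metrics = new RegressionMetrics(valuesAndPreds)
val metrics_pca = new RegressionMetrics(valuesAndPreds_pca)

println("Mean Squared Error = " + metrics.meanSquaredError)
println("PCA Mean Squared Error = " + metrics_pca.meanSquaredError)
{% endhighlight %}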
</div>
</div>
93 changes: 93 additions & 0 deletions mllib/src/main/scala/org/apache/spark/mllib/feature/PCA.scala
@@ -0,0 +1,93 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.mllib.feature

import org.apache.spark.api.java.JavaRDD
import org.apache.spark.mllib.linalg._
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.rdd.RDD

/**
* A feature transformer that projects vectors to a low-dimensional space using PCA.
*
* @param k number of principal components
*/
class PCA(val k: Int) {
require(k >= 1, s"PCA requires a number of principal components k >= 1 but was given $k")

/**
* Computes a [[PCAModel]] that contains the principal components of the input vectors.
*
* @param sources source vectors
*/
def fit(sources: RDD[Vector]): PCAModel = {
require(k <= sources.first().size,
  s"source vector size ${sources.first().size} must be no less than k=$k")

val mat = new RowMatrix(sources)
val pc = mat.computePrincipalComponents(k) match {
case dm: DenseMatrix =>
dm
case sm: SparseMatrix =>
/* Convert a sparse matrix to dense.
*
* RowMatrix.computePrincipalComponents always returns a dense matrix.
* The following code is a safeguard.
*/
sm.toDense
case m =>
throw new IllegalArgumentException("Unsupported matrix format. Expected " +
s"SparseMatrix or DenseMatrix. Instead got: ${m.getClass}")

}
new PCAModel(k, pc)
}

/** Java-friendly version of [[fit()]] */
def fit(sources: JavaRDD[Vector]): PCAModel = fit(sources.rdd)
}

/**
* Model fitted by [[PCA]] that can project vectors to a low-dimensional space using PCA.
*
* @param k number of principal components.
* @param pc a principal components Matrix. Each column is one principal component.
*/
class PCAModel private[mllib] (val k: Int, val pc: DenseMatrix) extends VectorTransformer {
/**
* Transform a vector by computed Principal Components.
*
* @param vector vector to be transformed.
* Vector must be the same length as the source vectors given to [[PCA.fit()]].
* @return transformed vector. Vector will be of length k.
*/
override def transform(vector: Vector): Vector = {
vector match {
case dv: DenseVector =>
pc.transpose.multiply(dv)
case SparseVector(size, indices, values) =>
/* SparseVector -> single row SparseMatrix */
val sm = Matrices.sparse(size, 1, Array(0, indices.length), indices, values).transpose
val projection = sm.multiply(pc)
Vectors.dense(projection.values)
case _ =>
throw new IllegalArgumentException("Unsupported vector format. Expected " +
s"SparseVector or DenseVector. Instead got: ${vector.getClass}")
}
}
}
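
A minimal usage sketch of the transformer defined above (not part of the source file; `sc` is an
existing SparkContext and the input vectors are illustrative only):

{% highlight scala %}
import org.apache.spark.mllib.feature.PCA
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.rdd.RDD

// Illustrative 3-dimensional input vectors.
val vectors: RDD[Vector] = sc.parallelize(Seq(
  Vectors.dense(1.0, 0.0, 3.0),
  Vectors.dense(2.0, 1.0, 0.0),
  Vectors.dense(4.0, 5.0, 6.0)))

// Fit a model with the top 2 principal components and project every vector.
val model: PCAModel = new PCA(2).fit(vectors)
val projected: RDD[Vector] = vectors.map(model.transform)
{% endhighlight %}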
48 changes: 48 additions & 0 deletions mllib/src/test/scala/org/apache/spark/mllib/feature/PCASuite.scala
@@ -0,0 +1,48 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.mllib.feature

import org.scalatest.FunSuite

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.mllib.util.MLlibTestSparkContext

class PCASuite extends FunSuite with MLlibTestSparkContext {

private val data = Array(
Vectors.sparse(5, Seq((1, 1.0), (3, 7.0))),
Vectors.dense(2.0, 0.0, 3.0, 4.0, 5.0),
Vectors.dense(4.0, 0.0, 0.0, 6.0, 7.0)
)

private lazy val dataRDD = sc.parallelize(data, 2)

test("correct computation using the PCA wrapper") {
val k = dataRDD.count().toInt
val pca = new PCA(k).fit(dataRDD)

// Compute the same projection directly through RowMatrix for comparison.
val mat = new RowMatrix(dataRDD)
val pc = mat.computePrincipalComponents(k)

val pca_transform = pca.transform(dataRDD).collect()
val mat_multiply = mat.multiply(pc).rows.collect()

assert(pca_transform.toSet === mat_multiply.toSet)
}
}
6 changes: 3 additions & 3 deletions python/pyspark/ml/param/_shared_params_code_gen.py
@@ -88,12 +88,12 @@ def get$Name(self):
print("\n# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.\n")
print("from pyspark.ml.param import Param, Params\n\n")
shared = [
- ("maxIter", "max number of iterations", None),
- ("regParam", "regularization constant", None),
+ ("maxIter", "max number of iterations (>= 0)", None),
+ ("regParam", "regularization parameter (>= 0)", None),
("featuresCol", "features column name", "'features'"),
("labelCol", "label column name", "'label'"),
("predictionCol", "prediction column name", "'prediction'"),
- ("rawPredictionCol", "raw prediction column name", "'rawPrediction'"),
+ ("rawPredictionCol", "raw prediction (a.k.a. confidence) column name", "'rawPrediction'"),
("inputCol", "input column name", None),
("inputCols", "input column names", None),
("outputCol", "output column name", None),
30 changes: 14 additions & 16 deletions python/pyspark/ml/param/shared.py
@@ -22,16 +22,16 @@

class HasMaxIter(Params):
"""
- Mixin for param maxIter: max number of iterations.
+ Mixin for param maxIter: max number of iterations (>= 0).
"""

# a placeholder to make it appear in the generated doc
- maxIter = Param(Params._dummy(), "maxIter", "max number of iterations")
+ maxIter = Param(Params._dummy(), "maxIter", "max number of iterations (>= 0)")

def __init__(self):
super(HasMaxIter, self).__init__()
- #: param for max number of iterations
- self.maxIter = Param(self, "maxIter", "max number of iterations")
+ #: param for max number of iterations (>= 0)
+ self.maxIter = Param(self, "maxIter", "max number of iterations (>= 0)")
if None is not None:
self._setDefault(maxIter=None)

@@ -51,16 +51,16 @@ def getMaxIter(self):

class HasRegParam(Params):
"""
- Mixin for param regParam: regularization constant.
+ Mixin for param regParam: regularization parameter (>= 0).
"""

# a placeholder to make it appear in the generated doc
- regParam = Param(Params._dummy(), "regParam", "regularization constant")
+ regParam = Param(Params._dummy(), "regParam", "regularization parameter (>= 0)")

def __init__(self):
super(HasRegParam, self).__init__()
- #: param for regularization constant
- self.regParam = Param(self, "regParam", "regularization constant")
+ #: param for regularization parameter (>= 0)
+ self.regParam = Param(self, "regParam", "regularization parameter (>= 0)")
if None is not None:
self._setDefault(regParam=None)

@@ -167,16 +167,16 @@ def getPredictionCol(self):

class HasRawPredictionCol(Params):
"""
- Mixin for param rawPredictionCol: raw prediction column name.
+ Mixin for param rawPredictionCol: raw prediction (a.k.a. confidence) column name.
"""

# a placeholder to make it appear in the generated doc
- rawPredictionCol = Param(Params._dummy(), "rawPredictionCol", "raw prediction column name")
+ rawPredictionCol = Param(Params._dummy(), "rawPredictionCol", "raw prediction (a.k.a. confidence) column name")

def __init__(self):
super(HasRawPredictionCol, self).__init__()
- #: param for raw prediction column name
- self.rawPredictionCol = Param(self, "rawPredictionCol", "raw prediction column name")
+ #: param for raw prediction (a.k.a. confidence) column name
+ self.rawPredictionCol = Param(self, "rawPredictionCol", "raw prediction (a.k.a. confidence) column name")
if 'rawPrediction' is not None:
self._setDefault(rawPredictionCol='rawPrediction')

@@ -403,14 +403,12 @@ class HasStepSize(Params):
"""

# a placeholder to make it appear in the generated doc
- stepSize = Param(Params._dummy(), "stepSize",
-     "Step size to be used for each iteration of optimization.")
+ stepSize = Param(Params._dummy(), "stepSize", "Step size to be used for each iteration of optimization.")

def __init__(self):
super(HasStepSize, self).__init__()
#: param for Step size to be used for each iteration of optimization.
- self.stepSize = Param(self, "stepSize",
-     "Step size to be used for each iteration of optimization.")
+ self.stepSize = Param(self, "stepSize", "Step size to be used for each iteration of optimization.")
if None is not None:
self._setDefault(stepSize=None)

2 changes: 1 addition & 1 deletion python/pyspark/ml/pipeline.py
@@ -179,7 +179,7 @@ def transform(self, dataset, params={}):
return dataset


- class Evaluator(object):
+ class Evaluator(Params):
"""
Base class for evaluators that compute metrics from predictions.
"""
8 changes: 4 additions & 4 deletions python/pyspark/ml/tests.py
@@ -34,7 +34,7 @@
from pyspark.sql import DataFrame
from pyspark.ml.param import Param
from pyspark.ml.param.shared import HasMaxIter, HasInputCol
- from pyspark.ml.pipeline import Transformer, Estimator, Pipeline
+ from pyspark.ml.pipeline import Estimator, Model, Pipeline, Transformer


class MockDataset(DataFrame):
@@ -77,7 +77,7 @@ def fit(self, dataset, params={}):
return model


- class MockModel(MockTransformer, Transformer):
+ class MockModel(MockTransformer, Model):

def __init__(self):
super(MockModel, self).__init__()
@@ -128,7 +128,7 @@ def test_param(self):
testParams = TestParams()
maxIter = testParams.maxIter
self.assertEqual(maxIter.name, "maxIter")
- self.assertEqual(maxIter.doc, "max number of iterations")
+ self.assertEqual(maxIter.doc, "max number of iterations (>= 0)")
self.assertTrue(maxIter.parent is testParams)

def test_params(self):
@@ -156,7 +156,7 @@ def test_params(self):
self.assertEquals(
testParams.explainParams(),
"\n".join(["inputCol: input column name (undefined)",
- "maxIter: max number of iterations (default: 10, current: 100)"]))
+ "maxIter: max number of iterations (>= 0) (default: 10, current: 100)"]))


if __name__ == "__main__":