diff --git a/docs/samples/Microsoft.ML.Samples/Dynamic/Trainers/BinaryClassification/AveragedPerceptron.cs b/docs/samples/Microsoft.ML.Samples/Dynamic/Trainers/BinaryClassification/AveragedPerceptron.cs
index fd1ff9457a..b0dd1613fb 100644
--- a/docs/samples/Microsoft.ML.Samples/Dynamic/Trainers/BinaryClassification/AveragedPerceptron.cs
+++ b/docs/samples/Microsoft.ML.Samples/Dynamic/Trainers/BinaryClassification/AveragedPerceptron.cs
@@ -19,7 +19,7 @@ public static void Example()
var trainTestData = mlContext.Data.TrainTestSplit(data, testFraction: 0.1);
// Create data training pipeline.
- var pipeline = mlContext.BinaryClassification.Trainers.AveragedPerceptron(numIterations: 10);
+ var pipeline = mlContext.BinaryClassification.Trainers.AveragedPerceptron(numberOfIterations: 10);
// Fit this pipeline to the training data.
var model = pipeline.Fit(trainTestData.TrainSet);
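A minimal sketch of how the retrained pipeline is consumed downstream, assuming the mlContext, model, and trainTestData variables set up in this sample; EvaluateNonCalibrated fits here because AveragedPerceptron does not produce calibrated probabilities by default:

    // Score the held-out split and compute binary classification metrics.
    var metrics = mlContext.BinaryClassification.EvaluateNonCalibrated(
        model.Transform(trainTestData.TestSet));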
diff --git a/docs/samples/Microsoft.ML.Samples/Dynamic/Trainers/BinaryClassification/AveragedPerceptronWithOptions.cs b/docs/samples/Microsoft.ML.Samples/Dynamic/Trainers/BinaryClassification/AveragedPerceptronWithOptions.cs
index 45b4e0b591..b34926f658 100644
--- a/docs/samples/Microsoft.ML.Samples/Dynamic/Trainers/BinaryClassification/AveragedPerceptronWithOptions.cs
+++ b/docs/samples/Microsoft.ML.Samples/Dynamic/Trainers/BinaryClassification/AveragedPerceptronWithOptions.cs
@@ -25,7 +25,7 @@ public static void Example()
{
LossFunction = new SmoothedHingeLoss(),
LearningRate = 0.1f,
- DoLazyUpdates = false,
+ LazyUpdate = false,
RecencyGain = 0.1f,
NumberOfIterations = 10
};
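For reference, a sketch of the renamed options surface in one place, using only names that appear in this diff (values are illustrative; assumes the sample's mlContext):

    var options = new AveragedPerceptronTrainer.Options
    {
        LossFunction = new SmoothedHingeLoss(),
        LearningRate = 0.1f,
        LazyUpdate = false,                // formerly DoLazyUpdates
        RecencyGain = 0.1f,
        RecencyGainMultiplicative = false, // formerly RecencyGainMulti
        L2Regularization = 0f,             // formerly L2RegularizerWeight
        NumberOfIterations = 10
    };
    var trainer = mlContext.BinaryClassification.Trainers.AveragedPerceptron(options);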
diff --git a/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedLinear.cs b/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedLinear.cs
index 5be37297d7..516f6c7cc0 100644
--- a/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedLinear.cs
+++ b/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedLinear.cs
@@ -57,16 +57,16 @@ public abstract class AveragedLinearOptions : OnlineLinearOptions
/// <see langword="false" /> to update averaged weights on every example.
/// Default is <see langword="true" />.
/// </summary>
- [Argument(ArgumentType.AtMostOnce, HelpText = "Instead of updating averaged weights on every example, only update when loss is nonzero", ShortName = "lazy")]
- public bool DoLazyUpdates = true;
+ [Argument(ArgumentType.AtMostOnce, HelpText = "Instead of updating averaged weights on every example, only update when loss is nonzero", ShortName = "lazy,DoLazyUpdates")]
+ public bool LazyUpdate = true;
/// <summary>
/// The L2 weight for regularization.
/// </summary>
- [Argument(ArgumentType.AtMostOnce, HelpText = "L2 Regularization Weight", ShortName = "reg", SortOrder = 50)]
+ [Argument(ArgumentType.AtMostOnce, HelpText = "L2 Regularization Weight", ShortName = "reg,L2RegularizerWeight", SortOrder = 50)]
[TGUI(Label = "L2 Regularization Weight")]
[TlcModule.SweepableFloatParam("L2RegularizerWeight", 0.0f, 0.4f)]
- public float L2RegularizerWeight = AveragedDefault.L2RegularizerWeight;
+ public float L2Regularization = AveragedDefault.L2Regularization;
/// <summary>
/// Extra weight given to more recent updates.
@@ -85,8 +85,8 @@ public abstract class AveragedLinearOptions : OnlineLinearOptions
/// <see langword="false" /> means <see cref="RecencyGain"/> is additive.
/// Default is <see langword="false" />.
/// </summary>
- [Argument(ArgumentType.AtMostOnce, HelpText = "Whether Recency Gain is multiplicative (vs. additive)", ShortName = "rgm")]
- public bool RecencyGainMulti = false;
+ [Argument(ArgumentType.AtMostOnce, HelpText = "Whether Recency Gain is multiplicative (vs. additive)", ShortName = "rgm,RecencyGainMulti")]
+ public bool RecencyGainMultiplicative = false;
/// <summary>
/// Determines whether to do averaging or not.
@@ -109,7 +109,7 @@ internal class AveragedDefault : OnlineLinearOptions.OnlineDefault
{
public const float LearningRate = 1;
public const bool DecreaseLearningRate = false;
- public const float L2RegularizerWeight = 0;
+ public const float L2Regularization = 0;
}
internal abstract IComponentFactory LossFunctionFactory { get; }
@@ -186,7 +186,7 @@ public override void FinishIteration(IChannel ch)
// Finalize things
if (Averaged)
{
- if (_args.DoLazyUpdates && NumNoUpdates > 0)
+ if (_args.LazyUpdate && NumNoUpdates > 0)
{
// Update the total weights to include the final loss=0 updates
VectorUtils.AddMult(in Weights, NumNoUpdates * WeightsScale, ref TotalWeights);
@@ -221,10 +221,10 @@ public override void ProcessDataInstance(IChannel ch, in VBuffer<float> feat, fl
// REVIEW: Should this be biasUpdate != 0?
// This loss does not incorporate L2 if present, but the chance of that addition to the loss
// exactly cancelling out loss is remote.
- if (loss != 0 || _args.L2RegularizerWeight > 0)
+ if (loss != 0 || _args.L2Regularization > 0)
{
// If doing lazy weights, we need to update the totalWeights and totalBias before updating weights/bias
- if (_args.DoLazyUpdates && _args.Averaged && NumNoUpdates > 0 && TotalMultipliers * _args.AveragedTolerance <= PendingMultipliers)
+ if (_args.LazyUpdate && _args.Averaged && NumNoUpdates > 0 && TotalMultipliers * _args.AveragedTolerance <= PendingMultipliers)
{
VectorUtils.AddMult(in Weights, NumNoUpdates * WeightsScale, ref TotalWeights);
TotalBias += Bias * NumNoUpdates * WeightsScale;
@@ -242,7 +242,7 @@ public override void ProcessDataInstance(IChannel ch, in VBuffer<float> feat, fl
// Perform the update to weights and bias.
VectorUtils.AddMult(in feat, biasUpdate / WeightsScale, ref Weights);
- WeightsScale *= 1 - 2 * _args.L2RegularizerWeight; // L2 regularization.
+ WeightsScale *= 1 - 2 * _args.L2Regularization; // L2 regularization.
ScaleWeightsIfNeeded();
Bias += biasUpdate;
PendingMultipliers += Math.Abs(biasUpdate);
@@ -251,7 +251,7 @@ public override void ProcessDataInstance(IChannel ch, in VBuffer<float> feat, fl
// Add to averaged weights and increment the count.
if (Averaged)
{
- if (!_args.DoLazyUpdates)
+ if (!_args.LazyUpdate)
IncrementAverageNonLazy();
else
NumNoUpdates++;
@@ -282,7 +282,7 @@ private void IncrementAverageNonLazy()
VectorUtils.AddMult(in Weights, Gain * WeightsScale, ref TotalWeights);
TotalBias += Gain * Bias;
NumWeightUpdates += Gain;
- Gain = (_args.RecencyGainMulti ? Gain * _args.RecencyGain : Gain + _args.RecencyGain);
+ Gain = (_args.RecencyGainMultiplicative ? Gain * _args.RecencyGain : Gain + _args.RecencyGain);
// If gains got too big, rescale!
if (Gain > 1000)
@@ -303,11 +303,11 @@ private protected AveragedLinearTrainer(AveragedLinearOptions options, IHostEnvi
Contracts.CheckUserArg(!options.ResetWeightsAfterXExamples.HasValue || options.ResetWeightsAfterXExamples > 0, nameof(options.ResetWeightsAfterXExamples), UserErrorPositive);
// Weights are scaled down by 2 * L2 regularization on each update step, so 0.5 would scale all weights to 0, which is not sensible.
- Contracts.CheckUserArg(0 <= options.L2RegularizerWeight && options.L2RegularizerWeight < 0.5, nameof(options.L2RegularizerWeight), "must be in range [0, 0.5)");
+ Contracts.CheckUserArg(0 <= options.L2Regularization && options.L2Regularization < 0.5, nameof(options.L2Regularization), "must be in range [0, 0.5)");
Contracts.CheckUserArg(options.RecencyGain >= 0, nameof(options.RecencyGain), UserErrorNonNegative);
Contracts.CheckUserArg(options.AveragedTolerance >= 0, nameof(options.AveragedTolerance), UserErrorNonNegative);
// Verify user didn't specify parameters that conflict
- Contracts.Check(!options.DoLazyUpdates || !options.RecencyGainMulti && options.RecencyGain == 0, "Cannot have both recency gain and lazy updates.");
+ Contracts.Check(!options.LazyUpdate || !options.RecencyGainMultiplicative && options.RecencyGain == 0, "Cannot have both recency gain and lazy updates.");
AveragedLinearTrainerOptions = options;
}
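The renamed conflict check keeps its old semantics: lazy updates skip the per-example averaging pass that recency gain reweights, so the two settings are mutually exclusive. A hedged illustration of the rejected combination, using AveragedPerceptronTrainer.Options (which derives from AveragedLinearOptions above); values are illustrative:

    var conflicting = new AveragedPerceptronTrainer.Options
    {
        RecencyGain = 0.1f // nonzero gain requires LazyUpdate = false (LazyUpdate defaults to true)
    };
    // Constructing a trainer from these options trips the Contracts.Check above.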
diff --git a/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedPerceptron.cs b/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedPerceptron.cs
index f4cb2096d8..142f8c1c1d 100644
--- a/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedPerceptron.cs
+++ b/src/Microsoft.ML.StandardTrainers/Standard/Online/AveragedPerceptron.cs
@@ -131,24 +131,24 @@ internal AveragedPerceptronTrainer(IHostEnvironment env, Options options)
/// <param name="featureColumnName">The name of the feature column.</param>
/// <param name="learningRate">The learning rate.</param>
/// <param name="decreaseLearningRate">Whether to decrease learning rate as iterations progress.</param>
- /// <param name="l2RegularizerWeight">L2 Regularization Weight.</param>
- /// <param name="numIterations">The number of training iterations.</param>
+ /// <param name="l2Regularization">Weight of L2 regularization term.</param>
+ /// <param name="numberOfIterations">The number of training iterations.</param>
internal AveragedPerceptronTrainer(IHostEnvironment env,
string labelColumnName = DefaultColumnNames.Label,
string featureColumnName = DefaultColumnNames.Features,
IClassificationLoss lossFunction = null,
float learningRate = Options.AveragedDefault.LearningRate,
bool decreaseLearningRate = Options.AveragedDefault.DecreaseLearningRate,
- float l2RegularizerWeight = Options.AveragedDefault.L2RegularizerWeight,
- int numIterations = Options.AveragedDefault.NumIterations)
+ float l2Regularization = Options.AveragedDefault.L2Regularization,
+ int numberOfIterations = Options.AveragedDefault.NumberOfIterations)
: this(env, new Options
{
LabelColumnName = labelColumnName,
FeatureColumnName = featureColumnName,
LearningRate = learningRate,
DecreaseLearningRate = decreaseLearningRate,
- L2RegularizerWeight = l2RegularizerWeight,
- NumberOfIterations = numIterations,
+ L2Regularization = l2Regularization,
+ NumberOfIterations = numberOfIterations,
LossFunction = lossFunction ?? new HingeLoss()
})
{
diff --git a/src/Microsoft.ML.StandardTrainers/Standard/Online/LinearSvm.cs b/src/Microsoft.ML.StandardTrainers/Standard/Online/LinearSvm.cs
index 60f379465e..fb6b5f4814 100644
--- a/src/Microsoft.ML.StandardTrainers/Standard/Online/LinearSvm.cs
+++ b/src/Microsoft.ML.StandardTrainers/Standard/Online/LinearSvm.cs
@@ -69,8 +69,8 @@ public sealed class Options : OnlineLinearOptions
/// <summary>
/// Column to use for example weight.
/// </summary>
- [Argument(ArgumentType.AtMostOnce, HelpText = "Column to use for example weight", ShortName = "weight", SortOrder = 4, Visibility = ArgumentAttribute.VisibilityType.EntryPointsOnly)]
- public string WeightColumn = null;
+ [Argument(ArgumentType.AtMostOnce, HelpText = "Column to use for example weight", ShortName = "weight,WeightColumn", SortOrder = 4, Visibility = ArgumentAttribute.VisibilityType.EntryPointsOnly)]
+ public string ExampleWeightColumnName = null;
}
private sealed class TrainState : TrainStateBase
@@ -232,20 +232,20 @@ public override LinearBinaryModelParameters CreatePredictor()
/// <param name="env">The environment to use.</param>
/// <param name="labelColumn">The name of the label column.</param>
/// <param name="featureColumn">The name of the feature column.</param>
- /// <param name="weightColumn">The optional name of the weight column.</param>
- /// <param name="numIterations">The number of training iteraitons.</param>
+ /// <param name="exampleWeightColumnName">The name of the example weight column (optional).</param>
+ /// <param name="numberOfIterations">The number of training iterations.</param>
[BestFriend]
internal LinearSvmTrainer(IHostEnvironment env,
string labelColumn = DefaultColumnNames.Label,
string featureColumn = DefaultColumnNames.Features,
- string weightColumn = null,
- int numIterations = Options.OnlineDefault.NumIterations)
+ string exampleWeightColumnName = null,
+ int numberOfIterations = Options.OnlineDefault.NumberOfIterations)
: this(env, new Options
{
LabelColumnName = labelColumn,
FeatureColumnName = featureColumn,
- WeightColumn = weightColumn,
- NumberOfIterations = numIterations,
+ ExampleWeightColumnName = exampleWeightColumnName,
+ NumberOfIterations = numberOfIterations,
})
{
}
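A sketch of how a caller sets the renamed option, assuming the training data carries a "Weight" column and that the Options overload of LinearSupportVectorMachines is used:

    var svmOptions = new LinearSvmTrainer.Options
    {
        ExampleWeightColumnName = "Weight", // formerly WeightColumn
        NumberOfIterations = 10             // formerly carried the numIterations alias
    };
    var svm = mlContext.BinaryClassification.Trainers.LinearSupportVectorMachines(svmOptions);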
diff --git a/src/Microsoft.ML.StandardTrainers/Standard/Online/OnlineGradientDescent.cs b/src/Microsoft.ML.StandardTrainers/Standard/Online/OnlineGradientDescent.cs
index 7843b6ad6f..aa3ee1a340 100644
--- a/src/Microsoft.ML.StandardTrainers/Standard/Online/OnlineGradientDescent.cs
+++ b/src/Microsoft.ML.StandardTrainers/Standard/Online/OnlineGradientDescent.cs
@@ -97,23 +97,23 @@ public override LinearRegressionModelParameters CreatePredictor()
/// <param name="featureColumn">Name of the feature column.</param>
/// <param name="learningRate">The learning Rate.</param>
/// <param name="decreaseLearningRate">Decrease learning rate as iterations progress.</param>
- /// <param name="l2RegularizerWeight">L2 Regularization Weight.</param>
- /// <param name="numIterations">Number of training iterations through the data.</param>
+ /// <param name="l2Regularization">Weight of L2 regularization term.</param>
+ /// <param name="numberOfIterations">Number of training iterations through the data.</param>
/// <param name="lossFunction">The custom loss functions. Defaults to <see cref="SquaredLoss"/> if not provided.</param>
internal OnlineGradientDescentTrainer(IHostEnvironment env,
string labelColumn = DefaultColumnNames.Label,
string featureColumn = DefaultColumnNames.Features,
float learningRate = Options.OgdDefaultArgs.LearningRate,
bool decreaseLearningRate = Options.OgdDefaultArgs.DecreaseLearningRate,
- float l2RegularizerWeight = Options.OgdDefaultArgs.L2RegularizerWeight,
- int numIterations = Options.OgdDefaultArgs.NumIterations,
+ float l2Regularization = Options.OgdDefaultArgs.L2Regularization,
+ int numberOfIterations = Options.OgdDefaultArgs.NumberOfIterations,
IRegressionLoss lossFunction = null)
: this(env, new Options
{
LearningRate = learningRate,
DecreaseLearningRate = decreaseLearningRate,
- L2RegularizerWeight = l2RegularizerWeight,
- NumberOfIterations = numIterations,
+ L2Regularization = l2Regularization,
+ NumberOfIterations = numberOfIterations,
LabelColumnName = labelColumn,
FeatureColumnName = featureColumn,
LossFunction = lossFunction ?? new SquaredLoss()
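Assuming an mlContext as in the samples, the regression catalog call with the renamed arguments reads:

    var ogd = mlContext.Regression.Trainers.OnlineGradientDescent(
        learningRate: 0.1f,
        decreaseLearningRate: true,
        l2Regularization: 0f,     // formerly l2RegularizerWeight
        numberOfIterations: 1);   // formerly numIterations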
diff --git a/src/Microsoft.ML.StandardTrainers/Standard/Online/OnlineLinear.cs b/src/Microsoft.ML.StandardTrainers/Standard/Online/OnlineLinear.cs
index 13b6c32bf0..697f0f3906 100644
--- a/src/Microsoft.ML.StandardTrainers/Standard/Online/OnlineLinear.cs
+++ b/src/Microsoft.ML.StandardTrainers/Standard/Online/OnlineLinear.cs
@@ -27,7 +27,7 @@ public abstract class OnlineLinearOptions : TrainerInputBaseWithLabel
[Argument(ArgumentType.AtMostOnce, HelpText = "Number of iterations", ShortName = "iter,numIterations", SortOrder = 50)]
[TGUI(Label = "Number of Iterations", Description = "Number of training iterations through data", SuggestedSweeps = "1,10,100")]
[TlcModule.SweepableLongParamAttribute("NumIterations", 1, 100, stepSize: 10, isLogScale: true)]
- public int NumberOfIterations = OnlineDefault.NumIterations;
+ public int NumberOfIterations = OnlineDefault.NumberOfIterations;
/// <summary>
/// Initial weights and bias, comma-separated.
@@ -62,7 +62,7 @@ public abstract class OnlineLinearOptions : TrainerInputBaseWithLabel
[BestFriend]
internal class OnlineDefault
{
- public const int NumIterations = 1;
+ public const int NumberOfIterations = 1;
}
}
diff --git a/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs b/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs
index e0d8900a31..4be5a0ada9 100644
--- a/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs
+++ b/src/Microsoft.ML.StandardTrainers/StandardTrainersCatalog.cs
@@ -340,8 +340,8 @@ public static SdcaMultiClassTrainer StochasticDualCoordinateAscent(this Multicla
/// <see langword="true" /> to decrease the <paramref name="learningRate"/> as iterations progress; otherwise, <see langword="false" />.
/// Default is <see langword="false" />.
/// </param>
- /// <param name="l2RegularizerWeight">The L2 weight for regularization.</param>
- /// <param name="numIterations">Number of passes through the training dataset.</param>
+ /// <param name="l2Regularization">The L2 weight for regularization.</param>
+ /// <param name="numberOfIterations">Number of passes through the training dataset.</param>
///
///
///
@@ -411,21 +411,21 @@ public IClassificationLoss CreateComponent(IHostEnvironment env)
/// <param name="lossFunction">The custom loss. Defaults to <see cref="SquaredLoss"/> if not provided.</param>
/// <param name="learningRate">The learning Rate.</param>
/// <param name="decreaseLearningRate">Decrease learning rate as iterations progress.</param>
- /// <param name="l2RegularizerWeight">L2 regularization weight.</param>
- /// <param name="numIterations">Number of training iterations through the data.</param>
+ /// <param name="l2Regularization">The L2 weight for regularization.</param>
+ /// <param name="numberOfIterations">Number of training iterations through the data.</param>
public static OnlineGradientDescentTrainer OnlineGradientDescent(this RegressionCatalog.RegressionTrainers catalog,
string labelColumnName = DefaultColumnNames.Label,
string featureColumnName = DefaultColumnNames.Features,
IRegressionLoss lossFunction = null,
float learningRate = OnlineGradientDescentTrainer.Options.OgdDefaultArgs.LearningRate,
bool decreaseLearningRate = OnlineGradientDescentTrainer.Options.OgdDefaultArgs.DecreaseLearningRate,
- float l2RegularizerWeight = AveragedLinearOptions.AveragedDefault.L2RegularizerWeight,
- int numIterations = OnlineLinearOptions.OnlineDefault.NumIterations)
+ float l2Regularization = AveragedLinearOptions.AveragedDefault.L2Regularization,
+ int numberOfIterations = OnlineLinearOptions.OnlineDefault.NumberOfIterations)
{
Contracts.CheckValue(catalog, nameof(catalog));
var env = CatalogUtils.GetEnvironment(catalog);
- return new OnlineGradientDescentTrainer(env, labelColumnName, featureColumnName, learningRate, decreaseLearningRate, l2RegularizerWeight,
- numIterations, lossFunction);
+ return new OnlineGradientDescentTrainer(env, labelColumnName, featureColumnName, learningRate, decreaseLearningRate, l2Regularization,
+ numberOfIterations, lossFunction);
}
///
@@ -694,15 +694,15 @@ public static PairwiseCouplingTrainer PairwiseCoupling(this MulticlassCl
/// <param name="labelColumnName">The name of the label column.</param>
/// <param name="featureColumnName">The name of the feature column.</param>
/// <param name="exampleWeightColumnName">The name of the example weight column (optional).</param>
- /// <param name="numIterations">The number of training iteraitons.</param>
+ /// <param name="numberOfIterations">The number of training iterations.</param>
public static LinearSvmTrainer LinearSupportVectorMachines(this BinaryClassificationCatalog.BinaryClassificationTrainers catalog,
string labelColumnName = DefaultColumnNames.Label,
string featureColumnName = DefaultColumnNames.Features,
string exampleWeightColumnName = null,
- int numIterations = OnlineLinearOptions.OnlineDefault.NumIterations)
+ int numberOfIterations = OnlineLinearOptions.OnlineDefault.NumberOfIterations)
{
Contracts.CheckValue(catalog, nameof(catalog));
- return new LinearSvmTrainer(CatalogUtils.GetEnvironment(catalog), labelColumnName, featureColumnName, exampleWeightColumnName, numIterations);
+ return new LinearSvmTrainer(CatalogUtils.GetEnvironment(catalog), labelColumnName, featureColumnName, exampleWeightColumnName, numberOfIterations);
}
///
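Taken together, the renamed binary classification catalog entry points are called as below (a sketch; argument values are illustrative and an mlContext is assumed):

    var ap = mlContext.BinaryClassification.Trainers.AveragedPerceptron(
        l2Regularization: 0f,     // formerly l2RegularizerWeight
        numberOfIterations: 10);  // formerly numIterations

    var svm = mlContext.BinaryClassification.Trainers.LinearSupportVectorMachines(
        exampleWeightColumnName: null,
        numberOfIterations: 10);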
diff --git a/src/Microsoft.ML.StaticPipe/OnlineLearnerStatic.cs b/src/Microsoft.ML.StaticPipe/OnlineLearnerStatic.cs
index 72a65aebc5..5e3f478e67 100644
--- a/src/Microsoft.ML.StaticPipe/OnlineLearnerStatic.cs
+++ b/src/Microsoft.ML.StaticPipe/OnlineLearnerStatic.cs
@@ -23,7 +23,7 @@ public static class AveragedPerceptronStaticExtensions
/// <param name="weights">The optional example weights.</param>
/// <param name="learningRate">The learning Rate.</param>
/// <param name="decreaseLearningRate">Decrease learning rate as iterations progress.</param>
- /// <param name="l2RegularizerWeight">L2 regularization weight.</param>
+ /// <param name="l2Regularization">L2 regularization weight.</param>
/// <param name="numIterations">Number of training iterations through the data.</param>
/// A delegate that is called every time the
/// method is called on the
@@ -47,12 +47,12 @@ public static (Scalar<float> score, Scalar<bool> predictedLabel) AveragedPercept
IClassificationLoss lossFunction = null,
float learningRate = AveragedLinearOptions.AveragedDefault.LearningRate,
bool decreaseLearningRate = AveragedLinearOptions.AveragedDefault.DecreaseLearningRate,
- float l2RegularizerWeight = AveragedLinearOptions.AveragedDefault.L2RegularizerWeight,
- int numIterations = AveragedLinearOptions.AveragedDefault.NumIterations,
+ float l2Regularization = AveragedLinearOptions.AveragedDefault.L2Regularization,
+ int numIterations = AveragedLinearOptions.AveragedDefault.NumberOfIterations,
Action<LinearBinaryModelParameters> onFit = null
)
{
- OnlineLinearStaticUtils.CheckUserParams(label, features, weights, learningRate, l2RegularizerWeight, numIterations, onFit);
+ OnlineLinearStaticUtils.CheckUserParams(label, features, weights, learningRate, l2Regularization, numIterations, onFit);
bool hasProbs = lossFunction is LogLoss;
@@ -61,7 +61,7 @@ public static (Scalar<float> score, Scalar<bool> predictedLabel) AveragedPercept
{
var trainer = new AveragedPerceptronTrainer(env, labelName, featuresName, lossFunction,
- learningRate, decreaseLearningRate, l2RegularizerWeight, numIterations);
+ learningRate, decreaseLearningRate, l2Regularization, numIterations);
if (onFit != null)
return trainer.WithOnFitDelegate(trans => onFit(trans.Model));
@@ -148,7 +148,7 @@ public static class OnlineGradientDescentExtensions
/// <param name="lossFunction">The custom loss. Defaults to <see cref="SquaredLoss"/> if not provided.</param>
/// <param name="learningRate">The learning Rate.</param>
/// <param name="decreaseLearningRate">Decrease learning rate as iterations progress.</param>
- /// <param name="l2RegularizerWeight">L2 regularization weight.</param>
+ /// <param name="l2Regularization">L2 regularization weight.</param>
/// <param name="numIterations">Number of training iterations through the data.</param>
/// A delegate that is called every time the
/// method is called on the
@@ -166,18 +166,18 @@ public static Scalar<float> OnlineGradientDescent(this RegressionCatalog.Regress
IRegressionLoss lossFunction = null,
float learningRate = OnlineGradientDescentTrainer.Options.OgdDefaultArgs.LearningRate,
bool decreaseLearningRate = OnlineGradientDescentTrainer.Options.OgdDefaultArgs.DecreaseLearningRate,
- float l2RegularizerWeight = OnlineGradientDescentTrainer.Options.OgdDefaultArgs.L2RegularizerWeight,
- int numIterations = OnlineLinearOptions.OnlineDefault.NumIterations,
+ float l2Regularization = OnlineGradientDescentTrainer.Options.OgdDefaultArgs.L2Regularization,
+ int numIterations = OnlineLinearOptions.OnlineDefault.NumberOfIterations,
Action<LinearRegressionModelParameters> onFit = null)
{
- OnlineLinearStaticUtils.CheckUserParams(label, features, weights, learningRate, l2RegularizerWeight, numIterations, onFit);
+ OnlineLinearStaticUtils.CheckUserParams(label, features, weights, learningRate, l2Regularization, numIterations, onFit);
Contracts.CheckValueOrNull(lossFunction);
var rec = new TrainerEstimatorReconciler.Regression(
(env, labelName, featuresName, weightsName) =>
{
var trainer = new OnlineGradientDescentTrainer(env, labelName, featuresName, learningRate,
- decreaseLearningRate, l2RegularizerWeight, numIterations, lossFunction);
+ decreaseLearningRate, l2Regularization, numIterations, lossFunction);
if (onFit != null)
return trainer.WithOnFitDelegate(trans => onFit(trans.Model));
diff --git a/test/BaselineOutput/Common/EntryPoints/core_manifest.json b/test/BaselineOutput/Common/EntryPoints/core_manifest.json
index dcb299c02c..bb728063a0 100644
--- a/test/BaselineOutput/Common/EntryPoints/core_manifest.json
+++ b/test/BaselineOutput/Common/EntryPoints/core_manifest.json
@@ -4286,11 +4286,12 @@
}
},
{
- "Name": "L2RegularizerWeight",
+ "Name": "L2Regularization",
"Type": "Float",
"Desc": "L2 Regularization Weight",
"Aliases": [
- "reg"
+ "reg",
+ "L2RegularizerWeight"
],
"Required": false,
"SortOrder": 50.0,
@@ -4377,11 +4378,12 @@
"Default": null
},
{
- "Name": "DoLazyUpdates",
+ "Name": "LazyUpdate",
"Type": "Bool",
"Desc": "Instead of updating averaged weights on every example, only update when loss is nonzero",
"Aliases": [
- "lazy"
+ "lazy",
+ "DoLazyUpdates"
],
"Required": false,
"SortOrder": 150.0,
@@ -4401,11 +4403,12 @@
"Default": 0.0
},
{
- "Name": "RecencyGainMulti",
+ "Name": "RecencyGainMultiplicative",
"Type": "Bool",
"Desc": "Whether Recency Gain is multiplicative (vs. additive)",
"Aliases": [
- "rgm"
+ "rgm",
+ "RecencyGainMulti"
],
"Required": false,
"SortOrder": 150.0,
@@ -13113,11 +13116,12 @@
"Default": "Label"
},
{
- "Name": "WeightColumn",
+ "Name": "ExampleWeightColumnName",
"Type": "String",
"Desc": "Column to use for example weight",
"Aliases": [
- "weight"
+ "weight",
+ "WeightColumn"
],
"Required": false,
"SortOrder": 4.0,
@@ -14214,11 +14218,12 @@
}
},
{
- "Name": "L2RegularizerWeight",
+ "Name": "L2Regularization",
"Type": "Float",
"Desc": "L2 Regularization Weight",
"Aliases": [
- "reg"
+ "reg",
+ "L2RegularizerWeight"
],
"Required": false,
"SortOrder": 50.0,
@@ -14282,11 +14287,12 @@
"Default": null
},
{
- "Name": "DoLazyUpdates",
+ "Name": "LazyUpdate",
"Type": "Bool",
"Desc": "Instead of updating averaged weights on every example, only update when loss is nonzero",
"Aliases": [
- "lazy"
+ "lazy",
+ "DoLazyUpdates"
],
"Required": false,
"SortOrder": 150.0,
@@ -14306,11 +14312,12 @@
"Default": 0.0
},
{
- "Name": "RecencyGainMulti",
+ "Name": "RecencyGainMultiplicative",
"Type": "Bool",
"Desc": "Whether Recency Gain is multiplicative (vs. additive)",
"Aliases": [
- "rgm"
+ "rgm",
+ "RecencyGainMulti"
],
"Required": false,
"SortOrder": 150.0,
diff --git a/test/Microsoft.ML.Benchmarks/RffTransform.cs b/test/Microsoft.ML.Benchmarks/RffTransform.cs
index 38ad710948..8d4a33f958 100644
--- a/test/Microsoft.ML.Benchmarks/RffTransform.cs
+++ b/test/Microsoft.ML.Benchmarks/RffTransform.cs
@@ -47,7 +47,7 @@ public void CV_Multiclass_Digits_RffTransform_OVAAveragedPerceptron()
.AppendCacheCheckpoint(mlContext)
.Append(mlContext.Transforms.Concatenate("Features", "FeaturesRFF"))
.Append(new ValueToKeyMappingEstimator(mlContext, "Label"))
- .Append(mlContext.MulticlassClassification.Trainers.OneVersusAll(mlContext.BinaryClassification.Trainers.AveragedPerceptron(numIterations: 10)));
+ .Append(mlContext.MulticlassClassification.Trainers.OneVersusAll(mlContext.BinaryClassification.Trainers.AveragedPerceptron(numberOfIterations: 10)));
var cvResults = mlContext.MulticlassClassification.CrossValidate(data, pipeline, numFolds: 5);
}