Skip to content

Commit

Permalink
good params for every example with updated, actually good params (except boa)
Browse files Browse the repository at this point in the history
  • Loading branch information
rcoreilly committed Apr 5, 2024
1 parent ba45c5b commit b2ddd6f
Show file tree
Hide file tree
Showing 54 changed files with 2,264 additions and 1,523 deletions.
3 changes: 2 additions & 1 deletion axon/pvlv_layers.go
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,8 @@ func (ly *Layer) BLADefaults() {
lp.CT.DecayTau = 0
lp.CT.GeGain = 0.1 // 0.1 has effect, can go a bit lower if need to

lp.Learn.NeuroMod.DAModGain = 0.5
// has been 0:
// lp.Learn.NeuroMod.DAModGain = 0.5
if isAcq {
lp.Learn.NeuroMod.DALRateMod = 0.5
lp.Learn.NeuroMod.BurstGain = 0.2
Expand Down
8 changes: 4 additions & 4 deletions examples/bench_lvis/bench_lvis.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,13 +104,13 @@ func ConfigNet(ctx *axon.Context, net *axon.Network, inputNeurs, inputPools, pat
v4[pi] = net.AddLayer4D("V4_"+pnm, v4Pools, v4Pools, hiddenNeurs, hiddenNeurs, axon.SuperLayer)
te[pi] = net.AddLayer2D("TE_"+pnm, teNeurs, teNeurs, axon.SuperLayer)

v1[pi].SetClass("V1m")
v2[pi].SetClass("V2m V2")
v4[pi].SetClass("V4")
v1[pi].AddClass("V1m")
v2[pi].AddClass("V2m V2")
v4[pi].AddClass("V4")

net.ConnectLayers(v1[pi], v2[pi], Prjn4x4Skp2, axon.ForwardPrjn)
net.BidirConnectLayers(v2[pi], v4[pi], Prjn4x4Skp2)
net.ConnectLayers(v1[pi], v4[pi], sparseRandom, axon.ForwardPrjn).SetClass("V1SC")
net.ConnectLayers(v1[pi], v4[pi], sparseRandom, axon.ForwardPrjn).AddClass("V1SC")
net.BidirConnectLayers(v4[pi], te[pi], full)
net.BidirConnectLayers(te[pi], outLay, full)
}
Expand Down
26 changes: 13 additions & 13 deletions examples/boa/params.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ var ParamSets = netparams.Sets{
}},
{Sel: ".PTMaintLayer", Desc: "time integration params",
Params: params.Params{
"Layer.Inhib.Layer.Gi": "2.4",
"Layer.Inhib.Pool.Gi": "2.4",
// "Layer.Inhib.Layer.Gi": "2.4",
// "Layer.Inhib.Pool.Gi": "2.4",
"Layer.Acts.Dend.ModGain": "1.5", // 2 min -- reduces maint early
"Layer.Learn.NeuroMod.AChDisInhib": "0.0", // not much effect here..
}},
Expand All @@ -46,29 +46,29 @@ var ParamSets = netparams.Sets{
{Sel: "#OFCposUSPT", Desc: "",
Params: params.Params{
"Layer.Inhib.ActAvg.Nominal": "0.2",
"Layer.Inhib.Pool.Gi": "3.0",
// "Layer.Inhib.Pool.Gi": "0.5",
}},
{Sel: "#OFCposUSPTp", Desc: "",
Params: params.Params{
"Layer.Inhib.Pool.Gi": "1.4",
"Layer.Inhib.Pool.Gi": "1.0",
}},
{Sel: "#OFCnegUS", Desc: "",
Params: params.Params{
"Layer.Inhib.ActAvg.Nominal": "0.1",
"Layer.Inhib.Layer.Gi": "1.2", // weaker in general so needs to be lower
// "Layer.Inhib.Layer.Gi": "0.5", // weaker in general so needs to be lower
}},
{Sel: "#OFCnegUSPT", Desc: "",
Params: params.Params{
"Layer.Inhib.ActAvg.Nominal": "0.2",
"Layer.Inhib.Pool.Gi": "3.0",
// "Layer.Inhib.Pool.Gi": "3.0",
}},
{Sel: "#OFCnegUSPTp", Desc: "",
Params: params.Params{
"Layer.Inhib.Pool.Gi": "1.4",
// "Layer.Inhib.Pool.Gi": "1.4",
}},
{Sel: "#ILpos", Desc: "",
Params: params.Params{
"Layer.Inhib.Pool.Gi": "1",
// "Layer.Inhib.Pool.Gi": "1",
}},
{Sel: ".VSMatrixLayer", Desc: "vs mtx",
Params: params.Params{
Expand Down Expand Up @@ -128,11 +128,11 @@ var ParamSets = netparams.Sets{
Params: params.Params{
"Prjn.PrjnScale.Abs": "1", // todo: try 3?
}},
{Sel: ".PTSelfMaint", Desc: "",
Params: params.Params{
"Prjn.PrjnScale.Abs": "4",
"Prjn.Learn.LRate.Base": "0.0001", // this is not a problem
}},
// {Sel: ".PTSelfMaint", Desc: "",
// Params: params.Params{
// "Prjn.PrjnScale.Abs": "4",
// "Prjn.Learn.LRate.Base": "0.0001", // this is not a problem
// }},
{Sel: ".ToPTp", Desc: "",
Params: params.Params{
"Prjn.PrjnScale.Abs": "4",
Expand Down
16 changes: 1 addition & 15 deletions examples/boa/params_good/params.toml
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,6 @@
Desc = "time integration params"
[Base.Params]
"Layer.Acts.Dend.ModGain" = "1.5"
"Layer.Inhib.Layer.Gi" = "2.4"
"Layer.Inhib.Pool.Gi" = "2.4"
"Layer.Learn.NeuroMod.AChDisInhib" = "0.0"

[[Base]]
Expand Down Expand Up @@ -45,39 +43,34 @@
Desc = ""
[Base.Params]
"Layer.Inhib.ActAvg.Nominal" = "0.2"
"Layer.Inhib.Pool.Gi" = "3.0"

[[Base]]
Sel = "#OFCposUSPTp"
Desc = ""
[Base.Params]
"Layer.Inhib.Pool.Gi" = "1.4"
"Layer.Inhib.Pool.Gi" = "1.0"

[[Base]]
Sel = "#OFCnegUS"
Desc = ""
[Base.Params]
"Layer.Inhib.ActAvg.Nominal" = "0.1"
"Layer.Inhib.Layer.Gi" = "1.2"

[[Base]]
Sel = "#OFCnegUSPT"
Desc = ""
[Base.Params]
"Layer.Inhib.ActAvg.Nominal" = "0.2"
"Layer.Inhib.Pool.Gi" = "3.0"

[[Base]]
Sel = "#OFCnegUSPTp"
Desc = ""
[Base.Params]
"Layer.Inhib.Pool.Gi" = "1.4"

[[Base]]
Sel = "#ILpos"
Desc = ""
[Base.Params]
"Layer.Inhib.Pool.Gi" = "1"

[[Base]]
Sel = ".VSMatrixLayer"
Expand Down Expand Up @@ -157,13 +150,6 @@
[Base.Params]
"Prjn.PrjnScale.Abs" = "1"

[[Base]]
Sel = ".PTSelfMaint"
Desc = ""
[Base.Params]
"Prjn.Learn.LRate.Base" = "0.0001"
"Prjn.PrjnScale.Abs" = "4"

[[Base]]
Sel = ".ToPTp"
Desc = ""
Expand Down
Loading

0 comments on commit b2ddd6f

Please sign in to comment.