Commit 56c7e23

emergent all tests passing and updated to be actual tests for several cases.

rcoreilly committed May 2, 2024
1 parent c5641f5 commit 56c7e23
Showing 7 changed files with 594 additions and 259 deletions.
63 changes: 62 additions & 1 deletion esg/esg_test.go
@@ -6,7 +6,10 @@ package esg

import (
"fmt"
"math/rand"
"testing"

"github.com/stretchr/testify/assert"
)

func TestParse(t *testing.T) {
@@ -36,10 +39,68 @@ func TestGen(t *testing.T) {
t.Error("validation errors occurred as logged above")
}
// rls.Trace = true
rand.Seed(10)
genstr := ""
for i := 0; i < 50; i++ {
str := rls.Gen()
fmt.Println(str)
genstr += fmt.Sprintf("%v\n", str)
// fmt.Println(str)
}

ex := `[schoolgirl consumed food in park]
[someone ate crackers with finger]
[busdriver ate soup in kitchen]
[busdriver consumed steak in kitchen]
[busdriver ate steak with something]
[pitcherpers ate something with pleasure]
[someone consumed soup with gusto]
[pitcherpers ate steak with gusto]
[child ate food in kitchen]
[child ate food in park]
[pitcherpers ate crackers with finger]
[pitcherpers ate soup with crackers]
[pitcherpers ate icecream in park]
[pitcherpers consumed food with jelly]
[adult ate something with teacher]
[busdriver ate steak in kitchen]
[busdriver consumed steak in kitchen]
[busdriver consumed food in kitchen]
[adult ate food with gusto]
[busdriver ate steak in kitchen]
[busdriver ate steak with teacher]
[adult ate icecream in park]
[busdriver ate steak in kitchen]
[busdriver ate steak with teacher]
[adult consumed steak with utensil]
[busdriver ate soup in kitchen]
[busdriver ate icecream with spoon]
[busdriver ate steak with gusto]
[adult ate something in kitchen]
[someone ate food with teacher]
[busdriver ate icecream in park]
[adult ate crackers in kitchen]
[busdriver consumed food in kitchen]
[adult ate food with gusto]
[adult consumed soup with crackers]
[teacher ate something in kitchen]
[teacher ate soup with crackers]
[teacher consumed crackers with finger]
[someone ate food with utensil]
[teacher ate food in kitchen]
[teacher ate food with daintiness]
[someone ate food with crackers]
[someone ate something with something]
[teacher ate soup in kitchen]
[teacher ate crackers with daintiness]
[teacher ate soup in kitchen]
[teacher ate crackers with daintiness]
[pitcherpers ate food in kitchen]
[pitcherpers ate icecream in park]
[pitcherpers ate soup with gusto]
`

assert.Equal(t, ex, genstr)

}

// func TestGenIto(t *testing.T) {
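The seeded-generation pattern above generalizes: fix the math/rand seed, accumulate the generator's output, and assert against a golden string with testify. Below is a minimal sketch of why the fixed seed makes such a test stable, using a hypothetical `gen` function in place of `rls.Gen`; since the exact expected strings depend on the seed, the sketch asserts run-to-run equality rather than hardcoding them.

```go
package example

import (
	"fmt"
	"math/rand"
	"testing"

	"github.com/stretchr/testify/assert"
)

// gen is a hypothetical stand-in for a stochastic generator like rls.Gen.
func gen() string {
	words := []string{"busdriver", "teacher", "adult", "child"}
	return words[rand.Intn(len(words))]
}

func TestSeededGenIsDeterministic(t *testing.T) {
	run := func() string {
		rand.Seed(10) // fixed seed: the "random" sequence is reproducible
		s := ""
		for i := 0; i < 5; i++ {
			s += fmt.Sprintf("%v\n", gen())
		}
		return s
	}
	// Identical seeds yield identical output, which is what makes a
	// golden-string assertion like the one in TestGen stable across runs.
	assert.Equal(t, run(), run())
}
```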
173 changes: 4 additions & 169 deletions params/tweak_test.go
@@ -8,7 +8,7 @@ import (
"testing"

"cogentcore.org/core/base/reflectx"
"github.com/andreyvit/diff"
"github.com/stretchr/testify/assert"
)

var tweakSets = Sets{
@@ -73,171 +73,8 @@ func TestTweak(t *testing.T) {
}
}

var trgSearch = `[
{
"Param": "Layer.Inhib.Layer.Gi",
"Sel": {
"Sel": "#Hidden",
"Desc": "output definitely needs lower inhib -- true for smaller layers in general",
"Params": {
"Layer.Inhib.Layer.Gi": "1.4"
},
"Hypers": {
"Layer.Inhib.Layer.Gi": {
"Tweak": "incr"
}
}
},
"Search": [
{
"Name": "Hidden",
"Type": "Layer",
"Path": "Layer.Inhib.Layer.Gi",
"Start": 1.4,
"Values": [
1.3,
1.5
]
}
]
},
{
"Param": "Prjn.WtScale.Rel",
"Sel": {
"Sel": ".Back",
"Desc": "top-down back-projections MUST have lower relative weight scale, otherwise network hallucinates",
"Params": {
"Prjn.WtScale.Rel": "0.2"
},
"Hypers": {
"Prjn.WtScale.Rel": {
"Tweak": "log"
}
}
},
"Search": [
{
"Name": "HiddenToInput",
"Type": "Prjn",
"Path": "Prjn.WtScale.Rel",
"Start": 0.2,
"Values": [
0.1,
0.5
]
}
]
},
{
"Param": "Layer.Inhib.Layer.Gi",
"Sel": {
"Sel": "Layer",
"Desc": "using default 1.8 inhib for all of network -- can explore",
"Params": {
"Layer.Inhib.Layer.Gi": "1.8"
},
"Hypers": {
"Layer.Inhib.Layer.Gi": {
"Tweak": "[1.75, 1.85]"
}
}
},
"Search": [
{
"Name": "Input",
"Type": "Layer",
"Path": "Layer.Inhib.Layer.Gi",
"Start": 1.8,
"Values": [
1.75,
1.85
]
}
]
},
{
"Param": "Prjn.Learn.LRate",
"Sel": {
"Sel": "Prjn",
"Desc": "norm and momentum on works better, but wt bal is not better for smaller nets",
"Params": {
"Prjn.Learn.LRate": "0.02",
"Prjn.Learn.Momentum": "0.9"
},
"Hypers": {
"Prjn.Learn.LRate": {
"Tweak": "log"
},
"Prjn.Learn.Momentum": {
"Tweak": "incr"
}
}
},
"Search": [
{
"Name": "HiddenToInput",
"Type": "Prjn",
"Path": "Prjn.Learn.LRate",
"Start": 0.02,
"Values": [
0.01,
0.05
]
},
{
"Name": "InputToHidden",
"Type": "Prjn",
"Path": "Prjn.Learn.LRate",
"Start": 0.02,
"Values": [
0.01,
0.05
]
}
]
},
{
"Param": "Prjn.Learn.Momentum",
"Sel": {
"Sel": "Prjn",
"Desc": "norm and momentum on works better, but wt bal is not better for smaller nets",
"Params": {
"Prjn.Learn.LRate": "0.02",
"Prjn.Learn.Momentum": "0.9"
},
"Hypers": {
"Prjn.Learn.LRate": {
"Tweak": "log"
},
"Prjn.Learn.Momentum": {
"Tweak": "incr"
}
}
},
"Search": [
{
"Name": "HiddenToInput",
"Type": "Prjn",
"Path": "Prjn.Learn.Momentum",
"Start": 0.9,
"Values": [
0.8,
1
]
},
{
"Name": "InputToHidden",
"Type": "Prjn",
"Path": "Prjn.Learn.Momentum",
"Start": 0.9,
"Values": [
0.8,
1
]
}
]
}
]`
var trgSearch = `[{"Param":"Layer.Inhib.Layer.Gi","Sel":{"Sel":"#Hidden","Desc":"output definitely needs lower inhib -- true for smaller layers in general","Params":{"Layer.Inhib.Layer.Gi":"1.4"},"Hypers":{"Layer.Inhib.Layer.Gi":{"Tweak":"incr"}}},"Search":[{"Name":"Hidden","Type":"Layer","Path":"Layer.Inhib.Layer.Gi","Start":1.4,"Values":[1.3,1.5]}]},{"Param":"Prjn.WtScale.Rel","Sel":{"Sel":".Back","Desc":"top-down back-projections MUST have lower relative weight scale, otherwise network hallucinates","Params":{"Prjn.WtScale.Rel":"0.2"},"Hypers":{"Prjn.WtScale.Rel":{"Tweak":"log"}}},"Search":[{"Name":"HiddenToInput","Type":"Prjn","Path":"Prjn.WtScale.Rel","Start":0.2,"Values":[0.1,0.5]}]},{"Param":"Layer.Inhib.Layer.Gi","Sel":{"Sel":"Layer","Desc":"using default 1.8 inhib for all of network -- can explore","Params":{"Layer.Inhib.Layer.Gi":"1.8"},"Hypers":{"Layer.Inhib.Layer.Gi":{"Tweak":"[1.75, 1.85]"}}},"Search":[{"Name":"Input","Type":"Layer","Path":"Layer.Inhib.Layer.Gi","Start":1.8,"Values":[1.75,1.85]}]},{"Param":"Prjn.Learn.LRate","Sel":{"Sel":"Prjn","Desc":"norm and momentum on works better, but wt bal is not better for smaller nets","Params":{"Prjn.Learn.LRate":"0.02","Prjn.Learn.Momentum":"0.9"},"Hypers":{"Prjn.Learn.LRate":{"Tweak":"log"},"Prjn.Learn.Momentum":{"Tweak":"incr"}}},"Search":[{"Name":"HiddenToInput","Type":"Prjn","Path":"Prjn.Learn.LRate","Start":0.02,"Values":[0.01,0.05]},{"Name":"InputToHidden","Type":"Prjn","Path":"Prjn.Learn.LRate","Start":0.02,"Values":[0.01,0.05]}]},{"Param":"Prjn.Learn.Momentum","Sel":{"Sel":"Prjn","Desc":"norm and momentum on works better, but wt bal is not better for smaller nets","Params":{"Prjn.Learn.LRate":"0.02","Prjn.Learn.Momentum":"0.9"},"Hypers":{"Prjn.Learn.LRate":{"Tweak":"log"},"Prjn.Learn.Momentum":{"Tweak":"incr"}}},"Search":[{"Name":"HiddenToInput","Type":"Prjn","Path":"Prjn.Learn.Momentum","Start":0.9,"Values":[0.8,1]},{"Name":"InputToHidden","Type":"Prjn","Path":"Prjn.Learn.Momentum","Start":0.9,"Values":[0.8,1]}]}]
`

func TestTweakHypers(t *testing.T) {
hypers := Flex{}
@@ -255,7 +92,5 @@ func TestTweakHypers(t *testing.T) {
srch := TweaksFromHypers(hypers)
ss := reflectx.StringJSON(srch)
// fmt.Println("\n\n##########\n", ss)
if ss != trgSearch {
t.Errorf("Test Tweak Search output incorrect at: %v!\n", diff.LineDiff(ss, trgSearch))
}
assert.Equal(t, trgSearch, ss)
}
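The golden string was also collapsed from pretty-printed to compact JSON, and the third-party diff library dropped, since assert.Equal already prints a readable diff on mismatch. Here is a sketch of the same compact-JSON golden comparison using encoding/json directly; reflectx.StringJSON's exact formatting, including the trailing newline, is assumed rather than confirmed.

```go
package example

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/assert"
)

// searchEntry mirrors the shape of one Search record in the golden JSON.
type searchEntry struct {
	Name   string    `json:"Name"`
	Path   string    `json:"Path"`
	Start  float64   `json:"Start"`
	Values []float64 `json:"Values"`
}

func TestCompactGoldenJSON(t *testing.T) {
	srch := []searchEntry{
		{Name: "Hidden", Path: "Layer.Inhib.Layer.Gi", Start: 1.4, Values: []float64{1.3, 1.5}},
	}
	b, err := json.Marshal(srch) // compact encoding: no indentation, one line
	assert.NoError(t, err)
	ss := string(b) + "\n" // assumed trailing newline, as in trgSearch above

	trg := `[{"Name":"Hidden","Path":"Layer.Inhib.Layer.Gi","Start":1.4,"Values":[1.3,1.5]}]
`
	// On failure testify shows expected vs. actual, replacing diff.LineDiff.
	assert.Equal(t, trg, ss)
}
```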
2 changes: 1 addition & 1 deletion popcode/popcode2d.go
@@ -135,7 +135,7 @@ func (pc *TwoD) EncodeImpl(pat tensor.Tensor, val math32.Vector2, add bool) error
switch pc.Code {
case GaussBump:
dist := trg.Sub(val).Mul(gnrm)
act = math32.Exp(-dist.LengthSq())
act = math32.Exp(-dist.LengthSquared())

Check failure on line 138 in popcode/popcode2d.go (GitHub Actions / build): dist.LengthSquared undefined (type "cogentcore.org/core/math32".Vector2 has no field or method LengthSquared)
case Localist:
dist := trg.Sub(val)
dist.X = math32.Abs(dist.X)
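
Only the method name changes in the GaussBump branch above (LengthSq to LengthSquared, tracking a math32 rename); the activation is still the exponential of the negative squared length of the normalized distance vector. A plain-Go sketch of that computation, independent of math32:

```go
package main

import (
	"fmt"
	"math"
)

// gaussAct returns the Gaussian-bump activation for a unit at (tx, ty)
// given a target value (vx, vy) and per-dimension normalization gnrm
// (roughly 1/sigma in unit coordinates).
func gaussAct(tx, ty, vx, vy, gnrmX, gnrmY float64) float64 {
	dx := (tx - vx) * gnrmX
	dy := (ty - vy) * gnrmY
	return math.Exp(-(dx*dx + dy*dy)) // exp of negative squared length
}

func main() {
	// Activation peaks at 1 when the unit sits exactly on the value.
	fmt.Println(gaussAct(0.3, 0.9, 0.3, 0.9, 5, 5)) // 1
	fmt.Println(gaussAct(0.5, 0.9, 0.3, 0.9, 5, 5)) // exp(-1) ~ 0.368
}
```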
8 changes: 4 additions & 4 deletions popcode/popcode_test.go
@@ -92,7 +92,7 @@ func TestPopCode2D(t *testing.T) {
CmprFloats(valsY, corValues, "valsY for 11 units", t)

var pat tensor.Float32
pat.SetShape([]int{11, 11}, nil, nil)
pat.SetShape([]int{11, 11})
pc.Encode(&pat, math32.Vec2(0.3, 0.9), Set)
// fmt.Printf("pat for 0.5: %v\n", pat)

@@ -119,7 +119,7 @@ func TestPopCode2DMulti(t *testing.T) {

var pat tensor.Float32
// note: usually you'd use a larger pattern size for multiple values
pat.SetShape([]int{11, 11}, nil, nil)
pat.SetShape([]int{11, 11})
pc.Encode(&pat, math32.Vec2(0.1, 0.9), Set)
pc.Encode(&pat, math32.Vec2(0.9, 0.1), Add)

@@ -137,7 +137,7 @@ func TestPopCode2DMulti(t *testing.T) {
// fmt.Printf("decode pat for 0.1, 0.9; 0.9, 0.1: %v\n", vals)
for _, valv := range vals {
for d := 0; d < 2; d++ {
val := valv.DimSize(math32.Dims(d))
val := valv.Dim(math32.Dims(d))
if val > 0.5 {
if math32.Abs(val-0.9) > difTolMulti {
t.Errorf("did not decode properly: val: %v != 0.9", val)
@@ -203,7 +203,7 @@ func TestTwoDWrap(t *testing.T) {
pc.Clip = false

var pat tensor.Float32
pat.SetShape([]int{21, 21}, nil, nil)
pat.SetShape([]int{21, 21})
tangs := []float32{-179, -90, 0, 90, 179}
for _, ang := range tangs {
v := math32.Vec2(ang, .5)
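
TestTwoDWrap probes the ±180° seam (test angles include −179 and 179), where wrap-around distance matters: with Clip off, −179° and +179° should be treated as 2° apart, not 358°. A sketch of the wrapped angular difference such a test relies on — an assumption about the intended semantics, not the popcode implementation itself:

```go
package main

import (
	"fmt"
	"math"
)

// wrapDiff returns the signed difference a-b mapped into [-180, 180),
// so angles near the +/-180 seam are treated as close neighbors.
func wrapDiff(a, b float64) float64 {
	d := math.Mod(a-b+180, 360)
	if d < 0 {
		d += 360
	}
	return d - 180
}

func main() {
	fmt.Println(wrapDiff(179, -179)) // -2: across the seam, not 358
	fmt.Println(wrapDiff(-90, 90))   // -180
	fmt.Println(wrapDiff(10, 5))     // 5
}
```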
2 changes: 1 addition & 1 deletion prjn/circle.go
@@ -96,7 +96,7 @@ func (cr *Circle) Connect(send, recv *tensor.Shape, same bool) (sendn, recvn *te
sp.X = edge.WrapMinDist(sp.X, float32(sNx), sctr.X)
sp.Y = edge.WrapMinDist(sp.Y, float32(sNy), sctr.Y)
}
d := int(math32.Round(sp.DistTo(sctr)))
d := int(math32.Round(sp.DistanceTo(sctr)))
if d <= cr.Radius {
ri := tensor.Prjn2DIndex(recv, false, ry, rx)
si := tensor.Prjn2DIndex(send, false, sy, sx)
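
edge.WrapMinDist evidently selects, among the wrapped images of a sending coordinate, the one nearest the receiving center, so a circular receptive field can connect across a layer's edges; the commit itself only renames the distance method (DistTo to DistanceTo). A sketch of that minimum-wrapped-distance idea in plain Go (the real signature in the edge package may differ):

```go
package main

import (
	"fmt"
	"math"
)

// wrapMinDist returns whichever image of x (x, x+n, or x-n) lies closest
// to the center c on a dimension of size n that wraps around.
func wrapMinDist(x, n, c float64) float64 {
	best := x
	for _, cand := range []float64{x - n, x + n} {
		if math.Abs(cand-c) < math.Abs(best-c) {
			best = cand
		}
	}
	return best
}

func main() {
	// On a 10-wide layer centered at 1, coordinate 9 is nearer as -1.
	fmt.Println(wrapMinDist(9, 10, 1)) // -1
	// Far from the edge, the coordinate is returned unchanged.
	fmt.Println(wrapMinDist(4, 10, 3)) // 4
}
```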