Added MapIdParallel to the View #28

Merged
merged 2 commits on Oct 14, 2024
Changes from 1 commit
26 changes: 21 additions & 5 deletions internal/gen/main.go
@@ -1,13 +1,12 @@
package main

import (
_ "embed"
"os"
"strings"
"text/template"
_ "embed"
"text/template"
)


//go:embed view.tgo
var viewTemplate string

@@ -35,6 +34,9 @@ func main() {
}
funcs := template.FuncMap{
"join": strings.Join,
"lower": func(val string) string {
return strings.ToLower(val)
},
"nils": func(n int) string {
val := make([]string, 0)
for i := 0; i < n; i++ {
@@ -63,15 +65,29 @@ func main() {
}
return strings.Join(ret, ", ")
},
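// e.g. ["Position", "Velocity"] -> "position []Position; velocity []Velocity" (the field list of the work-package struct)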
"parallelLambdaStructArgs": func(val []string) string {
ret := make([]string, len(val))
for i := range val {
ret[i] = strings.ToLower(val[i]) + " []" + val[i]
}
return strings.Join(ret, "; ")
},
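// e.g. ["Position", "Velocity"] -> "paramPosition, paramVelocity" (the arguments forwarded to the user lambda)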
"parallelLambdaArgsFromStruct": func(val []string) string {
ret := make([]string, len(val))
for i := range val {
ret[i] = "param" + val[i]
}
return strings.Join(ret, ", ")
},
}

t := template.Must(template.New("ViewTemplate").Funcs(funcs).Parse(viewTemplate))

viewFile, err := os.OpenFile("view_gen.go", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
if err != nil {
panic(err)
}
defer viewFile.Close()

t.Execute(viewFile, data)
}
91 changes: 91 additions & 0 deletions internal/gen/view.tgo
@@ -1,5 +1,10 @@
package ecs

import (
"sync"
"runtime"
)

// Warning: This is an autogenerated file. Do not modify!!

{{range $i, $element := .Views}}
@@ -176,6 +181,92 @@ func (v *View{{len $element}}[{{join $element ","}}]) MapId(lambda func(id Id, {
// }
}

// Maps the lambda function across every entity which matched the specified filters. Splits components into chunks of up to `chunkSize` entries and maps them in parallel. Smaller chunks result in higher overhead for small lambdas, but make execution time more predictable. If the chunk size is too high, some workers may be left underutilized.
func (v *View{{len $element}}[{{join $element ","}}]) MapIdParallel(chunkSize int, lambda func(id Id, {{lambdaArgs $element}})) {
v.filter.regenerate(v.world)

{{range $ii, $arg := $element}}
var slice{{$arg}} *componentSlice[{{$arg}}]
var comp{{$arg}} []{{$arg}}
{{end}}


workDone := &sync.WaitGroup{}
type workPackage struct{start int; end int; ids []Id; {{parallelLambdaStructArgs $element}}}
newWorkChanel := make(chan workPackage)
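// Each worker drains newWorkChanel until it is closed, applying the lambda to every live entity in the received index range.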
mapWorker := func() {
defer workDone.Done()

for {
newWork, ok := <-newWorkChanel
if !ok {
return
}

// TODO: most probably this part ruins vectorization and SIMD. Maybe create a new (faster) function where this will not occur?
{{range $ii, $arg := $element}}
var param{{$arg}} *{{$arg}}{{end}}
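// The pointers are re-pointed into the component slices on every iteration; a nil slice (component absent from this archetype) leaves the corresponding lambda argument nil.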

for i := newWork.start; i < newWork.end; i++ {
{{range $ii, $arg := $element}}
if newWork.{{lower $arg}} != nil { param{{$arg}} = &newWork.{{lower $arg}}[i]}{{end}}

lambda(newWork.ids[i], {{parallelLambdaArgsFromStruct $element}})
}
}
}
parallelLevel := runtime.NumCPU()*2
workDone.Add(parallelLevel) // register every worker up front so workDone.Wait() blocks until they all return
for i := 0; i < parallelLevel; i++ {
go mapWorker()
}


for _, archId := range v.filter.archIds {
{{range $ii, $arg := $element}}
slice{{$arg}}, _ = v.storage{{$arg}}.slice[archId]{{end}}

lookup := v.world.engine.lookup[archId]
if lookup == nil { panic("LookupList is missing!") }
// lookup, ok := v.world.engine.lookup[archId]
// if !ok { panic("LookupList is missing!") }
ids := lookup.id

{{range $ii, $arg := $element}}
comp{{$arg}} = nil
if slice{{$arg}} != nil {
comp{{$arg}} = slice{{$arg}}.comp
}{{end}}

startWorkRangeIndex := -1
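// Walk the id slice and queue contiguous [start, end) ranges of roughly chunkSize entries, breaking a range whenever a hole (InvalidEntity) is encountered.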
for idx := range ids {
// TODO: chunks may be very small because of holes. Some clever heuristic is required. Most probably this is a problem of storage segmentation rather than of this map algorithm.
if ids[idx] == InvalidEntity {
if startWorkRangeIndex != -1 {
newWorkChanel <- workPackage{start: startWorkRangeIndex, end: idx, ids: ids, {{range $ii, $arg := $element}} {{lower $arg}}: comp{{$arg}},{{end}}}
startWorkRangeIndex = -1
}
continue
} // Skip if it's a hole

if startWorkRangeIndex == -1 {
startWorkRangeIndex = idx
}

if idx - startWorkRangeIndex >= chunkSize {
newWorkChanel <- workPackage{start: startWorkRangeIndex, end: idx+1, ids: ids, {{range $ii, $arg := $element}} {{lower $arg}}: comp{{$arg}},{{end}}}
startWorkRangeIndex = -1
}
}

if startWorkRangeIndex != -1 {
newWorkChanel <- workPackage{start: startWorkRangeIndex, end: len(ids), {{range $ii, $arg := $element}} {{lower $arg}}: comp{{$arg}},{{end}}}
}
}

close(newWorkChanel)
workDone.Wait()
}
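
// A minimal usage sketch, not taken from this PR: it assumes hypothetical Position and
// Velocity components with X/Y fields, a query constructor along the lines of Query2,
// and an arbitrary chunk size of 1024 to illustrate the chunking described above.
//
// view := Query2[Position, Velocity](world)
// view.MapIdParallel(1024, func(id Id, pos *Position, vel *Velocity) {
//     pos.X += vel.X
//     pos.Y += vel.Y
// })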

// Deprecated: This API is a tentative alternative way to map
func (v *View{{len $element}}[{{join $element ","}}]) MapSlices(lambda func(id []Id, {{sliceLambdaArgs $element}})) {
v.filter.regenerate(v.world)