-
Thanks for the great project! Please answer the following before submitting your issue:
I have a question to validate, if my assumption and tests are correct and are fine to use. To summarize I am maintainer of https://github.com/zalando/skipper proxy and we have a lua() filter that uses this project to implement it. A filter is an instance that is part of a route. It's not shared between routes. A user just needs to define code like this to execute lua code in request and response path: function request(ctx, params)
print(c.request.url);
end
function response(ctx, params)
print(c.response.status_code);
end Right now we use for every filter instance a separate lua statepool, but if you think about having 40000 routes and maybe 4000 routes with Right now I am testing it more in depth and tried to share the func newState() (*lua.LState, error) {
L := lua.NewState()
L.PreloadModule("base64", base64.Loader)
L.PreloadModule("http", gluahttp.NewHttpModule(&http.Client{}).Loader)
L.PreloadModule("url", gluaurl.Loader)
L.PreloadModule("json", gjson.Loader)
L.SetGlobal("print", L.NewFunction(printToLog))
L.SetGlobal("sleep", L.NewFunction(sleep))
return L, nil
} When the filter is called, we get a LState from the pool and pass it and the compiled lua code from the filter to execute: func createScript(L *lua.LState, proto *lua.FunctionProto) (*lua.LState, error) {
L.Push(L.NewFunctionFromProto(proto))
err := L.PCall(0, lua.MultRet, nil)
if err != nil {
L.Close()
return nil, err
}
return L, nil
} As far as I see it's not a documented behavior that we can reuse LState. Is it a safe assumption that overwriting a Function is fine? Thanks, sandor |
Beta Was this translation helpful? Give feedback.
Replies: 8 comments 2 replies
-
I just made something: glu — see the HTTP server impl part; maybe it helps.
// compile parses Lua source text and compiles it into a reusable
// *FunctionProto. The chunk is named "handler(<path>)" so runtime
// errors report the route path they came from.
func compile(code string, path string) (*FunctionProto, error) {
	chunkName := fmt.Sprintf("handler(%s)", path)
	ast, err := parse.Parse(strings.NewReader(code), chunkName)
	if err != nil {
		return nil, err
	}
	return Compile(ast, chunkName)
}
func executeHandler(chunk *FunctionProto, c *Ctx) {
x := glu.Get()
defer glu.Put(x)
fn := x.NewFunctionFromProto(chunk)
x.Push(fn)
_ = CtxType.New(x, c)
err := x.PCall(1, 0, nil)
if err != nil {
c.SetStatus(500)
c.SendString(err.Error())
fmt.Printf("error handle %+v : %s", c.URL, err)
return
}
} |
Beta Was this translation helpful? Give feedback.
-
the callFrame is popped after the call, so it may be garbage-collected the next time. |
Beta Was this translation helpful? Give feedback.
-
@ZenLiuCN I don't really understand how this is related. Maybe you can explain a bit more? |
Beta Was this translation helpful? Give feedback.
-
If I'm not misunderstanding, your question is about pooling, right?
// createScript instantiates the precompiled proto on the given state and
// runs it; on success the same state is handed back for reuse.
func createScript(L *lua.LState, proto *lua.FunctionProto) (*lua.LState, error) {
L.Push(L.NewFunctionFromProto(proto))
err := L.PCall(0, lua.MultRet, nil)
if err != nil {
L.Close() // <-- why do this? Even if some error happened, this LState may remain usable.
return nil, err
}
return L, nil
}
//region Pool

// statePool is a simple LIFO free-list of *LState instances. All access to
// the saved slice is guarded by m, so the pool is safe for concurrent use.
type statePool struct {
	m     sync.Mutex // lock to make threadsafe
	saved []*LState  // if want to limit max size, may do some size check on put
}

// create returns an empty pool whose backing slice is pre-sized to PoolSize.
func create() *statePool {
	return &statePool{saved: make([]*LState, 0, PoolSize)}
}

// get pops the most recently returned state, or builds a fresh one when the
// pool is empty.
func (pl *statePool) get() *LState {
	pl.m.Lock()
	defer pl.m.Unlock()
	n := len(pl.saved)
	if n == 0 {
		return pl.new()
	}
	x := pl.saved[n-1]
	pl.saved[n-1] = nil // drop the pool's reference so the state is not pinned
	pl.saved = pl.saved[:n-1]
	return x
}

// new builds and configures a fresh state.
func (pl *statePool) new() *LState {
	L := NewState(Option)
	configurer(L)
	return L
}

// put returns L to the pool after clearing its stack. Closed states are
// dropped: a closed LState must never be reused.
func (pl *statePool) put(L *LState) {
	if L.IsClosed() { //closed should not be used again
		return
	}
	L.Pop(L.GetTop()) // this should clean stack
	pl.m.Lock()
	defer pl.m.Unlock()
	pl.saved = append(pl.saved, L)
}

// Shutdown closes every pooled state. It holds the lock so that a concurrent
// get/put cannot race with Close, and it clears the slice so already-closed
// states can never be handed out by a later get. (The original took no lock
// here and left the closed states in saved.)
func (pl *statePool) Shutdown() {
	pl.m.Lock()
	defer pl.m.Unlock()
	for _, L := range pl.saved {
		L.Close()
	}
	pl.saved = nil
}

//endregion
Beta Was this translation helpful? Give feedback.
-
Much more simply:
import (
lua "github.com/yuin/gopher-lua"
"testing"
)
// Two chunks precompiled once so the benchmarks measure only execution:
// chunk1 always succeeds, chunk2 always raises (the assert fails, a == 2).
var(
chunk1 *lua.FunctionProto
chunk2 *lua.FunctionProto
)
// init compiles both chunks up front; a compile failure is a programmer
// error, so panic is acceptable here.
func init() {
var err error
chunk1,err=CompileChunk(`local a=1+1`,`bench`)
if err!=nil{
panic(err)
}
chunk2,err=CompileChunk(`local a=1+1; assert(a~=2)`,`bench`)
if err!=nil{
panic(err)
}
}
// BenchmarkPoolWithoutClose alternates a succeeding chunk and an erroring
// chunk, and returns the state to the pool on BOTH paths. The seemingly
// redundant error branch deliberately mirrors BenchmarkPoolWithClose so the
// only difference between the two benchmarks is Put vs Close on error.
func BenchmarkPoolWithoutClose(b *testing.B) {
for i := 0; i < b.N; i++ {
x:=Get()
if i%2==0{
x.Push(x.NewFunctionFromProto(chunk1))
}else{
x.Push(x.NewFunctionFromProto(chunk2))
}
err := x.PCall(0, 0, nil)
if err != nil {
// pool the state again even though the call errored
Put(x)
continue
}
Put(x)
}
}
// BenchmarkPoolWithClose is identical to BenchmarkPoolWithoutClose except
// that an erroring call closes the state instead of pooling it, forcing a
// fresh NewState on a later Get — the cost this comparison quantifies.
func BenchmarkPoolWithClose(b *testing.B) {
for i := 0; i < b.N; i++ {
x:=Get()
if i%2==0{
x.Push(x.NewFunctionFromProto(chunk1))
}else{
x.Push(x.NewFunctionFromProto(chunk2))
}
err := x.PCall(0, 0, nil)
if err != nil {
// discard the state entirely on error
x.Close()
continue
}
Put(x)
}
} result
|
Beta Was this translation helpful? Give feedback.
-
another note:
|
Beta Was this translation helpful? Give feedback.
-
Here is a wrapper solution to control global pollution: //StoredState takes an Env snapshot to protect against Global pollution
// StoredState wraps an *LState together with a snapshot of its global
// environment (Env), so a pooled state can detect globals that a script
// added during execution and reset Env before reuse.
type StoredState struct {
*LState
env *LTable // copy of the pristine Env key/value pairs, taken by snapshot()
snap []LValue // the set of keys present in Env at snapshot time
}
// Polluted reports whether Env now contains any key that was absent from
// the snapshot.
// NOTE(review): the bare `return` inside the ForEach callback only exits the
// callback (a per-key "continue"), so every key is always visited; combined
// with the linear snap scan this is O(len(Env) * len(snap)).
func (s *StoredState) Polluted() (r bool) {
s.LState.Env.ForEach(func(k LValue, v LValue) {
// `value` here is a snapshotted KEY, despite the name.
for _, value := range s.snap {
if k == value {
return // key existed at snapshot time: not pollution
}
}
r = true // key is new since the snapshot
return
})
return
}
// snapshot records the current Env contents: env keeps the key/value copy
// used by restore, snap keeps the key set used by Polluted.
func (s *StoredState) snapshot() *StoredState {
s.env = s.NewTable()
s.LState.Env.ForEach(func(k LValue, v LValue) {
s.env.RawSet(k, v)
s.snap = append(s.snap, k)
})
return s
}
// restore clears the stack and, when Env was polluted, replaces it with a
// fresh table refilled from the snapshot copy. A panic anywhere in the
// process is recovered and signalled by returning nil.
func (s *StoredState) restore() (r *StoredState) {
//safeguard
defer func() {
rc := recover()
if rc != nil {
r = nil // swallow the panic; caller must treat nil as "state unusable"
}
}()
s.LState.Pop(s.LState.GetTop())
if s.Polluted() {
s.LState.Env = s.NewTable()
s.env.ForEach(func(k LValue, v LValue) {
s.LState.Env.RawSet(k, v)
})
}
return s
}
Beta Was this translation helpful? Give feedback.
-
@ZenLiuCN thanks so much. I will dig more into it when I have more time again, and I will 100% consider dropping the Close(). LState is in a buffered channel, because the implementer in the past created it like this. It's also fine to have a channel as a sync point; you do not need to lock/unlock. A matter of taste IMO. |
Beta Was this translation helpful? Give feedback.
If I'm not misunderstanding, your question is about pooling, right?

> As far as I see it's not a documented behavior that we can reuse LState

In the repo's readme there is a sample of a pool. LState is not something thread-safe; an LState is a Lua VM instance. L.Push(L.NewFunctionFromProto(proto)) will create an LFunction and push it onto the stack. L.PCall will execute it on the stack, and then push the results onto the stack if there are any. For code such as `function request(ctx, params) print(c.request.url); end` there will be no such action. Check state.go:1970 and you may see the implementation. It is fine to reuse the LState to do such actions, and there is no need to do any resource-release actions as far as I can see.