From 91c17f25fbe9cd56afaa1264bc3d6bb8261b9574 Mon Sep 17 00:00:00 2001
From: Justin Chadwell
Date: Tue, 16 May 2023 12:20:22 +0100
Subject: [PATCH 1/7] build: tidy up print func

Signed-off-by: Justin Chadwell
---
 build/build.go | 89 +++++++++++++++++++++++++-------------------------
 1 file changed, 45 insertions(+), 44 deletions(-)

diff --git a/build/build.go b/build/build.go
index 4fd3cb11..dcb0194f 100644
--- a/build/build.go
+++ b/build/build.go
@@ -886,59 +886,60 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 			cc := c
 			var printRes map[string][]byte
 			rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-				var isFallback bool
-				var origErr error
-				for {
-					if opt.PrintFunc != nil {
-						if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
-							req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
-						} else {
-							req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
-						}
-						req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name
-						if isFallback {
-							req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage
-						}
+				if opt.PrintFunc != nil {
+					if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
+						req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
+					} else {
+						req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
 					}
-					res, err := c.Solve(ctx, req)
-					if err != nil {
-						if origErr != nil {
+					req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name
+				}
+
+				res, err := c.Solve(ctx, req)
+				if err != nil {
+					fallback := false
+					var reqErr *errdefs.UnsupportedSubrequestError
+					if errors.As(err, &reqErr) {
+						switch reqErr.Name {
+						case "frontend.outline", "frontend.targets":
+							fallback = true
+						default:
 							return nil, err
 						}
-						var reqErr *errdefs.UnsupportedSubrequestError
-						if !isFallback {
-							if errors.As(err, &reqErr) {
-								switch reqErr.Name {
-								case "frontend.outline", "frontend.targets":
-									isFallback = true
-									origErr = err
-									continue
-								}
-								return nil, err
-							}
-							// buildkit v0.8 vendored in Docker 20.10 does not support typed errors
-							if strings.Contains(err.Error(), "unsupported request frontend.outline") || strings.Contains(err.Error(), "unsupported request frontend.targets") {
-								isFallback = true
-								origErr = err
-								continue
-							}
-						}
+					} else {
 						return nil, err
 					}
-					if opt.PrintFunc != nil {
-						printRes = res.Metadata
+					// buildkit v0.8 vendored in Docker 20.10 does not support typed errors
+					if strings.Contains(err.Error(), "unsupported request frontend.outline") || strings.Contains(err.Error(), "unsupported request frontend.targets") {
+						fallback = true
 					}
-					results.Set(resultKey(dp.driverIndex, k), res)
-					if resultHandleFunc != nil {
-						resultCtx, err := NewResultContext(ctx, cc, so, res)
-						if err == nil {
-							resultHandleFunc(dp.driverIndex, resultCtx)
-						} else {
-							logrus.Warnf("failed to record result: %s", err)
+
+					if fallback {
+						fmt.Println("falling back!")
+						req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage
+						res2, err2 := c.Solve(ctx, req)
+						if err2 != nil {
+							return nil, err
 						}
+						res = res2
+					} else {
+						return nil, err
+					}
+				}
+				if opt.PrintFunc != nil {
+					printRes = res.Metadata
+				}
+
+				results.Set(resultKey(dp.driverIndex, k), res)
+				if resultHandleFunc != nil {
+					resultCtx, err := NewResultContext(ctx, cc, so, res)
+					if err == nil {
+						resultHandleFunc(dp.driverIndex, resultCtx)
+					} else {
+						logrus.Warnf("failed to record result: %s", err)
 					}
-					return res, nil
 				}
+				return res, nil
 			}, ch)
 			if err != nil {
 				return err

From 2dae553d18a05f196b29c1b2fa5463173540c87c Mon Sep 17 00:00:00 2001
From: Justin Chadwell
Date: Fri, 26 May 2023 10:37:50 +0100
Subject: [PATCH 2/7] build: update outline fallback image

Subrequests have been included in docker/dockerfile:1.5 labs, so we can
update the fallback to point to this release.

Signed-off-by: Justin Chadwell
---
 build/build.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/build/build.go b/build/build.go
index dcb0194f..8a9bba90 100644
--- a/build/build.go
+++ b/build/build.go
@@ -65,7 +65,7 @@ var (
 )
 
 const (
-	printFallbackImage = "docker/dockerfile-upstream:1.4-outline@sha256:627443ff4e2d0f635d429cfc1da5388bcd5a70949c38adcd3cd7c4e5df67c73c"
+	printFallbackImage = "docker/dockerfile:1.5.2-labs@sha256:f2e91734a84c0922ff47aa4098ab775f1dfa932430d2888dd5cad5251fafdac4"
 )
 
 type Options struct {

From 8d822fb06c115ca0edf8e54e73d907ee924e5d3f Mon Sep 17 00:00:00 2001
From: Justin Chadwell
Date: Tue, 16 May 2023 17:25:36 +0100
Subject: [PATCH 3/7] build: move main solve request into main gateway call

Now, we always perform the full solve request in the main gateway call.
This ensures that progress works properly, and makes the lifetime
semantics much clearer.

NewResultContext abstracts the details of a successful/failed build, to
always return a single ResultContext, even though the details of how a
gateway is created are different:

- For a failed build, we can just keep the gateway open.
- For a successful build, we immediately open another gateway and
  re-evaluate the build definition in that gateway. This should give an
  instant cache hit (since the build was just successful).

Signed-off-by: Justin Chadwell
---
 build/build.go  |  21 ++--
 build/result.go | 323 +++++++++++++++++++++++++++++++-----------------
 2 files changed, 218 insertions(+), 126 deletions(-)

diff --git a/build/build.go b/build/build.go
index 8a9bba90..3968d351 100644
--- a/build/build.go
+++ b/build/build.go
@@ -885,7 +885,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 			cc := c
 			var printRes map[string][]byte
-			rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+			buildFunc := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
 				if opt.PrintFunc != nil {
 					if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
 						req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
@@ -915,7 +915,6 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 				}
 
 				if fallback {
-					fmt.Println("falling back!")
 					req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage
 					res2, err2 := c.Solve(ctx, req)
 					if err2 != nil {
@@ -931,16 +930,16 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 				}
 
 				results.Set(resultKey(dp.driverIndex, k), res)
-				if resultHandleFunc != nil {
-					resultCtx, err := NewResultContext(ctx, cc, so, res)
-					if err == nil {
-						resultHandleFunc(dp.driverIndex, resultCtx)
-					} else {
-						logrus.Warnf("failed to record result: %s", err)
-					}
-				}
 				return res, nil
-			}, ch)
+			}
+			var rr *client.SolveResponse
+			if resultHandleFunc != nil {
+				var resultCtx *ResultContext
+				resultCtx, rr, err = NewResultContext(ctx, cc, so, "buildx", buildFunc, ch)
+				resultHandleFunc(dp.driverIndex, resultCtx)
+			} else {
+				rr, err = c.Build(ctx, so, "buildx", buildFunc, ch)
+			}
 			if err != nil {
 				return err
 			}

diff --git a/build/result.go b/build/result.go
index de8a6728..26813c44 100644
--- a/build/result.go
+++ b/build/result.go
@@ -6,8 +6,6 @@ import (
 	"encoding/json"
 	"io"
 	"sync"
-	"sync/atomic"
-	"time"
 
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/moby/buildkit/client"
@@ -22,140 +20,232 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-func NewResultContext(ctx context.Context, c *client.Client, solveOpt client.SolveOpt, res *gateway.Result) (*ResultContext, error) {
-	def, err := getDefinition(ctx, res)
-	if err != nil {
-		return nil, err
-	}
-	return getResultAt(ctx, c, solveOpt, def, nil)
-}
-
-func getDefinition(ctx context.Context, res *gateway.Result) (*result.Result[*pb.Definition], error) {
-	return result.ConvertResult(res, func(ref gateway.Reference) (*pb.Definition, error) {
-		st, err := ref.ToState()
-		if err != nil {
-			return nil, err
-		}
-		def, err := st.Marshal(ctx)
-		if err != nil {
-			return nil, err
+// NewResultContext wraps a call to client.Build, additionally returning a
+// ResultContext alongside the standard response and error.
+//
+// This ResultContext can be used to execute additional build steps in the same
+// context as the build occurred, which can allow easy debugging of build
+// failures and successes.
+//
+// If the returned ResultContext is not nil, the caller must call Done() on it.
+func NewResultContext(ctx context.Context, cc *client.Client, opt client.SolveOpt, product string, buildFunc gateway.BuildFunc, ch chan *client.SolveStatus) (*ResultContext, *client.SolveResponse, error) {
+	// Create a new context to wrap the original, and cancel it when the
+	// caller-provided context is cancelled.
+	//
+	// We derive the context from the background context so that we can forbid
+	// cancellation of the build request after <-done is closed (which we do
+	// before returning the ResultContext).
+	baseCtx := ctx
+	ctx, cancel := context.WithCancelCause(context.Background())
+	done := make(chan struct{})
+	go func() {
+		select {
+		case <-baseCtx.Done():
+			cancel(baseCtx.Err())
+		case <-done:
+			// Once done is closed, we've recorded a ResultContext, so we
+			// shouldn't allow cancelling the underlying build request anymore.
 		}
-		return def.ToPB(), nil
-	})
-}
-
-func getResultAt(ctx context.Context, c *client.Client, solveOpt client.SolveOpt, targets *result.Result[*pb.Definition], statusChan chan *client.SolveStatus) (*ResultContext, error) {
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
+	}()
 
-	// forward SolveStatus
-	done := new(atomic.Bool)
-	defer done.Store(true)
-	ch := make(chan *client.SolveStatus)
+	// Create a new channel to forward status messages to the original.
+	//
+	// We do this so that we can discard status messages after the main portion
+	// of the build is complete. This is necessary for the solve error case,
+	// where the original gateway is kept open until the ResultContext is
+	// closed - we don't want progress messages from operations in that
+	// ResultContext to display after this function exits.
+	//
+	// Additionally, callers should wait for the progress channel to be closed.
+	// If we keep the session open and never close the progress channel, the
+	// caller will likely hang.
+	baseCh := ch
+	ch = make(chan *client.SolveStatus)
 	go func() {
 		for {
-			s := <-ch
-			if s == nil {
+			s, ok := <-ch
+			if !ok {
 				return
 			}
-			if done.Load() {
-				// Do not forward if the function returned because statusChan is possibly closed
-				continue
-			}
 			select {
-			case statusChan <- s:
-			case <-ctx.Done():
+			case <-baseCh:
+				// base channel is closed, discard status messages
+			default:
+				baseCh <- s
 			}
 		}
 	}()
+	defer close(baseCh)
+
+	var resCtx *ResultContext
+	var resp *client.SolveResponse
+	var respErr error
 
-	// get result
-	resultCtxCh := make(chan *ResultContext)
-	errCh := make(chan error)
 	go func() {
-		solveOpt := solveOpt
-		solveOpt.Ref = ""
-		buildDoneCh := make(chan struct{})
-		_, err := c.Build(context.Background(), solveOpt, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-			doneErr := errors.Errorf("done")
-			ctx, cancel := context.WithCancelCause(ctx)
-			defer cancel(doneErr)
-
-			// force evaluation of all targets in parallel
-			results := make(map[*pb.Definition]*gateway.Result)
-			resultsMu := sync.Mutex{}
-			eg, egCtx := errgroup.WithContext(ctx)
-			targets.EachRef(func(def *pb.Definition) error {
-				eg.Go(func() error {
-					res2, err := c.Solve(egCtx, gateway.SolveRequest{
-						Evaluate:   true,
-						Definition: def,
-					})
-					if err != nil {
-						return err
-					}
-					resultsMu.Lock()
-					results[def] = res2
-					resultsMu.Unlock()
-					return nil
-				})
-				return nil
-			})
-			resultCtx := ResultContext{}
-			if err := eg.Wait(); err != nil {
+		defer cancel(context.Canceled) // ensure no dangling processes
+
+		var res *gateway.Result
+		var err error
+		resp, err = cc.Build(ctx, opt, product, func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+			var err error
+			res, err = buildFunc(ctx, c)
+
+			if res != nil && err == nil {
+				// Force evaluation of the build result (otherwise, we likely
+				// won't get a solve error)
+				def, err2 := getDefinition(ctx, res)
+				if err2 != nil {
+					return nil, err2
+				}
+				res, err = evalDefinition(ctx, c, def)
+			}
+
+			if err != nil {
+				// Scenario 1: we failed to evaluate a node somewhere in the
+				// build graph.
+				//
+				// In this case, we construct a ResultContext from this
+				// original Build session, and return it alongside the original
+				// build error. We then need to keep the gateway session open
+				// until the caller explicitly closes the ResultContext.
+
 				var se *errdefs.SolveError
 				if errors.As(err, &se) {
-					resultCtx.solveErr = se
-				} else {
-					return nil, err
+					resCtx = &ResultContext{
+						done:     make(chan struct{}),
+						solveErr: se,
+						gwClient: c,
+						gwCtx:    ctx,
+					}
+					respErr = se
+					close(done)
+
+					// Block until the caller closes the ResultContext.
+					select {
+					case <-resCtx.done:
+					case <-ctx.Done():
+					}
 				}
 			}
-			res2, _ := result.ConvertResult(targets, func(def *pb.Definition) (gateway.Reference, error) {
-				if res, ok := results[def]; ok {
-					return res.Ref, nil
-				}
-				return nil, nil
-			})
+			return res, err
+		}, ch)
+		if resCtx != nil {
+			return
+		}
+		if err != nil {
+			// Something unexpected failed during the build, we didn't succeed,
+			// but we also didn't make it far enough to create a ResultContext.
+			respErr = err
+			close(done)
+			return
+		}
 
-			// Record the client and ctx as well so that containers can be created from the SolveError.
-			resultCtx.res = res2
-			resultCtx.gwClient = c
-			resultCtx.gwCtx = ctx
-			resultCtx.gwDone = func() {
-				cancel(doneErr)
-				// wait for Build() completion(or timeout) to ensure the Build's finalizing and avoiding an error "context canceled"
-				select {
-				case <-buildDoneCh:
-				case <-time.After(5 * time.Second):
-				}
+		// Scenario 2: we successfully built the image with no errors.
+		//
+		// In this case, the original gateway session has now been closed
+		// since the Build has been completed. So, we need to create a new
+		// gateway session to populate the ResultContext. To do this, we
+		// need to re-evaluate the target result, in this new session. This
+		// should be instantaneous since the result should be cached.
+
+		def, err := getDefinition(ctx, res)
+		if err != nil {
+			respErr = err
+			close(done)
+			return
+		}
+
+		// NOTE: ideally this second connection should be lazily opened
+		opt := opt
+		opt.Ref = ""
+		_, respErr = cc.Build(ctx, opt, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+			res, err := evalDefinition(ctx, c, def)
+			if err != nil {
+				// This should probably not happen, since we've previously
+				// successfully evaluated the same result with no issues.
+				return nil, errors.Wrap(err, "inconsistent solve result")
 			}
-			select {
-			case resultCtxCh <- &resultCtx:
-			case <-ctx.Done():
-				return nil, ctx.Err()
+			resCtx = &ResultContext{
+				done:     make(chan struct{}),
+				res:      res,
+				gwClient: c,
+				gwCtx:    ctx,
 			}
+			close(done)
 
-			// wait for cleanup or cancel
-			<-ctx.Done()
-			if context.Cause(ctx) != doneErr { // doneErr is not an error.
-				return nil, ctx.Err()
+			// Block until the caller closes the ResultContext.
+			select {
+			case <-resCtx.done:
+			case <-ctx.Done():
 			}
-			return nil, nil
-		}, ch)
-		close(buildDoneCh)
-		if err != nil {
-			errCh <- err
+			return nil, ctx.Err()
+		}, nil)
+		if resCtx != nil {
+			return
 		}
+		close(done)
 	}()
 
+	// Block until the other thread signals that it's completed the build.
 	select {
-	case resultCtx := <-resultCtxCh:
-		return resultCtx, nil
-	case err := <-errCh:
+	case <-done:
+	case <-baseCtx.Done():
+		if respErr == nil {
+			respErr = baseCtx.Err()
+		}
+	}
+	return resCtx, resp, respErr
+}
+
+// getDefinition converts a gateway result into a collection of definitions for
+// each ref in the result.
+func getDefinition(ctx context.Context, res *gateway.Result) (*result.Result[*pb.Definition], error) {
+	return result.ConvertResult(res, func(ref gateway.Reference) (*pb.Definition, error) {
+		st, err := ref.ToState()
+		if err != nil {
+			return nil, err
+		}
+		def, err := st.Marshal(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return def.ToPB(), nil
+	})
+}
+
+// evalDefinition performs the reverse of getDefinition, converting a
+// collection of definitions into a gateway result.
+func evalDefinition(ctx context.Context, c gateway.Client, defs *result.Result[*pb.Definition]) (*gateway.Result, error) {
+	// force evaluation of all targets in parallel
+	results := make(map[*pb.Definition]*gateway.Result)
+	resultsMu := sync.Mutex{}
+	eg, egCtx := errgroup.WithContext(ctx)
+	defs.EachRef(func(def *pb.Definition) error {
+		eg.Go(func() error {
+			res, err := c.Solve(egCtx, gateway.SolveRequest{
+				Evaluate:   true,
+				Definition: def,
+			})
+			if err != nil {
+				return err
+			}
+			resultsMu.Lock()
+			results[def] = res
+			resultsMu.Unlock()
+			return nil
+		})
+		return nil
+	})
+	if err := eg.Wait(); err != nil {
 		return nil, err
-	case <-ctx.Done():
-		return nil, ctx.Err()
 	}
+	res, _ := result.ConvertResult(defs, func(def *pb.Definition) (gateway.Reference, error) {
+		if res, ok := results[def]; ok {
+			return res.Ref, nil
+		}
+		return nil, nil
+	})
+	return res, nil
 }
 
 // ResultContext is a build result with the client that built it.
@@ -163,17 +253,18 @@ type ResultContext struct {
 	res      *gateway.Result
 	solveErr *errdefs.SolveError
 
-	gwClient   gateway.Client
-	gwCtx      context.Context
-	gwDone     func()
-	gwDoneOnce sync.Once
+	done     chan struct{}
+	doneOnce sync.Once
+
+	gwClient gateway.Client
+	gwCtx    context.Context
 
 	cleanups   []func()
 	cleanupsMu sync.Mutex
 }
 
 func (r *ResultContext) Done() {
-	r.gwDoneOnce.Do(func() {
+	r.doneOnce.Do(func() {
 		r.cleanupsMu.Lock()
 		cleanups := r.cleanups
 		r.cleanups = nil
@@ -181,7 +272,9 @@
 		for _, f := range cleanups {
 			f()
 		}
-		r.gwDone()
+
+		close(r.done)
+		<-r.gwCtx.Done()
 	})
 }

From cd1648192e3909220647da93a0712461c4ed3877 Mon Sep 17 00:00:00 2001
From: Justin Chadwell
Date: Wed, 31 May 2023 09:50:46 +0100
Subject: [PATCH 4/7] build: rename ResultContext to ResultHandle

Signed-off-by: Justin Chadwell
---
 build/build.go                    |  8 ++---
 build/invoke.go                   |  6 ++--
 build/result.go                   | 60 +++++++++++++++----------------
 controller/build/build.go         | 14 ++++----
 controller/local/controller.go    |  4 +--
 controller/processes/processes.go |  2 +-
 controller/remote/controller.go   |  2 +-
 controller/remote/server.go       |  6 ++--
 8 files changed, 51 insertions(+), 51 deletions(-)

diff --git a/build/build.go b/build/build.go
index 3968d351..6d2e3175 100644
--- a/build/build.go
+++ b/build/build.go
@@ -667,7 +667,7 @@ func Build(ctx context.Context, nodes []builder.Node, opt map[string]Options, do
 	return BuildWithResultHandler(ctx, nodes, opt, docker, configDir, w, nil)
 }
 
-func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultContext)) (resp map[string]*client.SolveResponse, err error) {
+func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, err error) {
 	if len(nodes) == 0 {
 		return nil, errors.Errorf("driver required for build")
 	}
@@ -934,9 +934,9 @@
 			}
 			var rr *client.SolveResponse
 			if resultHandleFunc != nil {
-				var resultCtx *ResultContext
-				resultCtx, rr, err = NewResultContext(ctx, cc, so, "buildx", buildFunc, ch)
-				resultHandleFunc(dp.driverIndex, resultCtx)
+				var resultHandle *ResultHandle
+				resultHandle, rr, err = NewResultHandle(ctx, cc, so, "buildx", buildFunc, ch)
+				resultHandleFunc(dp.driverIndex, resultHandle)
 			} else {
 				rr, err = c.Build(ctx, so, "buildx", buildFunc, ch)
 			}

diff --git a/build/invoke.go b/build/invoke.go
index 38425ec3..eb17e0b2 100644
--- a/build/invoke.go
+++ b/build/invoke.go
@@ -21,10 +21,10 @@ type Container struct {
 	initStarted   atomic.Bool
 	container     gateway.Container
 	releaseCh     chan struct{}
-	resultCtx     *ResultContext
+	resultCtx     *ResultHandle
 }
 
-func NewContainer(ctx context.Context, resultCtx *ResultContext, cfg *controllerapi.InvokeConfig) (*Container, error) {
+func NewContainer(ctx context.Context, resultCtx *ResultHandle, cfg *controllerapi.InvokeConfig) (*Container, error) {
 	mainCtx := ctx
 
 	ctrCh := make(chan *Container)
@@ -112,7 +112,7 @@ func (c *Container) Exec(ctx context.Context, cfg *controllerapi.InvokeConfig, s
 	return err
 }
 
-func exec(ctx context.Context, resultCtx *ResultContext, cfg *controllerapi.InvokeConfig, ctr gateway.Container, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
+func exec(ctx context.Context, resultCtx *ResultHandle, cfg *controllerapi.InvokeConfig, ctr gateway.Container, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
 	processCfg, err := resultCtx.getProcessConfig(cfg, stdin, stdout, stderr)
 	if err != nil {
 		return err
 	}

diff --git a/build/result.go b/build/result.go
index 26813c44..6c58db20 100644
--- a/build/result.go
+++ b/build/result.go
@@ -20,21 +20,21 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-// NewResultContext wraps a call to client.Build, additionally returning a
-// ResultContext alongside the standard response and error.
+// NewResultHandle wraps a call to client.Build, additionally returning a
+// ResultHandle alongside the standard response and error.
 //
-// This ResultContext can be used to execute additional build steps in the same
+// This ResultHandle can be used to execute additional build steps in the same
 // context as the build occurred, which can allow easy debugging of build
 // failures and successes.
 //
-// If the returned ResultContext is not nil, the caller must call Done() on it.
-func NewResultContext(ctx context.Context, cc *client.Client, opt client.SolveOpt, product string, buildFunc gateway.BuildFunc, ch chan *client.SolveStatus) (*ResultContext, *client.SolveResponse, error) {
+// If the returned ResultHandle is not nil, the caller must call Done() on it.
+func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt, product string, buildFunc gateway.BuildFunc, ch chan *client.SolveStatus) (*ResultHandle, *client.SolveResponse, error) {
 	// Create a new context to wrap the original, and cancel it when the
 	// caller-provided context is cancelled.
 	//
 	// We derive the context from the background context so that we can forbid
 	// cancellation of the build request after <-done is closed (which we do
-	// before returning the ResultContext).
+	// before returning the ResultHandle).
 	baseCtx := ctx
 	ctx, cancel := context.WithCancelCause(context.Background())
 	done := make(chan struct{})
@@ -43,7 +43,7 @@
 		case <-baseCtx.Done():
 			cancel(baseCtx.Err())
 		case <-done:
-			// Once done is closed, we've recorded a ResultContext, so we
+			// Once done is closed, we've recorded a ResultHandle, so we
 			// shouldn't allow cancelling the underlying build request anymore.
 		}
 	}()
@@ -52,9 +52,9 @@
 	//
 	// We do this so that we can discard status messages after the main portion
 	// of the build is complete. This is necessary for the solve error case,
-	// where the original gateway is kept open until the ResultContext is
+	// where the original gateway is kept open until the ResultHandle is
 	// closed - we don't want progress messages from operations in that
-	// ResultContext to display after this function exits.
+	// ResultHandle to display after this function exits.
 	//
 	// Additionally, callers should wait for the progress channel to be closed.
 	// If we keep the session open and never close the progress channel, the
 	// caller will likely hang.
@@ -77,9 +77,9 @@
 	}()
 	defer close(baseCh)
 
-	var resCtx *ResultContext
 	var resp *client.SolveResponse
 	var respErr error
+	var respHandle *ResultHandle
 
 	go func() {
@@ -104,14 +104,14 @@
 				// Scenario 1: we failed to evaluate a node somewhere in the
 				// build graph.
 				//
-				// In this case, we construct a ResultContext from this
+				// In this case, we construct a ResultHandle from this
 				// original Build session, and return it alongside the original
 				// build error. We then need to keep the gateway session open
-				// until the caller explicitly closes the ResultContext.
+				// until the caller explicitly closes the ResultHandle.
 
 				var se *errdefs.SolveError
 				if errors.As(err, &se) {
-					resCtx = &ResultContext{
+					respHandle = &ResultHandle{
 						done:     make(chan struct{}),
 						solveErr: se,
 						gwClient: c,
 						gwCtx:    ctx,
 					}
 					respErr = se
 					close(done)
 
-					// Block until the caller closes the ResultContext.
+					// Block until the caller closes the ResultHandle.
 					select {
-					case <-resCtx.done:
+					case <-respHandle.done:
 					case <-ctx.Done():
 					}
 				}
 			}
 			return res, err
 		}, ch)
-		if resCtx != nil {
+		if respHandle != nil {
 			return
 		}
 		if err != nil {
 			// Something unexpected failed during the build, we didn't succeed,
-			// but we also didn't make it far enough to create a ResultContext.
+			// but we also didn't make it far enough to create a ResultHandle.
 			respErr = err
 			close(done)
 			return
@@ -144,7 +144,7 @@
 		//
 		// In this case, the original gateway session has now been closed
 		// since the Build has been completed. So, we need to create a new
-		// gateway session to populate the ResultContext. To do this, we
+		// gateway session to populate the ResultHandle. To do this, we
 		// need to re-evaluate the target result, in this new session. This
 		// should be instantaneous since the result should be cached.
 
@@ -165,7 +165,7 @@
 			// successfully evaluated the same result with no issues.
 			return nil, errors.Wrap(err, "inconsistent solve result")
 		}
-		resCtx = &ResultContext{
+		respHandle = &ResultHandle{
 			done:     make(chan struct{}),
 			res:      res,
 			gwClient: c,
 			gwCtx:    ctx,
 		}
 		close(done)
 
-		// Block until the caller closes the ResultContext.
+		// Block until the caller closes the ResultHandle.
 		select {
-		case <-resCtx.done:
+		case <-respHandle.done:
 		case <-ctx.Done():
 		}
 		return nil, ctx.Err()
 	}, nil)
-	if resCtx != nil {
+	if respHandle != nil {
 		return
 	}
 	close(done)
 }()
@@ -194,7 +194,7 @@
 			respErr = baseCtx.Err()
 		}
 	}
-	return resCtx, resp, respErr
+	return respHandle, resp, respErr
 }
 
 // getDefinition converts a gateway result into a collection of definitions for
@@ -248,8 +248,8 @@
 	return res, nil
 }
 
-// ResultContext is a build result with the client that built it.
-type ResultContext struct {
+// ResultHandle is a build result with the client that built it.
+type ResultHandle struct {
 	res      *gateway.Result
 	solveErr *errdefs.SolveError
 
@@ -263,7 +263,7 @@
 	cleanupsMu sync.Mutex
 }
 
-func (r *ResultContext) Done() {
+func (r *ResultHandle) Done() {
 	r.doneOnce.Do(func() {
 		r.cleanupsMu.Lock()
 		cleanups := r.cleanups
 		r.cleanups = nil
@@ -278,18 +278,18 @@
 	})
 }
 
-func (r *ResultContext) registerCleanup(f func()) {
+func (r *ResultHandle) registerCleanup(f func()) {
 	r.cleanupsMu.Lock()
 	r.cleanups = append(r.cleanups, f)
 	r.cleanupsMu.Unlock()
 }
 
-func (r *ResultContext) build(buildFunc gateway.BuildFunc) (err error) {
+func (r *ResultHandle) build(buildFunc gateway.BuildFunc) (err error) {
 	_, err = buildFunc(r.gwCtx, r.gwClient)
 	return err
 }
 
-func (r *ResultContext) getContainerConfig(ctx context.Context, c gateway.Client, cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
+func (r *ResultHandle) getContainerConfig(ctx context.Context, c gateway.Client, cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
 	if r.res != nil && r.solveErr == nil {
 		logrus.Debugf("creating container from successful build")
 		ccfg, err := containerConfigFromResult(ctx, r.res, c, *cfg)
@@ -308,7 +308,7 @@
 	return containerCfg, nil
 }
 
-func (r *ResultContext) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) (_ gateway.StartRequest, err error) {
+func (r *ResultHandle) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) (_ gateway.StartRequest, err error) {
 	processCfg := newStartRequest(stdin, stdout, stderr)
 	if r.res != nil && r.solveErr == nil {
 		logrus.Debugf("creating container from successful build")

diff --git a/controller/build/build.go b/controller/build/build.go
index 23b87472..64069d78 100644
--- a/controller/build/build.go
+++ b/controller/build/build.go
@@ -33,10 +33,10 @@ const defaultTargetName = "default"
 
 // RunBuild runs the specified build and returns the result.
 //
-// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
+// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
+func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
 	if in.NoCache && len(in.NoCacheFilter) > 0 {
 		return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
 	}
@@ -176,7 +176,7 @@
 	resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progress, generateResult)
 	err = wrapBuildError(err, false)
 	if err != nil {
-		// NOTE: buildTargets can return *build.ResultContext even on error.
+		// NOTE: buildTargets can return *build.ResultHandle even on error.
 		return nil, res, err
 	}
 	return resp, res, nil
@@ -184,17 +184,17 @@
 
 // buildTargets runs the specified build and returns the result.
 //
-// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
+// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
-	var res *build.ResultContext
+func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
+	var res *build.ResultHandle
 	var resp map[string]*client.SolveResponse
 	var err error
 	if generateResult {
 		var mu sync.Mutex
 		var idx int
-		resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultContext) {
+		resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultHandle) {
 			mu.Lock()
 			defer mu.Unlock()
 			if res == nil || driverIndex < idx {

diff --git a/controller/local/controller.go b/controller/local/controller.go
index 9d69412e..fb1cd282 100644
--- a/controller/local/controller.go
+++ b/controller/local/controller.go
@@ -29,7 +29,7 @@ func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli, logger
 type buildConfig struct {
 	// TODO: these two structs should be merged
 	// Discussion: https://github.com/docker/buildx/pull/1640#discussion_r1113279719
-	resultCtx    *build.ResultContext
+	resultCtx    *build.ResultHandle
 	buildOptions *controllerapi.BuildOptions
 }
 
@@ -49,7 +49,7 @@
 	defer b.buildOnGoing.Store(false)
 
 	resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
-	// NOTE: RunBuild can return *build.ResultContext even on error.
+	// NOTE: RunBuild can return *build.ResultHandle even on error.
 	if res != nil {
 		b.buildConfig = buildConfig{
 			resultCtx: res,

diff --git a/controller/processes/processes.go b/controller/processes/processes.go
index d0c0df42..c74b39dd 100644
--- a/controller/processes/processes.go
+++ b/controller/processes/processes.go
@@ -98,7 +98,7 @@ func (m *Manager) DeleteProcess(id string) error {
 // When a container isn't available (i.e. first time invoking or the container has exited) or cfg.Rollback is set,
 // this method will start a new container and run the process in it. Otherwise, this method starts a new process in the
 // existing container.
-func (m *Manager) StartProcess(pid string, resultCtx *build.ResultContext, cfg *pb.InvokeConfig) (*Process, error) {
+func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *pb.InvokeConfig) (*Process, error) {
 	// Get the target result to invoke a container from
 	var ctr *build.Container
 	if a := m.container.Load(); a != nil {

diff --git a/controller/remote/controller.go b/controller/remote/controller.go
index 7d7c8496..28840998 100644
--- a/controller/remote/controller.go
+++ b/controller/remote/controller.go
@@ -148,7 +148,7 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
 		}()
 
 		// prepare server
-		b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultContext, error) {
+		b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, error) {
 			return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true)
 		})
 		defer b.Close()

diff --git a/controller/remote/server.go b/controller/remote/server.go
index d89fc218..a39fea51 100644
--- a/controller/remote/server.go
+++ b/controller/remote/server.go
@@ -19,7 +19,7 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultContext, err error)
+type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, err error)
 
 func NewServer(buildFunc BuildFunc) *Server {
 	return &Server{
@@ -40,7 +40,7 @@ type session struct {
 	buildOptions *pb.BuildOptions
 	inputPipe    *io.PipeWriter
 
-	result *build.ResultContext
+	result *build.ResultHandle
 
 	processes *processes.Manager
 }
@@ -205,7 +205,7 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
 	resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
 	m.sessionMu.Lock()
 	if s, ok := m.session[ref]; ok {
-		// NOTE: buildFunc can return *build.ResultContext even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
+		// NOTE: buildFunc can return *build.ResultHandle even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
 		if res != nil {
 			s.result = res
 			s.cancelBuild = cancel

From 5b27d5a9f649082f1ed1622e347dcf904dbb45bf Mon Sep 17 00:00:00 2001
From: Justin Chadwell
Date: Wed, 31 May 2023 15:13:00 +0100
Subject: [PATCH 5/7] build: cleanup res if returned in basic build

In practice, this shouldn't happen, but the check is good to include
anyway.
Signed-off-by: Justin Chadwell
---
 commands/build.go | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/commands/build.go b/commands/build.go
index 3467e463..231283c3 100644
--- a/commands/build.go
+++ b/commands/build.go
@@ -297,7 +297,10 @@ func runBuild(dockerCli command.Cli, options buildOptions) (err error) {
 }
 
 func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, error) {
-	resp, _, err := cbuild.RunBuild(ctx, dockerCli, *opts, os.Stdin, printer, false)
+	resp, res, err := cbuild.RunBuild(ctx, dockerCli, *opts, os.Stdin, printer, false)
+	if res != nil {
+		res.Done()
+	}
 	return resp, err
 }

From b7f0b3d76384776aaeac942615c3319c5168b2fb Mon Sep 17 00:00:00 2001
From: Justin Chadwell
Date: Wed, 31 May 2023 15:13:56 +0100
Subject: [PATCH 6/7] build: clear exports for secondary solve request

Signed-off-by: Justin Chadwell
---
 build/result.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/build/result.go b/build/result.go
index 6c58db20..32836d09 100644
--- a/build/result.go
+++ b/build/result.go
@@ -158,6 +158,8 @@
 		// NOTE: ideally this second connection should be lazily opened
 		opt := opt
 		opt.Ref = ""
+		opt.Exports = nil
+		opt.CacheExports = nil
 		_, respErr = cc.Build(ctx, opt, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
 			res, err := evalDefinition(ctx, c, def)
 			if err != nil {
 				// This should probably not happen, since we've previously
 				// successfully evaluated the same result with no issues.
 				return nil, errors.Wrap(err, "inconsistent solve result")
 			}

From c6db4cf3424f6109f969b9f63d3b5d2b4619a0c1 Mon Sep 17 00:00:00 2001
From: Justin Chadwell
Date: Tue, 6 Jun 2023 09:08:30 +0200
Subject: [PATCH 7/7] build: clarify NewResultHandle comment

Signed-off-by: Justin Chadwell
---
 build/result.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/build/result.go b/build/result.go
index 32836d09..91ba9efa 100644
--- a/build/result.go
+++ b/build/result.go
@@ -20,8 +20,8 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-// NewResultHandle wraps a call to client.Build, additionally returning a
-// ResultHandle alongside the standard response and error.
+// NewResultHandle makes a call to client.Build, additionally returning an
+// opaque ResultHandle alongside the standard response and error.
 //
 // This ResultHandle can be used to execute additional build steps in the same
 // context as the build occurred, which can allow easy debugging of build
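
Taken together, the series leaves NewResultHandle with a small but strict
caller contract: a non-nil handle can be returned even alongside a non-nil
error (the failed-solve gateway is kept open precisely so it can be
debugged), and the caller must always call Done() to release the underlying
gateway session. A minimal usage sketch follows; the wrapper function and
its name are hypothetical illustrations, while NewResultHandle, its
signature, and the Done() requirement are taken from the diffs above:

	package main

	import (
		"context"

		"github.com/docker/buildx/build"
		"github.com/moby/buildkit/client"
		gateway "github.com/moby/buildkit/frontend/gateway/client"
	)

	// buildWithDebug is a hypothetical caller of build.NewResultHandle.
	func buildWithDebug(ctx context.Context, cc *client.Client, opt client.SolveOpt,
		buildFunc gateway.BuildFunc, ch chan *client.SolveStatus) (*client.SolveResponse, error) {
		// The handle must be checked and closed independently of the error:
		// scenario 1 (failed solve) returns both a handle and an error.
		handle, resp, err := build.NewResultHandle(ctx, cc, opt, "buildx", buildFunc, ch)
		if handle != nil {
			// The gateway session backing the handle stays open until Done()
			// is called, so any debugging steps against the (possibly failed)
			// build must run before this.
			defer handle.Done()
		}
		return resp, err
	}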