Merge pull request #1804 from jedevc/fixup-solve

Justin Chadwell 2 years ago committed by GitHub
commit 7cef021a8a

@@ -65,7 +65,7 @@ var (
 )
 
 const (
-	printFallbackImage = "docker/dockerfile-upstream:1.4-outline@sha256:627443ff4e2d0f635d429cfc1da5388bcd5a70949c38adcd3cd7c4e5df67c73c"
+	printFallbackImage = "docker/dockerfile:1.5.2-labs@sha256:f2e91734a84c0922ff47aa4098ab775f1dfa932430d2888dd5cad5251fafdac4"
 )
 
 type Options struct {
@@ -667,7 +667,7 @@ func Build(ctx context.Context, nodes []builder.Node, opt map[string]Options, do
 	return BuildWithResultHandler(ctx, nodes, opt, docker, configDir, w, nil)
 }
 
-func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultContext)) (resp map[string]*client.SolveResponse, err error) {
+func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultHandle)) (resp map[string]*client.SolveResponse, err error) {
 	if len(nodes) == 0 {
 		return nil, errors.Errorf("driver required for build")
 	}
@@ -885,61 +885,61 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s
 	cc := c
 	var printRes map[string][]byte
-	rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-		var isFallback bool
-		var origErr error
-		for {
-			if opt.PrintFunc != nil {
-				if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
-					req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
-				} else {
-					req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
-				}
-				req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name
-				if isFallback {
-					req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage
-				}
-			}
-			res, err := c.Solve(ctx, req)
-			if err != nil {
-				if origErr != nil {
-					return nil, err
-				}
-				var reqErr *errdefs.UnsupportedSubrequestError
-				if !isFallback {
-					if errors.As(err, &reqErr) {
-						switch reqErr.Name {
-						case "frontend.outline", "frontend.targets":
-							isFallback = true
-							origErr = err
-							continue
-						}
-						return nil, err
-					}
-					// buildkit v0.8 vendored in Docker 20.10 does not support typed errors
-					if strings.Contains(err.Error(), "unsupported request frontend.outline") || strings.Contains(err.Error(), "unsupported request frontend.targets") {
-						isFallback = true
-						origErr = err
-						continue
-					}
-				}
-				return nil, err
-			}
-			if opt.PrintFunc != nil {
-				printRes = res.Metadata
-			}
-			results.Set(resultKey(dp.driverIndex, k), res)
-			if resultHandleFunc != nil {
-				resultCtx, err := NewResultContext(ctx, cc, so, res)
-				if err == nil {
-					resultHandleFunc(dp.driverIndex, resultCtx)
-				} else {
-					logrus.Warnf("failed to record result: %s", err)
-				}
-			}
-			return res, nil
-		}
-	}, ch)
+	buildFunc := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+		if opt.PrintFunc != nil {
+			if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
+				req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
+			} else {
+				req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
+			}
+			req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name
+		}
+		res, err := c.Solve(ctx, req)
+		if err != nil {
+			fallback := false
+			var reqErr *errdefs.UnsupportedSubrequestError
+			if errors.As(err, &reqErr) {
+				switch reqErr.Name {
+				case "frontend.outline", "frontend.targets":
+					fallback = true
+				default:
+					return nil, err
+				}
+			} else {
+				return nil, err
+			}
+			// buildkit v0.8 vendored in Docker 20.10 does not support typed errors
+			if strings.Contains(err.Error(), "unsupported request frontend.outline") || strings.Contains(err.Error(), "unsupported request frontend.targets") {
+				fallback = true
+			}
+			if fallback {
+				req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage
+				res2, err2 := c.Solve(ctx, req)
+				if err2 != nil {
+					return nil, err
+				}
+				res = res2
+			} else {
+				return nil, err
+			}
+		}
+		if opt.PrintFunc != nil {
+			printRes = res.Metadata
+		}
+		results.Set(resultKey(dp.driverIndex, k), res)
+		return res, nil
+	}
+
+	var rr *client.SolveResponse
+	if resultHandleFunc != nil {
+		var resultHandle *ResultHandle
+		resultHandle, rr, err = NewResultHandle(ctx, cc, so, "buildx", buildFunc, ch)
+		resultHandleFunc(dp.driverIndex, resultHandle)
+	} else {
+		rr, err = c.Build(ctx, so, "buildx", buildFunc, ch)
+	}
 	if err != nil {
 		return err
 	}
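The hunk above routes every solve through a single buildFunc and only wraps it in NewResultHandle when a callback was supplied. A rough caller-side sketch of that path (hypothetical debugBuild helper; the names and the "keep the lowest driver index" policy mirror buildTargets further below, assuming the usual buildx imports: context, sync, build, builder, dockerutil, progress):

// debugBuild is a hypothetical helper: it runs the build, keeps the handle
// reported for the lowest driver index, and always releases it with Done().
func debugBuild(ctx context.Context, nodes []builder.Node, opts map[string]build.Options,
	docker *dockerutil.Client, configDir string, w progress.Writer) error {
	var (
		mu     sync.Mutex
		handle *build.ResultHandle
		idx    int
	)
	_, err := build.BuildWithResultHandler(ctx, nodes, opts, docker, configDir, w,
		func(driverIndex int, h *build.ResultHandle) {
			mu.Lock()
			defer mu.Unlock()
			// h may be nil if no handle could be constructed for this driver.
			if h != nil && (handle == nil || driverIndex < idx) {
				handle, idx = h, driverIndex
			}
		})
	if handle != nil {
		handle.Done() // the handle keeps a gateway session open until it is closed
	}
	return err
}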

@@ -21,10 +21,10 @@ type Container struct {
 	initStarted   atomic.Bool
 	container     gateway.Container
 	releaseCh     chan struct{}
-	resultCtx     *ResultContext
+	resultCtx     *ResultHandle
 }
 
-func NewContainer(ctx context.Context, resultCtx *ResultContext, cfg *controllerapi.InvokeConfig) (*Container, error) {
+func NewContainer(ctx context.Context, resultCtx *ResultHandle, cfg *controllerapi.InvokeConfig) (*Container, error) {
 	mainCtx := ctx
 
 	ctrCh := make(chan *Container)
@@ -112,7 +112,7 @@ func (c *Container) Exec(ctx context.Context, cfg *controllerapi.InvokeConfig, s
 	return err
 }
 
-func exec(ctx context.Context, resultCtx *ResultContext, cfg *controllerapi.InvokeConfig, ctr gateway.Container, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
+func exec(ctx context.Context, resultCtx *ResultHandle, cfg *controllerapi.InvokeConfig, ctr gateway.Container, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) error {
 	processCfg, err := resultCtx.getProcessConfig(cfg, stdin, stdout, stderr)
 	if err != nil {
 		return err
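Apart from the rename, the invoke path is unchanged: a debug container is created from a handle and commands are executed inside it. A minimal sketch of wiring the two calls together (hypothetical monitorShell helper; Container.Cancel is assumed to exist as the release call, and the usual imports of os, build and controllerapi are assumed):

// monitorShell is a hypothetical sketch: it launches a container from a
// ResultHandle and attaches the current terminal to a shell inside it.
func monitorShell(ctx context.Context, h *build.ResultHandle) error {
	cfg := &controllerapi.InvokeConfig{Tty: true, Cmd: []string{"/bin/sh"}}
	ctr, err := build.NewContainer(ctx, h, cfg)
	if err != nil {
		return err
	}
	defer ctr.Cancel() // assumed: releases the container once the shell exits
	return ctr.Exec(ctx, cfg, os.Stdin, os.Stdout, os.Stderr)
}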

@@ -6,8 +6,6 @@ import (
 	"encoding/json"
 	"io"
 	"sync"
-	"sync/atomic"
-	"time"
 
 	controllerapi "github.com/docker/buildx/controller/pb"
 	"github.com/moby/buildkit/client"
@@ -22,158 +20,253 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-func NewResultContext(ctx context.Context, c *client.Client, solveOpt client.SolveOpt, res *gateway.Result) (*ResultContext, error) {
-	def, err := getDefinition(ctx, res)
-	if err != nil {
-		return nil, err
-	}
-	return getResultAt(ctx, c, solveOpt, def, nil)
-}
-
-func getDefinition(ctx context.Context, res *gateway.Result) (*result.Result[*pb.Definition], error) {
-	return result.ConvertResult(res, func(ref gateway.Reference) (*pb.Definition, error) {
-		st, err := ref.ToState()
-		if err != nil {
-			return nil, err
-		}
-		def, err := st.Marshal(ctx)
-		if err != nil {
-			return nil, err
-		}
-		return def.ToPB(), nil
-	})
-}
-
-func getResultAt(ctx context.Context, c *client.Client, solveOpt client.SolveOpt, targets *result.Result[*pb.Definition], statusChan chan *client.SolveStatus) (*ResultContext, error) {
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-
-	// forward SolveStatus
-	done := new(atomic.Bool)
-	defer done.Store(true)
-	ch := make(chan *client.SolveStatus)
-	go func() {
-		for {
-			s := <-ch
-			if s == nil {
-				return
-			}
-			if done.Load() {
-				// Do not forward if the function returned because statusChan is possibly closed
-				continue
-			}
-			select {
-			case statusChan <- s:
-			case <-ctx.Done():
-			}
-		}
-	}()
-
-	// get result
-	resultCtxCh := make(chan *ResultContext)
-	errCh := make(chan error)
-	go func() {
-		solveOpt := solveOpt
-		solveOpt.Ref = ""
-		buildDoneCh := make(chan struct{})
-		_, err := c.Build(context.Background(), solveOpt, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-			doneErr := errors.Errorf("done")
-			ctx, cancel := context.WithCancelCause(ctx)
-			defer cancel(doneErr)
-
-			// force evaluation of all targets in parallel
-			results := make(map[*pb.Definition]*gateway.Result)
-			resultsMu := sync.Mutex{}
-			eg, egCtx := errgroup.WithContext(ctx)
-			targets.EachRef(func(def *pb.Definition) error {
-				eg.Go(func() error {
-					res2, err := c.Solve(egCtx, gateway.SolveRequest{
-						Evaluate:   true,
-						Definition: def,
-					})
-					if err != nil {
-						return err
-					}
-					resultsMu.Lock()
-					results[def] = res2
-					resultsMu.Unlock()
-					return nil
-				})
-				return nil
-			})
-			resultCtx := ResultContext{}
-			if err := eg.Wait(); err != nil {
-				var se *errdefs.SolveError
-				if errors.As(err, &se) {
-					resultCtx.solveErr = se
-				} else {
-					return nil, err
-				}
-			}
-			res2, _ := result.ConvertResult(targets, func(def *pb.Definition) (gateway.Reference, error) {
-				if res, ok := results[def]; ok {
-					return res.Ref, nil
-				}
-				return nil, nil
-			})
-
-			// Record the client and ctx as well so that containers can be created from the SolveError.
-			resultCtx.res = res2
-			resultCtx.gwClient = c
-			resultCtx.gwCtx = ctx
-			resultCtx.gwDone = func() {
-				cancel(doneErr)
-				// wait for Build() completion(or timeout) to ensure the Build's finalizing and avoiding an error "context canceled"
-				select {
-				case <-buildDoneCh:
-				case <-time.After(5 * time.Second):
-				}
-			}
-			select {
-			case resultCtxCh <- &resultCtx:
-			case <-ctx.Done():
-				return nil, ctx.Err()
-			}
-
-			// wait for cleanup or cancel
-			<-ctx.Done()
-			if context.Cause(ctx) != doneErr { // doneErr is not an error.
-				return nil, ctx.Err()
-			}
-			return nil, nil
-		}, ch)
-		close(buildDoneCh)
-		if err != nil {
-			errCh <- err
-		}
-	}()
-
-	select {
-	case resultCtx := <-resultCtxCh:
-		return resultCtx, nil
-	case err := <-errCh:
-		return nil, err
-	case <-ctx.Done():
-		return nil, ctx.Err()
-	}
-}
+// NewResultHandle makes a call to client.Build, additionally returning a
+// opaque ResultHandle alongside the standard response and error.
+//
+// This ResultHandle can be used to execute additional build steps in the same
+// context as the build occurred, which can allow easy debugging of build
+// failures and successes.
+//
+// If the returned ResultHandle is not nil, the caller must call Done() on it.
+func NewResultHandle(ctx context.Context, cc *client.Client, opt client.SolveOpt, product string, buildFunc gateway.BuildFunc, ch chan *client.SolveStatus) (*ResultHandle, *client.SolveResponse, error) {
+	// Create a new context to wrap the original, and cancel it when the
+	// caller-provided context is cancelled.
+	//
+	// We derive the context from the background context so that we can forbid
+	// cancellation of the build request after <-done is closed (which we do
+	// before returning the ResultHandle).
+	baseCtx := ctx
+	ctx, cancel := context.WithCancelCause(context.Background())
+	done := make(chan struct{})
+	go func() {
+		select {
+		case <-baseCtx.Done():
+			cancel(baseCtx.Err())
+		case <-done:
+			// Once done is closed, we've recorded a ResultHandle, so we
+			// shouldn't allow cancelling the underlying build request anymore.
+		}
+	}()
+
+	// Create a new channel to forward status messages to the original.
+	//
+	// We do this so that we can discard status messages after the main portion
+	// of the build is complete. This is necessary for the solve error case,
+	// where the original gateway is kept open until the ResultHandle is
+	// closed - we don't want progress messages from operations in that
+	// ResultHandle to display after this function exits.
+	//
+	// Additionally, callers should wait for the progress channel to be closed.
+	// If we keep the session open and never close the progress channel, the
+	// caller will likely hang.
+	baseCh := ch
+	ch = make(chan *client.SolveStatus)
+	go func() {
+		for {
+			s, ok := <-ch
+			if !ok {
+				return
+			}
+			select {
+			case <-baseCh:
+				// base channel is closed, discard status messages
+			default:
+				baseCh <- s
+			}
+		}
+	}()
+	defer close(baseCh)
+
+	var resp *client.SolveResponse
+	var respErr error
+	var respHandle *ResultHandle
+
+	go func() {
+		defer cancel(context.Canceled) // ensure no dangling processes
+
+		var res *gateway.Result
+		var err error
+		resp, err = cc.Build(ctx, opt, product, func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+			var err error
+			res, err = buildFunc(ctx, c)
+			if res != nil && err == nil {
+				// Force evaluation of the build result (otherwise, we likely
+				// won't get a solve error)
+				def, err2 := getDefinition(ctx, res)
+				if err2 != nil {
+					return nil, err2
+				}
+				res, err = evalDefinition(ctx, c, def)
+			}
+			if err != nil {
+				// Scenario 1: we failed to evaluate a node somewhere in the
+				// build graph.
+				//
+				// In this case, we construct a ResultHandle from this
+				// original Build session, and return it alongside the original
+				// build error. We then need to keep the gateway session open
+				// until the caller explicitly closes the ResultHandle.
+				var se *errdefs.SolveError
+				if errors.As(err, &se) {
+					respHandle = &ResultHandle{
+						done:     make(chan struct{}),
+						solveErr: se,
+						gwClient: c,
+						gwCtx:    ctx,
+					}
+					respErr = se
+					close(done)
+
+					// Block until the caller closes the ResultHandle.
+					select {
+					case <-respHandle.done:
+					case <-ctx.Done():
+					}
+				}
+			}
+			return res, err
+		}, ch)
+		if respHandle != nil {
+			return
+		}
+		if err != nil {
+			// Something unexpected failed during the build, we didn't succeed,
+			// but we also didn't make it far enough to create a ResultHandle.
+			respErr = err
+			close(done)
+			return
+		}
+
+		// Scenario 2: we successfully built the image with no errors.
+		//
+		// In this case, the original gateway session has now been closed
+		// since the Build has been completed. So, we need to create a new
+		// gateway session to populate the ResultHandle. To do this, we
+		// need to re-evaluate the target result, in this new session. This
+		// should be instantaneous since the result should be cached.
+		def, err := getDefinition(ctx, res)
+		if err != nil {
+			respErr = err
+			close(done)
+			return
+		}
+
+		// NOTE: ideally this second connection should be lazily opened
+		opt := opt
+		opt.Ref = ""
+		opt.Exports = nil
+		opt.CacheExports = nil
+		_, respErr = cc.Build(ctx, opt, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+			res, err := evalDefinition(ctx, c, def)
+			if err != nil {
+				// This should probably not happen, since we've previously
+				// successfully evaluated the same result with no issues.
+				return nil, errors.Wrap(err, "inconsistent solve result")
+			}
+			respHandle = &ResultHandle{
+				done:     make(chan struct{}),
+				res:      res,
+				gwClient: c,
+				gwCtx:    ctx,
+			}
+			close(done)
+
+			// Block until the caller closes the ResultHandle.
+			select {
+			case <-respHandle.done:
+			case <-ctx.Done():
+			}
+			return nil, ctx.Err()
+		}, nil)
+		if respHandle != nil {
+			return
+		}
+		close(done)
+	}()
+
+	// Block until the other thread signals that it's completed the build.
+	select {
+	case <-done:
+	case <-baseCtx.Done():
+		if respErr == nil {
+			respErr = baseCtx.Err()
+		}
+	}
+
+	return respHandle, resp, respErr
+}
+
+// getDefinition converts a gateway result into a collection of definitions for
+// each ref in the result.
+func getDefinition(ctx context.Context, res *gateway.Result) (*result.Result[*pb.Definition], error) {
+	return result.ConvertResult(res, func(ref gateway.Reference) (*pb.Definition, error) {
+		st, err := ref.ToState()
+		if err != nil {
+			return nil, err
+		}
+		def, err := st.Marshal(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return def.ToPB(), nil
+	})
+}
+
+// evalDefinition performs the reverse of getDefinition, converting a
+// collection of definitions into a gateway result.
+func evalDefinition(ctx context.Context, c gateway.Client, defs *result.Result[*pb.Definition]) (*gateway.Result, error) {
+	// force evaluation of all targets in parallel
+	results := make(map[*pb.Definition]*gateway.Result)
+	resultsMu := sync.Mutex{}
+	eg, egCtx := errgroup.WithContext(ctx)
+	defs.EachRef(func(def *pb.Definition) error {
+		eg.Go(func() error {
+			res, err := c.Solve(egCtx, gateway.SolveRequest{
+				Evaluate:   true,
+				Definition: def,
+			})
+			if err != nil {
+				return err
+			}
+			resultsMu.Lock()
+			results[def] = res
+			resultsMu.Unlock()
+			return nil
+		})
+		return nil
+	})
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}
+	res, _ := result.ConvertResult(defs, func(def *pb.Definition) (gateway.Reference, error) {
+		if res, ok := results[def]; ok {
+			return res.Ref, nil
+		}
+		return nil, nil
+	})
+	return res, nil
+}
 
-// ResultContext is a build result with the client that built it.
-type ResultContext struct {
+// ResultHandle is a build result with the client that built it.
+type ResultHandle struct {
 	res      *gateway.Result
 	solveErr *errdefs.SolveError
 
-	gwClient   gateway.Client
-	gwCtx      context.Context
-	gwDone     func()
-	gwDoneOnce sync.Once
+	done     chan struct{}
+	doneOnce sync.Once
+
+	gwClient gateway.Client
+	gwCtx    context.Context
 
 	cleanups   []func()
 	cleanupsMu sync.Mutex
 }
 
-func (r *ResultContext) Done() {
-	r.gwDoneOnce.Do(func() {
+func (r *ResultHandle) Done() {
+	r.doneOnce.Do(func() {
 		r.cleanupsMu.Lock()
 		cleanups := r.cleanups
 		r.cleanups = nil
@@ -181,22 +274,24 @@ func (r *ResultContext) Done() {
 		for _, f := range cleanups {
 			f()
 		}
-		r.gwDone()
+
+		close(r.done)
+		<-r.gwCtx.Done()
 	})
 }
 
-func (r *ResultContext) registerCleanup(f func()) {
+func (r *ResultHandle) registerCleanup(f func()) {
 	r.cleanupsMu.Lock()
 	r.cleanups = append(r.cleanups, f)
 	r.cleanupsMu.Unlock()
 }
 
-func (r *ResultContext) build(buildFunc gateway.BuildFunc) (err error) {
+func (r *ResultHandle) build(buildFunc gateway.BuildFunc) (err error) {
 	_, err = buildFunc(r.gwCtx, r.gwClient)
 	return err
 }
 
-func (r *ResultContext) getContainerConfig(ctx context.Context, c gateway.Client, cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
+func (r *ResultHandle) getContainerConfig(ctx context.Context, c gateway.Client, cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
 	if r.res != nil && r.solveErr == nil {
 		logrus.Debugf("creating container from successful build")
 		ccfg, err := containerConfigFromResult(ctx, r.res, c, *cfg)
@@ -215,7 +310,7 @@ func (r *ResultContext) getContainerConfig(ctx context.Context, c gateway.Client
 	return containerCfg, nil
 }
 
-func (r *ResultContext) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) (_ gateway.StartRequest, err error) {
+func (r *ResultHandle) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) (_ gateway.StartRequest, err error) {
 	processCfg := newStartRequest(stdin, stdout, stderr)
 	if r.res != nil && r.solveErr == nil {
 		logrus.Debugf("creating container from successful build")
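Taken together, the new result.go gives NewResultHandle a strict lifecycle: a non-nil handle must be closed with Done(), and the supplied status channel is closed by the function itself, so callers should drain it until it closes. A condensed sketch of that contract (hypothetical runAndDebug helper; assumes a configured *client.Client, a gateway.BuildFunc and the usual buildkit/buildx imports):

// runAndDebug is a hypothetical sketch of the NewResultHandle contract.
func runAndDebug(ctx context.Context, c *client.Client, opt client.SolveOpt, bf gateway.BuildFunc) error {
	ch := make(chan *client.SolveStatus)
	go func() {
		for range ch { // drain progress; NewResultHandle closes ch when it returns
		}
	}()

	handle, _, err := build.NewResultHandle(ctx, c, opt, "buildx", bf, ch)
	if handle != nil {
		// Done() runs registered cleanups, closes the handle, and releases the
		// gateway session that was kept open for debugging.
		defer handle.Done()
	}
	// On a solve error, err wraps *errdefs.SolveError and the handle still
	// points at the failed step, so a container can be created from it.
	return err
}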

@@ -304,7 +304,10 @@ func getImageID(resp map[string]string) string {
 }
 
 func runBasicBuild(ctx context.Context, dockerCli command.Cli, opts *controllerapi.BuildOptions, options buildOptions, printer *progress.Printer) (*client.SolveResponse, error) {
-	resp, _, err := cbuild.RunBuild(ctx, dockerCli, *opts, os.Stdin, printer, false)
+	resp, res, err := cbuild.RunBuild(ctx, dockerCli, *opts, os.Stdin, printer, false)
+	if res != nil {
+		res.Done()
+	}
 	return resp, err
 }

@ -33,10 +33,10 @@ const defaultTargetName = "default"
// RunBuild runs the specified build and returns the result. // RunBuild runs the specified build and returns the result.
// //
// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext, // NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
// inspect the result and debug the cause of that error. // inspect the result and debug the cause of that error.
func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) { func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
if in.NoCache && len(in.NoCacheFilter) > 0 { if in.NoCache && len(in.NoCacheFilter) > 0 {
return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together") return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
} }
@ -176,7 +176,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progress, generateResult) resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progress, generateResult)
err = wrapBuildError(err, false) err = wrapBuildError(err, false)
if err != nil { if err != nil {
// NOTE: buildTargets can return *build.ResultContext even on error. // NOTE: buildTargets can return *build.ResultHandle even on error.
return nil, res, err return nil, res, err
} }
return resp, res, nil return resp, res, nil
@@ -184,17 +184,17 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
 
 // buildTargets runs the specified build and returns the result.
 //
-// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
+// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultHandle,
 // this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
 // inspect the result and debug the cause of that error.
-func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
-	var res *build.ResultContext
+func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultHandle, error) {
+	var res *build.ResultHandle
 	var resp map[string]*client.SolveResponse
 	var err error
 	if generateResult {
 		var mu sync.Mutex
 		var idx int
-		resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultContext) {
+		resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultHandle) {
 			mu.Lock()
 			defer mu.Unlock()
 			if res == nil || driverIndex < idx {
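The NOTE comments above spell out the error contract shared by RunBuild and buildTargets: a handle may arrive together with a non-nil error, and the caller owns its release. A caller-side sketch of that contract, mirroring runBasicBuild above (hypothetical buildAndRelease helper; assumes the cbuild import alias for github.com/docker/buildx/controller/build and the usual command/progress/client imports):

// buildAndRelease is a hypothetical caller sketch of the contract above.
func buildAndRelease(ctx context.Context, dockerCli command.Cli, opts controllerapi.BuildOptions, printer *progress.Printer) (*client.SolveResponse, error) {
	resp, res, err := cbuild.RunBuild(ctx, dockerCli, opts, os.Stdin, printer, true)
	if res != nil {
		// Release the handle whether the build succeeded or failed; a debugger
		// would instead keep it alive and invoke a container from it.
		defer res.Done()
	}
	return resp, err
}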

@@ -29,7 +29,7 @@ func NewLocalBuildxController(ctx context.Context, dockerCli command.Cli, logger
 type buildConfig struct {
 	// TODO: these two structs should be merged
 	// Discussion: https://github.com/docker/buildx/pull/1640#discussion_r1113279719
-	resultCtx    *build.ResultContext
+	resultCtx    *build.ResultHandle
 	buildOptions *controllerapi.BuildOptions
 }
@@ -49,7 +49,7 @@ func (b *localController) Build(ctx context.Context, options controllerapi.Build
 	defer b.buildOnGoing.Store(false)
 
 	resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
-	// NOTE: RunBuild can return *build.ResultContext even on error.
+	// NOTE: RunBuild can return *build.ResultHandle even on error.
 	if res != nil {
 		b.buildConfig = buildConfig{
 			resultCtx: res,

@@ -98,7 +98,7 @@ func (m *Manager) DeleteProcess(id string) error {
 // When a container isn't available (i.e. first time invoking or the container has exited) or cfg.Rollback is set,
 // this method will start a new container and run the process in it. Otherwise, this method starts a new process in the
 // existing container.
-func (m *Manager) StartProcess(pid string, resultCtx *build.ResultContext, cfg *pb.InvokeConfig) (*Process, error) {
+func (m *Manager) StartProcess(pid string, resultCtx *build.ResultHandle, cfg *pb.InvokeConfig) (*Process, error) {
 	// Get the target result to invoke a container from
 	var ctr *build.Container
 	if a := m.container.Load(); a != nil {
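StartProcess now resolves the target container from the stored *build.ResultHandle. A minimal invocation sketch (hypothetical attachShell helper; the pid value and InvokeConfig fields are illustrative, assuming imports of the processes, build and controller/pb packages):

// attachShell is a hypothetical sketch of driving Manager.StartProcess.
func attachShell(manager *processes.Manager, handle *build.ResultHandle) error {
	proc, err := manager.StartProcess("attach-1", handle, &pb.InvokeConfig{
		Tty: true,
		Cmd: []string{"/bin/sh"},
	})
	if err != nil {
		return err
	}
	_ = proc // a monitor would forward I/O to proc and wait for it to exit
	return nil
}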

@@ -148,7 +148,7 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
 			}()
 
 			// prepare server
-			b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultContext, error) {
+			b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultHandle, error) {
 				return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true)
 			})
 			defer b.Close()

@@ -19,7 +19,7 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultContext, err error)
+type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultHandle, err error)
 
 func NewServer(buildFunc BuildFunc) *Server {
 	return &Server{
@@ -40,7 +40,7 @@ type session struct {
 	buildOptions *pb.BuildOptions
 	inputPipe    *io.PipeWriter
 
-	result *build.ResultContext
+	result *build.ResultHandle
 
 	processes *processes.Manager
 }
@@ -205,7 +205,7 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
 	resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
 	m.sessionMu.Lock()
 	if s, ok := m.session[ref]; ok {
-		// NOTE: buildFunc can return *build.ResultContext even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
+		// NOTE: buildFunc can return *build.ResultHandle even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
 		if res != nil {
 			s.result = res
 			s.cancelBuild = cancel
