build: rework build package to use only a single Build call

Signed-off-by: Justin Chadwell <me@jedevc.com>
pull/1750/head
Justin Chadwell 2 years ago
parent c6a78d216c
commit 1571c137bc
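At a high level, this commit collapses the old two-pass flow (one gateway Build call for the solve, a second one inside NewResultContext to recover a debuggable result) into a single Build call whose live session is captured in a ResultContext and finalized later by the caller. A minimal sketch of the resulting caller-side shape, mirroring the new Build wrapper in this diff (illustrative only, not part of the commit; types come from the buildx build, builder, dockerutil, progress, and buildkit client packages):

// Sketch only: start every target with BuildResults, then Wait each
// ResultContext to obtain its SolveResponse, as the new Build wrapper does.
func buildAll(ctx context.Context, nodes []builder.Node, opts map[string]build.Options, docker *dockerutil.Client, configDir string, w progress.Writer) (map[string]*client.SolveResponse, error) {
	rcs, err := build.BuildResults(ctx, nodes, opts, docker, configDir, w)
	if err != nil {
		return nil, err
	}
	resps := map[string]*client.SolveResponse{}
	for target, rc := range rcs {
		resp, err := rc.Wait(ctx) // resumes the suspended session and closes it
		if err != nil {
			return nil, err
		}
		resps[target] = resp
	}
	return resps, nil
}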

@@ -653,11 +653,166 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op
return &so, releaseF, nil
}
func build(ctx context.Context, c *client.Client, opt Options, so client.SolveOpt, pw progress.Writer) (*ResultContext, error) {
frontendInputs := make(map[string]*pb.Definition)
for key, st := range so.FrontendInputs {
def, err := st.Marshal(ctx)
if err != nil {
return nil, err
}
frontendInputs[key] = def.ToPB()
}
req := gateway.SolveRequest{
Frontend: so.Frontend,
FrontendInputs: frontendInputs,
FrontendOpt: make(map[string]string),
}
for k, v := range so.FrontendAttrs {
req.FrontendOpt[k] = v
}
so.Frontend = ""
so.FrontendInputs = nil
ch, chdone := progress.NewChannel(pw)
rcs := make(chan *ResultContext)
suspend := make(chan struct{})
suspendDone := make(chan struct{})
go func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var rc *ResultContext
var printRes map[string][]byte
rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
var isFallback bool
var origErr error
for {
if opt.PrintFunc != nil {
if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
} else {
req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
}
req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name
if isFallback {
req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage
}
}
res, err := c.Solve(ctx, req)
if err != nil {
if origErr != nil {
return nil, err
}
var reqErr *errdefs.UnsupportedSubrequestError
if !isFallback {
if errors.As(err, &reqErr) {
switch reqErr.Name {
case "frontend.outline", "frontend.targets":
isFallback = true
origErr = err
continue
}
return nil, err
}
// buildkit v0.8 vendored in Docker 20.10 does not support typed errors
if strings.Contains(err.Error(), "unsupported request frontend.outline") || strings.Contains(err.Error(), "unsupported request frontend.targets") {
isFallback = true
origErr = err
continue
}
}
return nil, err
}
// TODO: would we want to compute this on demand instead of blocking?
eg, ctx2 := errgroup.WithContext(ctx)
res.EachRef(func(r gateway.Reference) error {
eg.Go(func() error {
return r.Evaluate(ctx2)
})
return nil
})
err = eg.Wait()
if opt.PrintFunc != nil {
printRes = res.Metadata
}
rc = &ResultContext{
gwClient: c,
gwCtx: ctx,
gwDone: cancel,
gwRef: res,
suspend: suspend,
suspendDone: suspendDone,
}
var se *errdefs.SolveError
if errors.As(err, &se) {
rc.gwErr = se
} else if err != nil {
return nil, err
}
rcs <- rc
<-suspend
return res, err
}
}, ch)
<-chdone
if rr != nil {
if rr.ExporterResponse == nil {
rr.ExporterResponse = map[string]string{}
}
for k, v := range printRes {
rr.ExporterResponse[k] = string(v)
}
}
rc.resp = rr
rc.err = err
close(suspendDone)
}()
select {
case <-ctx.Done():
return nil, ctx.Err()
case rc := <-rcs:
return rc, nil
}
}
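The subtle part of build() above is the suspend handshake: the gateway callback publishes the ResultContext on rcs and then parks on the suspend channel, which keeps the BuildKit session (and with it the solve result) alive until the caller resumes it. A stripped-down, self-contained sketch of that pattern, with no BuildKit types (assumed simplification, not the commit's code):

package main

import "fmt"

// result mimics ResultContext: a value handed out while its producer
// goroutine stays parked, keeping the "session" open.
type result struct {
	value       string
	suspend     chan struct{} // closed by the consumer to resume the producer
	suspendDone chan struct{} // closed by the producer after teardown
}

func produce() *result {
	r := &result{
		value:       "solved",
		suspend:     make(chan struct{}),
		suspendDone: make(chan struct{}),
	}
	out := make(chan *result)
	go func() {
		defer close(r.suspendDone) // signal that teardown has finished
		out <- r                   // publish the live result
		<-r.suspend                // park until the consumer is done with it
	}()
	return <-out
}

func main() {
	r := produce()
	fmt.Println(r.value) // inspect or debug while the session is still open
	close(r.suspend)     // resume the producer
	<-r.suspendDone      // wait for it to finish
}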
func Build(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
-return BuildWithResultHandler(ctx, nodes, opt, docker, configDir, w, nil)
+resp = map[string]*client.SolveResponse{}
rcs, err := BuildResults(ctx, nodes, opt, docker, configDir, w)
if err != nil {
return nil, err
}
eg, ctx := errgroup.WithContext(ctx)
for k, v := range rcs {
k, v := k, v
eg.Go(func() error {
v2, err := v.Wait(ctx)
if err != nil {
return err
}
resp[k] = v2
return nil
})
}
if err := eg.Wait(); err != nil {
return nil, err
}
return resp, nil
}
-func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultContext)) (resp map[string]*client.SolveResponse, err error) {
+func BuildResults(ctx context.Context, nodes []builder.Node, opt map[string]Options, docker *dockerutil.Client, configDir string, w progress.Writer) (resp map[string]*ResultContext, err error) {
if len(nodes) == 0 {
return nil, errors.Errorf("driver required for build")
}
@@ -708,8 +863,6 @@ func BuildWithResultHandler(ctx, nodes []builder.Node, opt map[s
}
}()
-eg, ctx := errgroup.WithContext(ctx)
for k, opt := range opt {
multiDriver := len(m[k]) > 1
hasMobyDriver := false
@@ -785,8 +938,9 @@ func BuildWithResultHandler(ctx, nodes []builder.Node, opt map[s
}
}
-resp = map[string]*client.SolveResponse{}
+eg, ctx := errgroup.WithContext(ctx)
var respMu sync.Mutex
+resp = map[string]*ResultContext{}
results := waitmap.New()
multiTarget := len(opt) > 1
@@ -801,15 +955,17 @@ func BuildWithResultHandler(ctx, nodes []builder.Node, opt map[s
if multiTarget {
span, ctx = tracing.StartSpan(ctx, k)
}
-baseCtx := ctx
-res := make([]*client.SolveResponse, len(dps))
+res := make([]*ResultContext, len(dps))
-eg2, ctx := errgroup.WithContext(ctx)
var pushNames string
var insecurePush bool
+wg := sync.WaitGroup{}
for i, dp := range dps {
+wg.Add(1)
i, dp, so := i, dp, *dp.so
if multiDriver {
for i, e := range so.Exports {
@@ -842,104 +998,24 @@ func BuildWithResultHandler(ctx, nodes []builder.Node, opt map[s
pw := progress.WithPrefix(w, k, multiTarget)
c := clients[dp.driverIndex]
-eg2.Go(func() error {
+eg.Go(func() error {
pw = progress.ResetTime(pw)
if err := waitContextDeps(ctx, dp.driverIndex, results, &so); err != nil {
return err
}
-frontendInputs := make(map[string]*pb.Definition)
+result, err := build(ctx, c, opt, so, pw)
-for key, st := range so.FrontendInputs {
-def, err := st.Marshal(ctx)
if err != nil {
return err
}
-frontendInputs[key] = def.ToPB()
+results.Set(resultKey(dp.driverIndex, k), result.gwRef)
-}
-req := gateway.SolveRequest{
+res[i] = result
-Frontend: so.Frontend,
+resp[k] = result
-FrontendInputs: frontendInputs,
-FrontendOpt: make(map[string]string),
-}
-for k, v := range so.FrontendAttrs {
-req.FrontendOpt[k] = v
-}
-so.Frontend = ""
-so.FrontendInputs = nil
-ch, done := progress.NewChannel(pw)
+result.hook(func(ctx context.Context) error {
-defer func() { <-done }()
+defer wg.Done()
-cc := c
-var printRes map[string][]byte
-rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-var isFallback bool
-var origErr error
-for {
-if opt.PrintFunc != nil {
-if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
-req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
-} else {
-req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
-}
-req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name
-if isFallback {
-req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage
-}
-}
-res, err := c.Solve(ctx, req)
-if err != nil {
-if origErr != nil {
-return nil, err
-}
-var reqErr *errdefs.UnsupportedSubrequestError
-if !isFallback {
-if errors.As(err, &reqErr) {
-switch reqErr.Name {
-case "frontend.outline", "frontend.targets":
-isFallback = true
-origErr = err
-continue
-}
-return nil, err
-}
-// buildkit v0.8 vendored in Docker 20.10 does not support typed errors
-if strings.Contains(err.Error(), "unsupported request frontend.outline") || strings.Contains(err.Error(), "unsupported request frontend.targets") {
-isFallback = true
-origErr = err
-continue
-}
-}
-return nil, err
-}
-if opt.PrintFunc != nil {
-printRes = res.Metadata
-}
-results.Set(resultKey(dp.driverIndex, k), res)
-if resultHandleFunc != nil {
-resultCtx, err := NewResultContext(cc, so, res)
-if err == nil {
-resultHandleFunc(dp.driverIndex, resultCtx)
-} else {
-logrus.Warnf("failed to record result: %s", err)
-}
-}
-return res, nil
-}
-}, ch)
-if err != nil {
-return err
-}
-res[i] = rr
-if rr.ExporterResponse == nil {
-rr.ExporterResponse = map[string]string{}
-}
-for k, v := range printRes {
-rr.ExporterResponse[k] = string(v)
-}
node := nodes[dp.driverIndex].Driver
if node.IsMobyDriver() {
@@ -963,12 +1039,12 @@ func BuildWithResultHandler(ctx, nodes []builder.Node, opt map[s
if err == nil && remoteDigest != "" {
// old daemons might not have containerimage.config.digest set
// in response so use containerimage.digest value for it if available
-if _, ok := rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; !ok {
+if _, ok := result.resp.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; !ok {
-if v, ok := rr.ExporterResponse[exptypes.ExporterImageDigestKey]; ok {
+if v, ok := result.resp.ExporterResponse[exptypes.ExporterImageDigestKey]; ok {
-rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey] = v
+result.resp.ExporterResponse[exptypes.ExporterImageConfigDigestKey] = v
}
}
-rr.ExporterResponse[exptypes.ExporterImageDigestKey] = remoteDigest
+result.resp.ExporterResponse[exptypes.ExporterImageDigestKey] = remoteDigest
} else if err != nil {
return err
}
@@ -976,21 +1052,21 @@ func BuildWithResultHandler(ctx, nodes []builder.Node, opt map[s
}
}
}
return nil
})
-}
-eg.Go(func() (err error) {
+if i == 0 {
-ctx := baseCtx
+result.hook(func(ctx context.Context) error {
+wg.Wait()
defer func() {
if span != nil {
tracing.FinishWithError(span, err)
}
}()
pw := progress.WithPrefix(w, "default", false)
-if err := eg2.Wait(); err != nil {
-return err
-}
respMu.Lock()
resp[k] = res[0]
@@ -998,13 +1074,15 @@ func BuildWithResultHandler(ctx, nodes []builder.Node, opt map[s
if len(res) == 1 {
return nil
}
-if pushNames == "" {
-return nil
-}
+if pushNames != "" {
progress.Write(pw, fmt.Sprintf("merging manifest list %s", pushNames), func() error {
descs := make([]specs.Descriptor, 0, len(res))
for _, r := range res {
-s, ok := r.ExporterResponse[exptypes.ExporterImageDescriptorKey]
+s, ok := r.resp.ExporterResponse[exptypes.ExporterImageDescriptorKey]
if ok {
dt, err := base64.StdEncoding.DecodeString(s)
if err != nil {
@@ -1021,7 +1099,7 @@ func BuildWithResultHandler(ctx, nodes []builder.Node, opt map[s
// Note that the mediatype isn't really correct as most of the time it is image manifest and
// not manifest list but actually both are handled because for Docker mediatypes the
// mediatype value in the Accept header does not seem to matter.
-s, ok = r.ExporterResponse[exptypes.ExporterImageDigestKey]
+s, ok = r.resp.ExporterResponse[exptypes.ExporterImageDigestKey]
if ok {
descs = append(descs, specs.Descriptor{
Digest: digest.Digest(s),
@@ -1087,7 +1165,7 @@ func BuildWithResultHandler(ctx, nodes []builder.Node, opt map[s
}
respMu.Lock()
-resp[k] = &client.SolveResponse{
+resp[k].resp = &client.SolveResponse{
ExporterResponse: map[string]string{
exptypes.ExporterImageDigestKey: desc.Digest.String(),
},
@@ -1096,10 +1174,13 @@ func BuildWithResultHandler(ctx, nodes []builder.Node, opt map[s
}
return nil
})
+return nil
+})
}
return nil
})
}
+}
if err := eg.Wait(); err != nil {
return nil, err

@@ -6,7 +6,6 @@ import (
"encoding/json"
"io"
"sync"
-"sync/atomic"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/moby/buildkit/client"
@@ -17,121 +16,63 @@ import (
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
+"golang.org/x/sync/errgroup"
)
-func NewResultContext(c *client.Client, solveOpt client.SolveOpt, res *gateway.Result) (*ResultContext, error) {
+// ResultContext is a build result with the client that built it.
-ctx := context.Background()
+type ResultContext struct {
-def, err := getDefinition(ctx, res)
+resp *client.SolveResponse
-if err != nil {
+err error
-return nil, err
+suspend chan<- struct{}
-}
+suspendDone <-chan struct{}
-return getResultAt(ctx, c, solveOpt, def, nil)
+hooks []func(ctx context.Context) error
+gwRef *gateway.Result
+gwErr *errdefs.SolveError
+gwClient gateway.Client
+gwCtx context.Context
+gwDone func()
+gwDoneOnce sync.Once
+cleanups []func()
+cleanupsMu sync.Mutex
}
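The hooks field added here queues deferred post-build work (manifest-list merging and digest fixups in BuildResults); the hook and Wait methods that follow drain it once the caller finalizes the result. A toy, self-contained version of that deferral pattern (assumed simplification, not the commit's code):

package main

import (
	"context"
	"fmt"
)

// resultCtx is a toy ResultContext: callbacks queued while the build
// runs execute only when the caller finalizes via wait().
type resultCtx struct {
	hooks []func(context.Context) error
}

func (r *resultCtx) hook(f func(context.Context) error) { r.hooks = append(r.hooks, f) }

func (r *resultCtx) wait(ctx context.Context) error {
	for _, f := range r.hooks {
		if err := f(ctx); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	r := &resultCtx{}
	r.hook(func(context.Context) error { fmt.Println("merge manifest list"); return nil })
	if err := r.wait(context.Background()); err != nil {
		fmt.Println("hook failed:", err)
	}
}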
-func getDefinition(ctx context.Context, res *gateway.Result) (*pb.Definition, error) {
+func (r *ResultContext) hook(hook func(ctx context.Context) error) {
-ref, err := res.SingleRef()
+r.hooks = append(r.hooks, hook)
-if err != nil {
-return nil, err
-}
-st, err := ref.ToState()
-if err != nil {
-return nil, err
-}
-def, err := st.Marshal(ctx)
-if err != nil {
-return nil, err
-}
-return def.ToPB(), nil
}
-func getResultAt(ctx context.Context, c *client.Client, solveOpt client.SolveOpt, target *pb.Definition, statusChan chan *client.SolveStatus) (*ResultContext, error) {
+func (r *ResultContext) Result(ctx context.Context) (*gateway.Result, *errdefs.SolveError) {
-ctx, cancel := context.WithCancel(ctx)
+return r.gwRef, r.gwErr
-defer cancel()
+}
-// forward SolveStatus
+func (r *ResultContext) Wait(ctx context.Context) (*client.SolveResponse, error) {
-done := new(atomic.Bool)
+defer r.Close()
-defer done.Store(true)
-ch := make(chan *client.SolveStatus)
-go func() {
-for {
-s := <-ch
-if s == nil {
-return
-}
-if done.Load() {
-// Do not forward if the function returned because statusChan is possibly closed
-continue
-}
-select {
-case statusChan <- s:
-case <-ctx.Done():
-}
-}
-}()
-// get result
+close(r.suspend)
-resultCtxCh := make(chan *ResultContext)
-errCh := make(chan error)
-go func() {
-_, err := c.Build(context.Background(), solveOpt, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
-ctx, cancel := context.WithCancel(ctx)
-defer cancel()
-resultCtx := ResultContext{}
-res2, err := c.Solve(ctx, gateway.SolveRequest{
-Evaluate: true,
-Definition: target,
-})
-if err != nil {
-var se *errdefs.SolveError
-if errors.As(err, &se) {
-resultCtx.solveErr = se
-} else {
-return nil, err
-}
-}
-// Record the client and ctx as well so that containers can be created from the SolveError.
-resultCtx.res = res2
-resultCtx.gwClient = c
-resultCtx.gwCtx = ctx
-resultCtx.gwDone = cancel
select {
-case resultCtxCh <- &resultCtx:
case <-ctx.Done():
return nil, ctx.Err()
+case <-r.suspendDone:
+eg, ctx := errgroup.WithContext(ctx)
+eg.Go(func() error {
+for _, f := range r.hooks {
+if err := f(ctx); err != nil {
+return err
}
-<-ctx.Done()
-return nil, nil
-}, ch)
-if err != nil {
-errCh <- err
}
-}()
+return nil
+})
-select {
+if err := eg.Wait(); err != nil {
-case resultCtx := <-resultCtxCh:
-return resultCtx, nil
-case err := <-errCh:
return nil, err
-case <-ctx.Done():
+}
-return nil, ctx.Err()
+return r.resp, r.err
}
}
-// ResultContext is a build result with the client that built it.
+func (r *ResultContext) Close() {
-type ResultContext struct {
-res *gateway.Result
-solveErr *errdefs.SolveError
-gwClient gateway.Client
-gwCtx context.Context
-gwDone func()
-gwDoneOnce sync.Once
-cleanups []func()
-cleanupsMu sync.Mutex
-}
-func (r *ResultContext) Done() {
r.gwDoneOnce.Do(func() {
r.cleanupsMu.Lock()
cleanups := r.cleanups
@@ -156,16 +97,16 @@ func (r *ResultContext) build(buildFunc gateway.BuildFunc) (err error) {
}
func (r *ResultContext) getContainerConfig(ctx context.Context, c gateway.Client, cfg *controllerapi.InvokeConfig) (containerCfg gateway.NewContainerRequest, _ error) {
-if r.res != nil && r.solveErr == nil {
+if r.gwRef != nil && r.gwErr == nil {
logrus.Debugf("creating container from successful build")
-ccfg, err := containerConfigFromResult(ctx, r.res, c, *cfg)
+ccfg, err := containerConfigFromResult(ctx, r.gwRef, c, *cfg)
if err != nil {
return containerCfg, err
}
containerCfg = *ccfg
} else {
logrus.Debugf("creating container from failed build %+v", cfg)
-ccfg, err := containerConfigFromError(r.solveErr, *cfg)
+ccfg, err := containerConfigFromError(r.gwErr, *cfg)
if err != nil {
return containerCfg, errors.Wrapf(err, "no result nor error is available")
}
@@ -176,14 +117,14 @@ func (r *ResultContext) getContainerConfig(ctx context.Context, c gateway.Client
func (r *ResultContext) getProcessConfig(cfg *controllerapi.InvokeConfig, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser) (_ gateway.StartRequest, err error) {
processCfg := newStartRequest(stdin, stdout, stderr)
-if r.res != nil && r.solveErr == nil {
+if r.gwRef != nil && r.gwErr == nil {
logrus.Debugf("creating container from successful build")
-if err := populateProcessConfigFromResult(&processCfg, r.res, *cfg); err != nil {
+if err := populateProcessConfigFromResult(&processCfg, r.gwRef, *cfg); err != nil {
return processCfg, err
}
} else {
logrus.Debugf("creating container from failed build %+v", cfg)
-if err := populateProcessConfigFromError(&processCfg, r.solveErr, *cfg); err != nil {
+if err := populateProcessConfigFromError(&processCfg, r.gwErr, *cfg); err != nil {
return processCfg, err
}
}

@@ -8,11 +8,13 @@ import (
"encoding/json"
"fmt"
"io"
+"log"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
+"sync"
"github.com/containerd/console"
"github.com/docker/buildx/build"
@@ -33,10 +35,14 @@ import (
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
dockeropts "github.com/docker/cli/opts"
+"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/builder/remotecontext/urlutil"
"github.com/docker/docker/pkg/ioutils"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
+"github.com/moby/buildkit/frontend/subrequests"
+"github.com/moby/buildkit/frontend/subrequests/outline"
+"github.com/moby/buildkit/frontend/subrequests/targets"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/util/appcontext"
"github.com/moby/buildkit/util/grpcerrors"
@@ -106,7 +112,6 @@ func (o *buildOptions) toControllerOptions() (controllerapi.BuildOptions, error)
NetworkMode: o.networkMode,
NoCacheFilter: o.noCacheFilter,
Platforms: o.platforms,
-PrintFunc: o.printFunc,
ShmSize: int64(o.shmSize),
Tags: o.tags,
Target: o.target,
@@ -153,6 +158,11 @@ func (o *buildOptions) toControllerOptions() (controllerapi.BuildOptions, error)
}
}
+opts.PrintFunc, err = parsePrintFunc(o.printFunc)
+if err != nil {
+return controllerapi.BuildOptions{}, err
+}
opts.CacheFrom, err = buildflags.ParseCacheEntry(o.cacheFrom)
if err != nil {
return controllerapi.BuildOptions{}, err
@@ -231,9 +241,13 @@ func runBuild(dockerCli command.Cli, in buildOptions) error {
return errors.Wrap(err, "removing image ID file")
}
}
-resp, _, err := cbuild.RunBuild(ctx, dockerCli, opts, os.Stdin, printer, false)
+res, err := cbuild.RunBuild(ctx, dockerCli, opts, os.Stdin, printer)
-if err1 := printer.Wait(); err == nil {
+resp, err2 := res.Wait(ctx)
-err = err1
+if err == nil {
+err = err2
+}
+if err2 := printer.Wait(); err == nil {
+err = err2
}
if err != nil {
return err
@@ -249,6 +263,16 @@ func runBuild(dockerCli command.Cli, in buildOptions) error {
}
return os.WriteFile(in.imageIDFile, []byte(dgst), 0644)
}
+if in.metadataFile != "" {
+if err := writeMetadataFile(in.metadataFile, decodeExporterResponse(resp.ExporterResponse)); err != nil {
+return err
+}
+}
+if opts.PrintFunc != nil {
+if err := printResult(opts.PrintFunc, resp.ExporterResponse); err != nil {
+return err
+}
+}
return nil
}
@@ -518,7 +542,7 @@ func updateLastActivity(dockerCli command.Cli, ng *store.NodeGroup) error {
return txn.UpdateLastActivity(ng)
}
-func launchControllerAndRunBuild(dockerCli command.Cli, options buildOptions) error {
+func launchControllerAndRunBuild(dockerCli command.Cli, options buildOptions) (err error) {
ctx := context.TODO()
if options.invoke != nil && (options.dockerfileName == "-" || options.contextPath == "-") {
@@ -560,6 +584,14 @@ func launchControllerAndRunBuild(dockerCli command.Cli, options buildOptions) er
if err != nil {
return err
}
+printerCloseOnce := sync.Once{}
+defer func() {
+printerCloseOnce.Do(func() {
+if err1 := printer.Wait(); err == nil {
+err = err1
+}
+})
+}()
// NOTE: buildx server has the current working directory different from the client
// so we need to resolve paths to absolute ones in the client.
@@ -588,11 +620,7 @@ func launchControllerAndRunBuild(dockerCli command.Cli, options buildOptions) er
}
}
-var resp *client.SolveResponse
+ref, err = c.Build(ctx, opts, pr, printer)
-ref, resp, err = c.Build(ctx, opts, pr, printer)
-if err1 := printer.Wait(); err == nil {
-err = err1
-}
if err != nil {
var be *controllererrors.BuildError
if errors.As(err, &be) {
@@ -610,22 +638,13 @@ func launchControllerAndRunBuild(dockerCli command.Cli, options buildOptions) er
if err := pr.Close(); err != nil {
logrus.Debug("failed to close stdin pipe reader")
}
-if options.quiet {
-fmt.Println(resp.ExporterResponse[exptypes.ExporterImageDigestKey])
-}
-if options.imageIDFile != "" {
-dgst := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
-if v, ok := resp.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; ok {
-dgst = v
-}
-return os.WriteFile(options.imageIDFile, []byte(dgst), 0644)
-}
}
// post-build operations
if options.invoke != nil && options.invoke.needsMonitor(retErr) {
+// HACK: pause the printer to prevent interference with monitor
+printer.Pause(true)
pr2, pw2 := io.Pipe()
f.SetWriter(pw2, func() io.WriteCloser {
pw2.Close() // propagate EOF
@@ -646,11 +665,40 @@ func launchControllerAndRunBuild(dockerCli command.Cli, options buildOptions) er
if err != nil {
logrus.Warnf("failed to run monitor: %v", err)
}
} else {
-if err := c.Disconnect(ctx, ref); err != nil {
+printer.Pause(false)
-logrus.Warnf("disconnect error: %v", err)
}
+resp, err := c.Finalize(ctx, ref)
+printerCloseOnce.Do(func() {
+if err1 := printer.Wait(); err == nil {
+err = err1
+}
+})
+if err != nil {
+return err
+}
+if options.quiet {
+fmt.Println(resp.ExporterResponse[exptypes.ExporterImageDigestKey])
+}
+if options.imageIDFile != "" {
+dgst := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
+if v, ok := resp.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; ok {
+dgst = v
+}
+return os.WriteFile(options.imageIDFile, []byte(dgst), 0644)
+}
+if options.metadataFile != "" {
+if err := writeMetadataFile(options.metadataFile, decodeExporterResponse(resp.ExporterResponse)); err != nil {
+return err
+}
+}
+if opts.PrintFunc != nil {
+if err := printResult(opts.PrintFunc, resp.ExporterResponse); err != nil {
+return err
}
}
return nil
}
@@ -964,3 +1012,65 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
}
}
func printResult(f *controllerapi.PrintFunc, res map[string]string) error {
switch f.Name {
case "outline":
return printValue(outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
case "targets":
return printValue(targets.PrintTargets, targets.SubrequestsTargetsDefinition.Version, f.Format, res)
case "subrequests.describe":
return printValue(subrequests.PrintDescribe, subrequests.SubrequestsDescribeDefinition.Version, f.Format, res)
default:
if dt, ok := res["result.txt"]; ok {
fmt.Print(dt)
} else {
log.Printf("%s %+v", f, res)
}
}
return nil
}
type printFunc func([]byte, io.Writer) error
func printValue(printer printFunc, version string, format string, res map[string]string) error {
if format == "json" {
fmt.Fprintln(os.Stdout, res["result.json"])
return nil
}
if res["version"] != "" && versions.LessThan(version, res["version"]) && res["result.txt"] != "" {
// structure is too new and we don't know how to print it
fmt.Fprint(os.Stdout, res["result.txt"])
return nil
}
return printer([]byte(res["result.json"]), os.Stdout)
}
func parsePrintFunc(str string) (*controllerapi.PrintFunc, error) {
if str == "" {
return nil, nil
}
csvReader := csv.NewReader(strings.NewReader(str))
fields, err := csvReader.Read()
if err != nil {
return nil, err
}
f := &controllerapi.PrintFunc{}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) == 2 {
if parts[0] == "format" {
f.Format = parts[1]
} else {
return nil, errors.Errorf("invalid print field: %s", field)
}
} else {
if f.Name != "" {
return nil, errors.Errorf("invalid print value: %s", str)
}
f.Name = field
}
}
return f, nil
}
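For example, parsePrintFunc("outline,format=json") yields a PrintFunc with Name "outline" and Format "json"; parsePrintFunc("") yields nil; and a value with two bare names such as "outline,targets" is rejected as an invalid print value, since only one subrequest name may be given.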

@@ -2,14 +2,10 @@ package build
import (
"context"
-"encoding/base64"
-"encoding/csv"
-"encoding/json"
"io"
"os"
"path/filepath"
"strings"
-"sync"
"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
@@ -24,7 +20,6 @@ import (
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/config"
dockeropts "github.com/docker/cli/opts"
-"github.com/docker/docker/pkg/ioutils"
"github.com/docker/go-units"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/session/auth/authprovider"
@@ -36,13 +31,9 @@ import (
const defaultTargetName = "default"
// RunBuild runs the specified build and returns the result.
-//
+func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer) (*build.ResultContext, error) {
-// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
-// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
-// inspect the result and debug the cause of that error.
-func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
if in.NoCache && len(in.NoCacheFilter) > 0 {
-return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
+return nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
}
contexts := map[string]build.NamedContext{}
@@ -50,11 +41,6 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
contexts[name] = build.NamedContext{Path: path}
}
-printFunc, err := parsePrintFunc(in.PrintFunc)
-if err != nil {
-return nil, nil, err
-}
opts := build.Options{
Inputs: build.Inputs{
ContextPath: in.ContextPath,
@@ -73,12 +59,11 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
Tags: in.Tags,
Target: in.Target,
Ulimits: controllerUlimitOpt2DockerUlimit(in.Ulimits),
-PrintFunc: printFunc,
}
platforms, err := platformutil.Parse(in.Platforms)
if err != nil {
-return nil, nil, err
+return nil, err
}
opts.Platforms = platforms
@@ -87,7 +72,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
secrets, err := controllerapi.CreateSecrets(in.Secrets)
if err != nil {
-return nil, nil, err
+return nil, err
}
opts.Session = append(opts.Session, secrets)
@@ -97,17 +82,17 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
}
ssh, err := controllerapi.CreateSSH(sshSpecs)
if err != nil {
-return nil, nil, err
+return nil, err
}
opts.Session = append(opts.Session, ssh)
outputs, err := controllerapi.CreateExports(in.Exports)
if err != nil {
-return nil, nil, err
+return nil, err
}
if in.ExportPush {
if in.ExportLoad {
-return nil, nil, errors.Errorf("push and load may not be set together at the moment")
+return nil, errors.Errorf("push and load may not be set together at the moment")
}
if len(outputs) == 0 {
outputs = []client.ExportEntry{{
@@ -121,7 +106,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
case "image":
outputs[0].Attrs["push"] = "true"
default:
-return nil, nil, errors.Errorf("push and %q output can't be used together", outputs[0].Type)
+return nil, errors.Errorf("push and %q output can't be used together", outputs[0].Type)
}
}
}
@@ -135,12 +120,19 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
switch outputs[0].Type {
case "docker":
default:
-return nil, nil, errors.Errorf("load and %q output can't be used together", outputs[0].Type)
+return nil, errors.Errorf("load and %q output can't be used together", outputs[0].Type)
}
}
}
opts.Exports = outputs
+if in.PrintFunc != nil {
+opts.PrintFunc = &build.PrintFunc{
+Name: in.PrintFunc.Name,
+Format: in.PrintFunc.Format,
+}
+}
opts.CacheFrom = controllerapi.CreateCaches(in.CacheFrom)
opts.CacheTo = controllerapi.CreateCaches(in.CacheTo)
@@ -148,7 +140,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
allow, err := buildflags.ParseEntitlements(in.Allow)
if err != nil {
-return nil, nil, err
+return nil, err
}
opts.Allow = allow
@@ -164,120 +156,32 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
builder.WithContextPathHash(contextPathHash),
)
if err != nil {
-return nil, nil, err
+return nil, err
}
if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
-return nil, nil, errors.Wrapf(err, "failed to update builder last activity time")
+return nil, errors.Wrapf(err, "failed to update builder last activity time")
}
nodes, err := b.LoadNodes(ctx, false)
if err != nil {
-return nil, nil, err
+return nil, err
}
-resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progress, in.MetadataFile, generateResult)
+res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progress, in.MetadataFile)
err = wrapBuildError(err, false)
if err != nil {
-// NOTE: buildTargets can return *build.ResultContext even on error.
+return nil, err
-return nil, res, err
}
-return resp, res, nil
+return res[defaultTargetName], nil
}
// buildTargets runs the specified build and returns the result.
-//
+func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, metadataFile string) (map[string]*build.ResultContext, error) {
-// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
+res, err := build.BuildResults(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress)
-// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
-// inspect the result and debug the cause of that error.
-func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, metadataFile string, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
-var res *build.ResultContext
-var resp map[string]*client.SolveResponse
-var err error
-if generateResult {
-var mu sync.Mutex
-var idx int
-resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultContext) {
-mu.Lock()
-defer mu.Unlock()
-if res == nil || driverIndex < idx {
-idx, res = driverIndex, gotRes
-}
-})
-} else {
-resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress)
-}
-if err != nil {
-return nil, res, err
-}
-if len(metadataFile) > 0 && resp != nil {
-if err := writeMetadataFile(metadataFile, decodeExporterResponse(resp[defaultTargetName].ExporterResponse)); err != nil {
-return nil, nil, err
-}
-}
-for k := range resp {
-if opts[k].PrintFunc != nil {
-if err := printResult(opts[k].PrintFunc, resp[k].ExporterResponse); err != nil {
-return nil, nil, err
-}
-}
-}
-return resp[defaultTargetName], res, err
-}
-func parsePrintFunc(str string) (*build.PrintFunc, error) {
-if str == "" {
-return nil, nil
-}
-csvReader := csv.NewReader(strings.NewReader(str))
-fields, err := csvReader.Read()
if err != nil {
return nil, err
}
-f := &build.PrintFunc{}
-for _, field := range fields {
-parts := strings.SplitN(field, "=", 2)
-if len(parts) == 2 {
-if parts[0] == "format" {
-f.Format = parts[1]
-} else {
-return nil, errors.Errorf("invalid print field: %s", field)
-}
-} else {
-if f.Name != "" {
-return nil, errors.Errorf("invalid print value: %s", str)
-}
-f.Name = field
-}
-}
-return f, nil
-}
-func writeMetadataFile(filename string, dt interface{}) error {
+return res, err
-b, err := json.MarshalIndent(dt, "", " ")
-if err != nil {
-return err
-}
-return ioutils.AtomicWriteFile(filename, b, 0644)
-}
-func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
-out := make(map[string]interface{})
-for k, v := range exporterResponse {
-dt, err := base64.StdEncoding.DecodeString(v)
-if err != nil {
-out[k] = v
-continue
-}
-var raw map[string]interface{}
-if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
-out[k] = v
-continue
-}
-out[k] = json.RawMessage(dt)
-}
-return out
}
func wrapBuildError(err error, bake bool) error {

@@ -1,48 +0,0 @@
package build
import (
"fmt"
"io"
"log"
"os"
"github.com/docker/buildx/build"
"github.com/docker/docker/api/types/versions"
"github.com/moby/buildkit/frontend/subrequests"
"github.com/moby/buildkit/frontend/subrequests/outline"
"github.com/moby/buildkit/frontend/subrequests/targets"
)
func printResult(f *build.PrintFunc, res map[string]string) error {
switch f.Name {
case "outline":
return printValue(outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
case "targets":
return printValue(targets.PrintTargets, targets.SubrequestsTargetsDefinition.Version, f.Format, res)
case "subrequests.describe":
return printValue(subrequests.PrintDescribe, subrequests.SubrequestsDescribeDefinition.Version, f.Format, res)
default:
if dt, ok := res["result.txt"]; ok {
fmt.Print(dt)
} else {
log.Printf("%s %+v", f, res)
}
}
return nil
}
type printFunc func([]byte, io.Writer) error
func printValue(printer printFunc, version string, format string, res map[string]string) error {
if format == "json" {
fmt.Fprintln(os.Stdout, res["result.json"])
return nil
}
if res["version"] != "" && versions.LessThan(version, res["version"]) && res["result.txt"] != "" {
// structure is too new and we don't know how to print it
fmt.Fprint(os.Stdout, res["result.txt"])
return nil
}
return printer([]byte(res["result.json"]), os.Stdout)
}

@@ -10,19 +10,23 @@ import (
)
type BuildxController interface {
-Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, err error)
+Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, error)
// Invoke starts an IO session into the specified process.
// If pid doesn't match any running processes, it starts a new process with the specified config.
// If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container.
// NOTE: If needed, in the future, we can split this API into three APIs (NewContainer, NewProcess and Attach).
Invoke(ctx context.Context, ref, pid string, options controllerapi.InvokeConfig, ioIn io.ReadCloser, ioOut io.WriteCloser, ioErr io.WriteCloser) error
-Kill(ctx context.Context) error
+Finalize(ctx context.Context, ref string) (*client.SolveResponse, error)
-Close() error
-List(ctx context.Context) (refs []string, _ error)
Disconnect(ctx context.Context, ref string) error
-ListProcesses(ctx context.Context, ref string) (infos []*controllerapi.ProcessInfo, retErr error)
-DisconnectProcess(ctx context.Context, ref, pid string) error
Inspect(ctx context.Context, ref string) (*controllerapi.InspectResponse, error)
+List(ctx context.Context) ([]string, error)
+ListProcesses(ctx context.Context, ref string) ([]*controllerapi.ProcessInfo, error)
+DisconnectProcess(ctx context.Context, ref, pid string) error
+Kill(ctx context.Context) error
+Close() error
}
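With this split, Build no longer returns the exporter response; a client drives an explicit lifecycle instead. A hedged sketch of a session against this interface (error handling trimmed; os/fmt usage and the surrounding function are illustrative assumptions, the method set comes from this diff):

// Sketch only: Build hands back a ref, Finalize later yields the
// SolveResponse, Disconnect releases the session.
func runOnce(ctx context.Context, c BuildxController, opts controllerapi.BuildOptions, pw progress.Writer) error {
	ref, err := c.Build(ctx, opts, os.Stdin, pw)
	if err != nil {
		return err
	}
	// ...optionally Invoke(ctx, ref, ...) here for interactive debugging...
	resp, err := c.Finalize(ctx, ref) // waits for the build and flushes hooks
	if err != nil {
		return err
	}
	fmt.Println(resp.ExporterResponse)
	return c.Disconnect(ctx, ref)
}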
type ControlOptions struct { type ControlOptions struct {

@@ -42,27 +42,27 @@ type localController struct {
buildOnGoing atomic.Bool
}
-func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
+func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, error) {
if !b.buildOnGoing.CompareAndSwap(false, true) {
-return "", nil, errors.New("build ongoing")
+return "", errors.New("build ongoing")
}
defer b.buildOnGoing.Store(false)
-resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
+res, err := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress)
-// NOTE: RunBuild can return *build.ResultContext even on error.
+if err != nil {
+return "", err
+}
if res != nil {
b.buildConfig = buildConfig{
resultCtx: res,
buildOptions: &options,
}
+_, buildErr := b.buildConfig.resultCtx.Result(ctx)
if buildErr != nil {
-buildErr = controllererrors.WrapBuild(buildErr, b.ref)
+return "", controllererrors.WrapBuild(buildErr, b.ref)
}
}
-if buildErr != nil {
-return "", nil, buildErr
-}
-return b.ref, resp, nil
+return b.ref, nil
}
func (b *localController) ListProcesses(ctx context.Context, ref string) (infos []*controllerapi.ProcessInfo, retErr error) {
@@ -123,7 +123,7 @@ func (b *localController) Kill(context.Context) error {
func (b *localController) Close() error {
b.cancelRunningProcesses()
if b.buildConfig.resultCtx != nil {
-b.buildConfig.resultCtx.Done()
+b.buildConfig.resultCtx.Close()
}
// TODO: cancel ongoing builds?
return nil
@@ -133,9 +133,17 @@ func (b *localController) List(ctx context.Context) (res []string, _ error) {
return []string{b.ref}, nil
}
+func (b *localController) Finalize(ctx context.Context, key string) (*client.SolveResponse, error) {
+b.cancelRunningProcesses()
+if b.buildConfig.resultCtx != nil {
+return b.buildConfig.resultCtx.Wait(ctx)
+}
+return nil, nil
+}
func (b *localController) Disconnect(ctx context.Context, key string) error {
-b.Close()
+return b.Close()
-return nil
}
func (b *localController) Inspect(ctx context.Context, ref string) (*controllerapi.InspectResponse, error) {

File diff suppressed because it is too large

@@ -7,14 +7,19 @@ import "github.com/moby/buildkit/api/services/control/control.proto";
option go_package = "pb";
service Controller {
+rpc Info(InfoRequest) returns (InfoResponse);
rpc Build(BuildRequest) returns (BuildResponse);
-rpc Inspect(InspectRequest) returns (InspectResponse);
+rpc Finalize(FinalizeRequest) returns (FinalizeResponse);
-rpc Status(StatusRequest) returns (stream StatusResponse);
-rpc Input(stream InputMessage) returns (InputResponse);
rpc Invoke(stream Message) returns (stream Message);
-rpc List(ListRequest) returns (ListResponse);
rpc Disconnect(DisconnectRequest) returns (DisconnectResponse);
-rpc Info(InfoRequest) returns (InfoResponse);
+rpc Status(StatusRequest) returns (stream StatusResponse);
+rpc Inspect(InspectRequest) returns (InspectResponse);
+rpc List(ListRequest) returns (ListResponse);
+rpc Input(stream InputMessage) returns (InputResponse);
rpc ListProcesses(ListProcessesRequest) returns (ListProcessesResponse);
rpc DisconnectProcess(DisconnectProcessRequest) returns (DisconnectProcessResponse);
}
@@ -48,7 +53,7 @@ message BuildRequest {
message BuildOptions {
string ContextPath = 1;
string DockerfileName = 2;
-string PrintFunc = 3;
+PrintFunc PrintFunc = 3;
map<string, string> NamedContexts = 4;
repeated string Allow = 5;
@@ -78,6 +83,19 @@ message BuildOptions {
bool ExportLoad = 28;
}
message FinalizeRequest {
string Ref = 1;
}
message FinalizeResponse {
map<string, string> ExporterResponse = 1;
}
message PrintFunc {
string Name = 1;
string Format = 2;
}
message ExportEntry {
string Type = 1;
map<string, string> Attrs = 2;
@@ -125,7 +143,6 @@ message Ulimit {
}
message BuildResponse {
-map<string, string> ExporterResponse = 1;
}
message DisconnectRequest {

@@ -113,49 +113,30 @@ func (c *Client) Inspect(ctx context.Context, ref string) (*pb.InspectResponse,
return c.client().Inspect(ctx, &pb.InspectRequest{Ref: ref})
}
-func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
+func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, error) {
ref := identity.NewID()
-statusChan := make(chan *client.SolveStatus)
+if err := c.build(ctx, ref, options, in, progress); err != nil {
-eg, egCtx := errgroup.WithContext(ctx)
+return "", err
-var resp *client.SolveResponse
-eg.Go(func() error {
-defer close(statusChan)
-var err error
-resp, err = c.build(egCtx, ref, options, in, statusChan)
-return err
-})
-eg.Go(func() error {
-for s := range statusChan {
-st := s
-progress.Write(st)
}
-return nil
+return ref, nil
-})
+}
-return ref, resp, eg.Wait()
+func (c *Client) Finalize(ctx context.Context, ref string) (*client.SolveResponse, error) {
+resp, err := c.client().Finalize(ctx, &pb.FinalizeRequest{Ref: ref})
+if err != nil {
+return nil, err
+}
+return &client.SolveResponse{
+ExporterResponse: resp.ExporterResponse,
+}, nil
}
-func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions, in io.ReadCloser, statusChan chan *client.SolveStatus) (*client.SolveResponse, error) {
+func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions, in io.ReadCloser, writer progress.Writer) error {
eg, egCtx := errgroup.WithContext(ctx)
done := make(chan struct{})
-var resp *client.SolveResponse
+go func() error {
+stream, err := c.client().Status(ctx, &pb.StatusRequest{
-eg.Go(func() error {
-defer close(done)
-pbResp, err := c.client().Build(egCtx, &pb.BuildRequest{
-Ref: ref,
-Options: &options,
-})
-if err != nil {
-return err
-}
-resp = &client.SolveResponse{
-ExporterResponse: pbResp.ExporterResponse,
-}
-return nil
-})
-eg.Go(func() error {
-stream, err := c.client().Status(egCtx, &pb.StatusRequest{
Ref: ref,
})
if err != nil {
@@ -169,8 +150,20 @@ func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions,
}
return errors.Wrap(err, "failed to receive status")
}
-statusChan <- pb.FromControlStatus(resp)
+writer.Write(pb.FromControlStatus(resp))
+}
+}()
+eg.Go(func() error {
+defer close(done)
+_, err := c.client().Build(egCtx, &pb.BuildRequest{
+Ref: ref,
+Options: &options,
+})
+if err != nil {
+return err
}
+return nil
})
if in != nil {
eg.Go(func() error {
@@ -232,7 +225,7 @@ func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions,
return eg2.Wait()
})
}
-return resp, eg.Wait()
+return eg.Wait()
}
func (c *Client) client() pb.ControllerClient {

@@ -24,7 +24,6 @@ import (
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/version"
"github.com/docker/cli/cli/command"
-"github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/pelletier/go-toml"
"github.com/pkg/errors"
@@ -143,8 +142,8 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
}()
// prepare server
-b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultContext, error) {
+b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*build.ResultContext, error) {
-return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true)
+return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress)
})
defer b.Close()

@@ -4,7 +4,6 @@ import (
"context"
"io"
"sync"
-"sync/atomic"
"time"
"github.com/docker/buildx/build"
@@ -14,12 +13,11 @@ import (
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/version"
-"github.com/moby/buildkit/client"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
-type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultContext, err error)
+type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (res *build.ResultContext, err error)
func NewServer(buildFunc BuildFunc) *Server {
return &Server{
@@ -34,7 +32,6 @@ type Server struct {
}
type session struct {
-buildOnGoing atomic.Bool
statusChan chan *pb.StatusResponse
cancelBuild func()
buildOptions *pb.BuildOptions
@@ -101,28 +98,6 @@ func (m *Server) List(ctx context.Context, req *pb.ListRequest) (res *pb.ListRes
}, nil
}
-func (m *Server) Disconnect(ctx context.Context, req *pb.DisconnectRequest) (res *pb.DisconnectResponse, err error) {
-key := req.Ref
-if key == "" {
-return nil, errors.New("disconnect: empty key")
-}
-m.sessionMu.Lock()
-if s, ok := m.session[key]; ok {
-if s.cancelBuild != nil {
-s.cancelBuild()
-}
-s.cancelRunningProcesses()
-if s.result != nil {
-s.result.Done()
-}
-}
-delete(m.session, key)
-m.sessionMu.Unlock()
-return &pb.DisconnectResponse{}, nil
-}
func (m *Server) Close() error {
m.sessionMu.Lock()
for k := range m.session {
@@ -167,15 +142,10 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
}
s, ok := m.session[ref]
if ok {
-if !s.buildOnGoing.CompareAndSwap(false, true) {
-m.sessionMu.Unlock()
-return &pb.BuildResponse{}, errors.New("build ongoing")
-}
s.cancelRunningProcesses()
s.result = nil
} else {
s = &session{}
-s.buildOnGoing.Store(true)
}
s.processes = processes.NewManager()
@@ -186,33 +156,24 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
s.inputPipe = inW
m.session[ref] = s
m.sessionMu.Unlock()
-defer func() {
-close(statusChan)
-m.sessionMu.Lock()
-s, ok := m.session[ref]
-if ok {
-s.statusChan = nil
-s.buildOnGoing.Store(false)
-}
-m.sessionMu.Unlock()
-}()
pw := pb.NewProgressWriter(statusChan)
// Build the specified request
ctx, cancel := context.WithCancel(ctx)
defer cancel()
-resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
+res, err := m.buildFunc(ctx, req.Options, inR, pw)
m.sessionMu.Lock()
if s, ok := m.session[ref]; ok {
-// NOTE: buildFunc can return *build.ResultContext even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
if res != nil {
s.result = res
s.cancelBuild = cancel
s.buildOptions = req.Options
m.session[ref] = s
+_, buildErr := s.result.Result(ctx)
if buildErr != nil {
-buildErr = controllererrors.WrapBuild(buildErr, ref)
+err = controllererrors.WrapBuild(buildErr, ref)
}
}
} else {
@@ -221,18 +182,69 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
}
m.sessionMu.Unlock()
-if buildErr != nil {
+if err != nil {
-return nil, buildErr
+return nil, err
}
+return &pb.BuildResponse{}, nil
+}
+func (m *Server) Finalize(ctx context.Context, req *pb.FinalizeRequest) (*pb.FinalizeResponse, error) {
+key := req.Ref
+if key == "" {
+return nil, errors.New("finalize: empty key")
}
-if resp == nil {
+m.sessionMu.Lock()
-resp = &client.SolveResponse{}
+defer m.sessionMu.Unlock()
+s, ok := m.session[key]
+if !ok {
+return nil, errors.Errorf("unknown ref %q", key)
+}
+if s.result == nil {
+return nil, errors.Errorf("no build ongoing for %q", key)
}
-return &pb.BuildResponse{
+resp, err := s.result.Wait(ctx)
+if err != nil {
+return nil, err
+}
+if s.cancelBuild != nil {
+s.cancelBuild()
+}
+s.cancelRunningProcesses()
+close(s.statusChan)
+delete(m.session, key)
+return &pb.FinalizeResponse{
ExporterResponse: resp.ExporterResponse,
}, nil
}
func (m *Server) Disconnect(ctx context.Context, req *pb.DisconnectRequest) (res *pb.DisconnectResponse, err error) {
key := req.Ref
if key == "" {
return nil, errors.New("disconnect: empty key")
}
m.sessionMu.Lock()
defer m.sessionMu.Unlock()
if s, ok := m.session[key]; ok {
if s.cancelBuild != nil {
s.cancelBuild()
}
close(s.statusChan)
s.cancelRunningProcesses()
if s.result != nil {
s.result.Close()
}
}
delete(m.session, key)
return &pb.DisconnectResponse{}, nil
}
func (m *Server) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer) error {
ref := req.Ref
if ref == "" {

@@ -38,11 +38,6 @@ Available commands are:
// RunMonitor provides an interactive session for running and managing containers via specified IO.
func RunMonitor(ctx context.Context, curRef string, options *controllerapi.BuildOptions, invokeConfig controllerapi.InvokeConfig, c control.BuildxController, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress progress.Writer) error {
-defer func() {
-if err := c.Disconnect(ctx, curRef); err != nil {
-logrus.Warnf("disconnect error: %v", err)
-}
-}()
monitorIn, monitorOut := ioset.Pipe()
defer func() {
monitorIn.Close()
@@ -146,7 +141,7 @@ func RunMonitor(ctx context.Context, curRef string, options *controllerapi.Build
}
}
var resultUpdated bool
-ref, _, err := c.Build(ctx, *bo, nil, progress) // TODO: support stdin, hold build ref
+ref, err := c.Build(ctx, *bo, nil, progress) // TODO: support stdin, hold build ref
if err != nil {
var be *controllererrors.BuildError
if errors.As(err, &be) {

@@ -29,6 +29,7 @@ type Printer struct {
warnings []client.VertexWarning
logMu sync.Mutex
logSourceMap map[digest.Digest]interface{}
+paused *bool
}
func (p *Printer) Wait() error {
@@ -45,6 +46,10 @@ func (p *Printer) Warnings() []client.VertexWarning {
return p.warnings
}
+func (p *Printer) Pause(v bool) {
+*p.paused = v
+}
func (p *Printer) ValidateLogSource(dgst digest.Digest, v interface{}) bool {
p.logMu.Lock()
defer p.logMu.Unlock()
@@ -74,10 +79,12 @@ func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string,
statusCh := make(chan *client.SolveStatus)
doneCh := make(chan struct{})
+paused := false
pw := &Printer{
status: statusCh,
done: doneCh,
logSourceMap: map[digest.Digest]interface{}{},
+paused: &paused,
}
if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && mode == PrinterModeAuto {
@@ -101,7 +108,7 @@ func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string,
go func() {
resumeLogs := logutil.Pause(logrus.StandardLogger())
// not using shared context to not disrupt display but let it finish reporting errors
-pw.warnings, pw.err = progressui.DisplaySolveStatus(ctx, c, w, statusCh, solveStatusOpt...)
+pw.warnings, pw.err = progressui.DisplaySolveStatus(ctx, c, w, statusCh, pw.paused, solveStatusOpt...)
resumeLogs()
close(doneCh)
}()

@@ -42,7 +42,7 @@ func WithDesc(text string, console string) DisplaySolveStatusOpt {
}
}
-func DisplaySolveStatus(ctx context.Context, c console.Console, w io.Writer, ch chan *client.SolveStatus, opts ...DisplaySolveStatusOpt) ([]client.VertexWarning, error) {
+func DisplaySolveStatus(ctx context.Context, c console.Console, w io.Writer, ch chan *client.SolveStatus, paused *bool, opts ...DisplaySolveStatusOpt) ([]client.VertexWarning, error) {
modeConsole := c != nil
dsso := &displaySolveStatusOpts{}
@@ -78,6 +78,11 @@ func DisplaySolveStatus(ctx context.Context, c console.Console, w io.Writer, ch
displayLimiter := rate.NewLimiter(rate.Every(displayTimeout), 1)
+ispaused := false
+if paused != nil {
+ispaused = *paused
+}
var height int
width, _ := disp.getSize()
for {
@@ -93,6 +98,10 @@ func DisplaySolveStatus(ctx context.Context, c console.Console, w io.Writer, ch
}
}
+if paused != nil && *paused && ispaused {
+continue
+}
if modeConsole {
width, height = disp.getSize()
if done {
@@ -115,6 +124,10 @@ func DisplaySolveStatus(ctx context.Context, c console.Console, w io.Writer, ch
ticker = time.NewTicker(tickerTimeout)
}
}
+if paused != nil {
+ispaused = *paused
+}
}
}

@@ -87,7 +87,7 @@ func NewPrinter(ctx context.Context, out console.File, mode string) (Writer, err
go func() {
// not using shared context to not disrupt display but let it finish reporting errors
-_, pw.err = progressui.DisplaySolveStatus(ctx, c, out, statusCh)
+_, pw.err = progressui.DisplaySolveStatus(ctx, c, out, statusCh, nil)
close(doneCh)
}()
return pw, nil
