controller: refactor progress api

Move progress printer creation to the caller side of the controller
API. Then, instead of passing around status channels, we can simply
pass around the higher-level progress.Writer interface, which allows
us to correctly pull warnings out of the remote controller.

Signed-off-by: Justin Chadwell <me@jedevc.com>
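
To illustrate the new shape of the API, the caller-side flow now looks roughly like this (a sketch assembled from the hunks below; doBuild and the WithDesc strings are placeholders standing in for cbuild.RunBuild or a controller's Build call):

import (
	"context"
	"fmt"
	"os"

	"github.com/docker/buildx/util/progress"
	"github.com/moby/buildkit/util/progress/progressui"
)

// doBuild is a hypothetical stand-in for cbuild.RunBuild or c.Build.
func doBuild(ctx context.Context, w progress.Writer) error { return nil }

func buildWithPrinter(ctx context.Context) error {
	// The caller now owns the printer and hands it down as a progress.Writer.
	printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, "auto",
		progressui.WithDesc("building with example instance", "driver:example"))
	if err != nil {
		return err
	}
	buildErr := doBuild(ctx, printer)
	// Always drain the printer; prefer the build error if both fail.
	if err := printer.Wait(); buildErr == nil {
		buildErr = err
	}
	if buildErr != nil {
		return buildErr
	}
	// Warnings are pulled from the printer itself, even for remote builds.
	for _, warn := range printer.Warnings() {
		fmt.Fprintf(os.Stderr, " - %s\n", warn.Short)
	}
	return nil
}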

@@ -1,6 +1,7 @@
package commands
import (
"bytes"
"context"
"encoding/base64"
"encoding/csv"
@@ -15,6 +16,7 @@ import (
"github.com/containerd/console"
"github.com/docker/buildx/build"
"github.com/docker/buildx/builder"
"github.com/docker/buildx/controller"
cbuild "github.com/docker/buildx/controller/build"
"github.com/docker/buildx/controller/control"
@@ -35,8 +37,11 @@ import (
"github.com/docker/docker/pkg/ioutils"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/util/appcontext"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/morikuni/aec"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -200,7 +205,22 @@ func runBuild(dockerCli command.Cli, in buildOptions) error {
if err != nil {
return err
}
progress, err := in.toProgress()
progressMode, err := in.toProgress()
if err != nil {
return err
}
b, err := builder.New(dockerCli,
builder.WithName(opts.Builder),
builder.WithContextPathHash(opts.ContextPath),
)
if err != nil {
return err
}
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progressMode, progressui.WithDesc(
fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver),
fmt.Sprintf("%s:%s", b.Driver, b.Name),
))
if err != nil {
return err
}
@@ -211,10 +231,14 @@ func runBuild(dockerCli command.Cli, in buildOptions) error {
return errors.Wrap(err, "removing image ID file")
}
}
resp, _, err := cbuild.RunBuild(ctx, dockerCli, opts, os.Stdin, progress, nil, false)
resp, _, err := cbuild.RunBuild(ctx, dockerCli, opts, os.Stdin, printer, false)
if err1 := printer.Wait(); err == nil {
err = err1
}
if err != nil {
return err
}
printWarnings(os.Stderr, printer.Warnings(), progressMode)
if in.quiet {
fmt.Println(resp.ExporterResponse[exptypes.ExporterImageDigestKey])
}
@@ -517,7 +541,22 @@ func launchControllerAndRunBuild(dockerCli command.Cli, options buildOptions) er
if err != nil {
return err
}
progress, err := options.toProgress()
progressMode, err := options.toProgress()
if err != nil {
return err
}
b, err := builder.New(dockerCli,
builder.WithName(opts.Builder),
builder.WithContextPathHash(opts.ContextPath),
)
if err != nil {
return err
}
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progressMode, progressui.WithDesc(
fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver),
fmt.Sprintf("%s:%s", b.Driver, b.Name),
))
if err != nil {
return err
}
@@ -550,7 +589,10 @@ func launchControllerAndRunBuild(dockerCli command.Cli, options buildOptions) er
}
var resp *client.SolveResponse
ref, resp, err = c.Build(ctx, opts, pr, os.Stdout, os.Stderr, progress)
ref, resp, err = c.Build(ctx, opts, pr, printer)
if err1 := printer.Wait(); err == nil {
err = err1
}
if err != nil {
var be *controllererrors.BuildError
if errors.As(err, &be) {
@@ -561,6 +603,7 @@ func launchControllerAndRunBuild(dockerCli command.Cli, options buildOptions) er
return errors.Wrapf(err, "failed to build")
}
}
printWarnings(os.Stderr, printer.Warnings(), progressMode)
if err := pw.Close(); err != nil {
logrus.Debug("failed to close stdin pipe writer")
}
@@ -595,7 +638,7 @@ func launchControllerAndRunBuild(dockerCli command.Cli, options buildOptions) er
}
return errors.Errorf("failed to configure terminal: %v", err)
}
err = monitor.RunMonitor(ctx, ref, &opts, options.invoke.InvokeConfig, c, progress, pr2, os.Stdout, os.Stderr)
err = monitor.RunMonitor(ctx, ref, &opts, options.invoke.InvokeConfig, c, pr2, os.Stdout, os.Stderr, printer)
con.Reset()
if err := pw2.Close(); err != nil {
logrus.Debug("failed to close monitor stdin pipe reader")
@@ -881,3 +924,43 @@ func resolvePaths(options *controllerapi.BuildOptions) (_ *controllerapi.BuildOp
return options, nil
}
func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
return
}
fmt.Fprintf(w, "\n ")
sb := &bytes.Buffer{}
if len(warnings) == 1 {
fmt.Fprintf(sb, "1 warning found")
} else {
fmt.Fprintf(sb, "%d warnings found", len(warnings))
}
if logrus.GetLevel() < logrus.DebugLevel {
fmt.Fprintf(sb, " (use --debug to expand)")
}
fmt.Fprintf(sb, ":\n")
fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))
for _, warn := range warnings {
fmt.Fprintf(w, " - %s\n", warn.Short)
if logrus.GetLevel() < logrus.DebugLevel {
continue
}
for _, d := range warn.Detail {
fmt.Fprintf(w, "%s\n", d)
}
if warn.URL != "" {
fmt.Fprintf(w, "More info: %s\n", warn.URL)
}
if warn.SourceInfo != nil && warn.Range != nil {
src := errdefs.Source{
Info: warn.SourceInfo,
Ranges: warn.Range,
}
src.Print(w)
}
fmt.Fprintf(w, "\n")
}
}

@@ -10,6 +10,7 @@ import (
"github.com/docker/buildx/controller/control"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/monitor"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -18,7 +19,7 @@ import (
func debugShellCmd(dockerCli command.Cli) *cobra.Command {
var options control.ControlOptions
var progress string
var progressMode string
cmd := &cobra.Command{
Use: "debug-shell",
@@ -38,9 +39,15 @@ func debugShellCmd(dockerCli command.Cli) *cobra.Command {
if err := con.SetRaw(); err != nil {
return errors.Errorf("failed to configure terminal: %v", err)
}
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, progressMode)
if err != nil {
return err
}
err = monitor.RunMonitor(ctx, "", nil, controllerapi.InvokeConfig{
Tty: true,
}, c, progress, os.Stdin, os.Stdout, os.Stderr)
}, c, os.Stdin, os.Stdout, os.Stderr, printer)
con.Reset()
return err
},
@@ -51,7 +58,7 @@ func debugShellCmd(dockerCli command.Cli) *cobra.Command {
flags.StringVar(&options.Root, "root", "", "Specify root directory of server to connect [experimental]")
flags.BoolVar(&options.Detach, "detach", runtime.GOOS == "linux", "Detach buildx server (supported only on linux) [experimental]")
flags.StringVar(&options.ServerConfig, "server-config", "", "Specify buildx server config file (used only when launching new server) [experimental]")
flags.StringVar(&progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
return cmd
}

@@ -1,12 +1,10 @@
package build
import (
"bytes"
"context"
"encoding/base64"
"encoding/csv"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
@@ -30,12 +28,8 @@ import (
"github.com/docker/go-units"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/morikuni/aec"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
)
@@ -46,7 +40,7 @@ const defaultTargetName = "default"
// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
// inspect the result and debug the cause of that error.
func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progressMode string, statusChan chan *client.SolveStatus, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.BuildOptions, inStream io.Reader, progress progress.Writer, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
if in.NoCache && len(in.NoCacheFilter) > 0 {
return nil, nil, errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
}
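
The NOTE above means callers should check the returned *build.ResultContext before the error; a minimal consuming sketch (keepResult is a hypothetical hook, mirroring how localController.Build stores the result further down):

import (
	"context"
	"io"

	"github.com/docker/buildx/build"
	cbuild "github.com/docker/buildx/controller/build"
	controllerapi "github.com/docker/buildx/controller/pb"
	"github.com/docker/buildx/util/progress"
	"github.com/docker/cli/cli/command"
	"github.com/moby/buildkit/client"
)

// keepResult is a hypothetical hook for stashing the debuggable result.
func keepResult(res *build.ResultContext) {}

func runAndKeepResult(ctx context.Context, dockerCli command.Cli, options controllerapi.BuildOptions, in io.Reader, w progress.Writer) (*client.SolveResponse, error) {
	resp, res, err := cbuild.RunBuild(ctx, dockerCli, options, in, w, true)
	if res != nil {
		// res can be non-nil even when err != nil, so a failed build
		// can still be inspected and debugged.
		keepResult(res)
	}
	if err != nil {
		return nil, err
	}
	return resp, nil
}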
@@ -164,6 +158,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
contextPathHash = in.ContextPath
}
// TODO: this should not be loaded this side of the controller api
b, err := builder.New(dockerCli,
builder.WithName(in.Builder),
builder.WithContextPathHash(contextPathHash),
@@ -179,7 +174,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
return nil, nil, err
}
resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progressMode, in.MetadataFile, statusChan, generateResult)
resp, res, err := buildTargets(ctx, dockerCli, b.NodeGroup, nodes, map[string]build.Options{defaultTargetName: opts}, progress, in.MetadataFile, generateResult)
err = wrapBuildError(err, false)
if err != nil {
// NOTE: buildTargets can return *build.ResultContext even on error.
@@ -193,24 +188,14 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build
// NOTE: When an error happens during the build and this function acquires the debuggable *build.ResultContext,
// this function returns it in addition to the error (i.e. it does "return nil, res, err"). The caller can
// inspect the result and debug the cause of that error.
func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progressMode string, metadataFile string, statusChan chan *client.SolveStatus, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()
printer, err := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode, progressui.WithDesc(
fmt.Sprintf("building with %q instance using %s driver", ng.Name, ng.Driver),
fmt.Sprintf("%s:%s", ng.Driver, ng.Name),
))
if err != nil {
return nil, nil, err
}
func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, nodes []builder.Node, opts map[string]build.Options, progress progress.Writer, metadataFile string, generateResult bool) (*client.SolveResponse, *build.ResultContext, error) {
var res *build.ResultContext
var resp map[string]*client.SolveResponse
var err error
if generateResult {
var mu sync.Mutex
var idx int
resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress.Tee(printer, statusChan), func(driverIndex int, gotRes *build.ResultContext) {
resp, err = build.BuildWithResultHandler(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress, func(driverIndex int, gotRes *build.ResultContext) {
mu.Lock()
defer mu.Unlock()
if res == nil || driverIndex < idx {
@@ -218,11 +203,7 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGrou
}
})
} else {
resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress.Tee(printer, statusChan))
}
err1 := printer.Wait()
if err == nil {
err = err1
resp, err = build.Build(ctx, nodes, opts, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), progress)
}
if err != nil {
return nil, res, err
@@ -234,8 +215,6 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGrou
}
}
printWarnings(os.Stderr, printer.Warnings(), progressMode)
for k := range resp {
if opts[k].PrintFunc != nil {
if err := printResult(opts[k].PrintFunc, resp[k].ExporterResponse); err != nil {
@@ -247,46 +226,6 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, ng *store.NodeGrou
return resp[defaultTargetName], res, err
}
func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
return
}
fmt.Fprintf(w, "\n ")
sb := &bytes.Buffer{}
if len(warnings) == 1 {
fmt.Fprintf(sb, "1 warning found")
} else {
fmt.Fprintf(sb, "%d warnings found", len(warnings))
}
if logrus.GetLevel() < logrus.DebugLevel {
fmt.Fprintf(sb, " (use --debug to expand)")
}
fmt.Fprintf(sb, ":\n")
fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))
for _, warn := range warnings {
fmt.Fprintf(w, " - %s\n", warn.Short)
if logrus.GetLevel() < logrus.DebugLevel {
continue
}
for _, d := range warn.Detail {
fmt.Fprintf(w, "%s\n", d)
}
if warn.URL != "" {
fmt.Fprintf(w, "More info: %s\n", warn.URL)
}
if warn.SourceInfo != nil && warn.Range != nil {
src := errdefs.Source{
Info: warn.SourceInfo,
Ranges: warn.Range,
}
src.Print(w)
}
fmt.Fprintf(w, "\n")
}
}
func parsePrintFunc(str string) (*build.PrintFunc, error) {
if str == "" {
return nil, nil

@@ -4,13 +4,13 @@ import (
"context"
"io"
"github.com/containerd/console"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/client"
)
type BuildxController interface {
Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (ref string, resp *client.SolveResponse, err error)
Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (ref string, resp *client.SolveResponse, err error)
// Invoke starts an IO session into the specified process.
// If pid doesn't match any running processes, it starts a new process with the specified config.
// If there is no container running or InvokeConfig.Rollback is specified, the process will start in a newly created container.
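
Any sink satisfying progress.Writer can now be plugged into the controller; judging from the adapter added in controller/pb below, a minimal no-op implementation looks something like this (a sketch, assuming the interface is exactly the three methods that adapter implements):

import (
	"github.com/docker/buildx/util/progress"
	"github.com/moby/buildkit/client"
	"github.com/opencontainers/go-digest"
)

// discardWriter is a hypothetical progress.Writer that drops all updates.
type discardWriter struct{}

func (discardWriter) Write(*client.SolveStatus)                         {}
func (discardWriter) ValidateLogSource(digest.Digest, interface{}) bool { return true }
func (discardWriter) ClearLogSource(interface{})                        {}

// Compile-time check that the sketch satisfies the interface.
var _ progress.Writer = discardWriter{}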

@@ -5,7 +5,6 @@ import (
"io"
"sync/atomic"
"github.com/containerd/console"
"github.com/docker/buildx/build"
cbuild "github.com/docker/buildx/controller/build"
"github.com/docker/buildx/controller/control"
@@ -13,6 +12,7 @@ import (
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
@@ -42,13 +42,13 @@ type localController struct {
buildOnGoing atomic.Bool
}
func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (string, *client.SolveResponse, error) {
func (b *localController) Build(ctx context.Context, options controllerapi.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
if !b.buildOnGoing.CompareAndSwap(false, true) {
return "", nil, errors.New("build ongoing")
}
defer b.buildOnGoing.Store(false)
resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progressMode, nil, true)
resp, res, buildErr := cbuild.RunBuild(ctx, b.dockerCli, options, in, progress, true)
// NOTE: RunBuild can return *build.ResultContext even on error.
if res != nil {
b.buildConfig = buildConfig{

@@ -0,0 +1,122 @@
package pb
import (
"github.com/docker/buildx/util/progress"
control "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/opencontainers/go-digest"
)
type writer struct {
ch chan<- *StatusResponse
}
func NewProgressWriter(ch chan<- *StatusResponse) progress.Writer {
return &writer{ch: ch}
}
func (w *writer) Write(status *client.SolveStatus) {
w.ch <- ToControlStatus(status)
}
func (w *writer) ValidateLogSource(digest.Digest, interface{}) bool {
return true
}
func (w *writer) ClearLogSource(interface{}) {}
func ToControlStatus(s *client.SolveStatus) *StatusResponse {
resp := StatusResponse{}
for _, v := range s.Vertexes {
resp.Vertexes = append(resp.Vertexes, &control.Vertex{
Digest: v.Digest,
Inputs: v.Inputs,
Name: v.Name,
Started: v.Started,
Completed: v.Completed,
Error: v.Error,
Cached: v.Cached,
ProgressGroup: v.ProgressGroup,
})
}
for _, v := range s.Statuses {
resp.Statuses = append(resp.Statuses, &control.VertexStatus{
ID: v.ID,
Vertex: v.Vertex,
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: v.Timestamp,
Started: v.Started,
Completed: v.Completed,
})
}
for _, v := range s.Logs {
resp.Logs = append(resp.Logs, &control.VertexLog{
Vertex: v.Vertex,
Stream: int64(v.Stream),
Msg: v.Data,
Timestamp: v.Timestamp,
})
}
for _, v := range s.Warnings {
resp.Warnings = append(resp.Warnings, &control.VertexWarning{
Vertex: v.Vertex,
Level: int64(v.Level),
Short: v.Short,
Detail: v.Detail,
Url: v.URL,
Info: v.SourceInfo,
Ranges: v.Range,
})
}
return &resp
}
func FromControlStatus(resp *StatusResponse) *client.SolveStatus {
s := client.SolveStatus{}
for _, v := range resp.Vertexes {
s.Vertexes = append(s.Vertexes, &client.Vertex{
Digest: v.Digest,
Inputs: v.Inputs,
Name: v.Name,
Started: v.Started,
Completed: v.Completed,
Error: v.Error,
Cached: v.Cached,
ProgressGroup: v.ProgressGroup,
})
}
for _, v := range resp.Statuses {
s.Statuses = append(s.Statuses, &client.VertexStatus{
ID: v.ID,
Vertex: v.Vertex,
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: v.Timestamp,
Started: v.Started,
Completed: v.Completed,
})
}
for _, v := range resp.Logs {
s.Logs = append(s.Logs, &client.VertexLog{
Vertex: v.Vertex,
Stream: int(v.Stream),
Data: v.Msg,
Timestamp: v.Timestamp,
})
}
for _, v := range resp.Warnings {
s.Warnings = append(s.Warnings, &client.VertexWarning{
Vertex: v.Vertex,
Level: int(v.Level),
Short: v.Short,
Detail: v.Detail,
URL: v.Url,
SourceInfo: v.Info,
Range: v.Ranges,
})
}
return &s
}
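
This adapter is what lets the server keep its channel-based plumbing while build code only sees a progress.Writer; wiring it up looks roughly like this (a sketch of the pattern used by Server.Build and Server.Status below):

import (
	"github.com/docker/buildx/controller/pb"
	"github.com/moby/buildkit/client"
)

func wireServerProgress() {
	ch := make(chan *pb.StatusResponse)
	w := pb.NewProgressWriter(ch) // a progress.Writer backed by the channel
	done := make(chan struct{})
	go func() {
		defer close(done)
		for st := range ch {
			_ = st // e.g. stream.Send(st) in Server.Status
		}
	}()
	// Build code writes *client.SolveStatus; the adapter converts each one
	// with ToControlStatus before sending it on the channel.
	w.Write(&client.SolveStatus{})
	close(ch)
	<-done
}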

@@ -6,7 +6,6 @@ import (
"sync"
"time"
"github.com/containerd/console"
"github.com/containerd/containerd/defaults"
"github.com/containerd/containerd/pkg/dialer"
"github.com/docker/buildx/controller/pb"
@@ -114,14 +113,9 @@ func (c *Client) Inspect(ctx context.Context, ref string) (*pb.InspectResponse,
return c.client().Inspect(ctx, &pb.InspectRequest{Ref: ref})
}
func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, w io.Writer, out console.File, progressMode string) (string, *client.SolveResponse, error) {
func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadCloser, progress progress.Writer) (string, *client.SolveResponse, error) {
ref := identity.NewID()
pw, err := progress.NewPrinter(context.TODO(), w, out, progressMode)
if err != nil {
return "", nil, err
}
statusChan := make(chan *client.SolveStatus)
statusDone := make(chan struct{})
eg, egCtx := errgroup.WithContext(ctx)
var resp *client.SolveResponse
eg.Go(func() error {
@@ -131,17 +125,12 @@ func (c *Client) Build(ctx context.Context, options pb.BuildOptions, in io.ReadC
return err
})
eg.Go(func() error {
defer close(statusDone)
for s := range statusChan {
st := s
pw.Write(st)
progress.Write(st)
}
return nil
})
eg.Go(func() error {
<-statusDone
return pw.Wait()
})
return ref, resp, eg.Wait()
}
@@ -180,51 +169,7 @@ func (c *Client) build(ctx context.Context, ref string, options pb.BuildOptions,
}
return errors.Wrap(err, "failed to receive status")
}
s := client.SolveStatus{}
for _, v := range resp.Vertexes {
s.Vertexes = append(s.Vertexes, &client.Vertex{
Digest: v.Digest,
Inputs: v.Inputs,
Name: v.Name,
Started: v.Started,
Completed: v.Completed,
Error: v.Error,
Cached: v.Cached,
ProgressGroup: v.ProgressGroup,
})
}
for _, v := range resp.Statuses {
s.Statuses = append(s.Statuses, &client.VertexStatus{
ID: v.ID,
Vertex: v.Vertex,
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: v.Timestamp,
Started: v.Started,
Completed: v.Completed,
})
}
for _, v := range resp.Logs {
s.Logs = append(s.Logs, &client.VertexLog{
Vertex: v.Vertex,
Stream: int(v.Stream),
Data: v.Msg,
Timestamp: v.Timestamp,
})
}
for _, v := range resp.Warnings {
s.Warnings = append(s.Warnings, &client.VertexWarning{
Vertex: v.Vertex,
Level: int(v.Level),
Short: v.Short,
Detail: v.Detail,
URL: v.Url,
SourceInfo: v.Info,
Range: v.Ranges,
})
}
statusChan <- &s
statusChan <- pb.FromControlStatus(resp)
}
})
if in != nil {

@@ -21,6 +21,7 @@ import (
"github.com/docker/buildx/controller/control"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/version"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/client"
@@ -142,8 +143,8 @@ func serveCmd(dockerCli command.Cli) *cobra.Command {
}()
// prepare server
b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, statusChan chan *client.SolveStatus) (*client.SolveResponse, *build.ResultContext, error) {
return cbuild.RunBuild(ctx, dockerCli, *options, stdin, "quiet", statusChan, true)
b := NewServer(func(ctx context.Context, options *controllerapi.BuildOptions, stdin io.Reader, progress progress.Writer) (*client.SolveResponse, *build.ResultContext, error) {
return cbuild.RunBuild(ctx, dockerCli, *options, stdin, progress, true)
})
defer b.Close()

@@ -12,14 +12,14 @@ import (
"github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/controller/processes"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/version"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, statusChan chan *client.SolveStatus) (resp *client.SolveResponse, res *build.ResultContext, err error)
type BuildFunc func(ctx context.Context, options *pb.BuildOptions, stdin io.Reader, progress progress.Writer) (resp *client.SolveResponse, res *build.ResultContext, err error)
func NewServer(buildFunc BuildFunc) *Server {
return &Server{
@@ -35,7 +35,7 @@ type Server struct {
type session struct {
buildOnGoing atomic.Bool
statusChan chan *client.SolveStatus
statusChan chan *pb.StatusResponse
cancelBuild func()
buildOptions *pb.BuildOptions
inputPipe *io.PipeWriter
@ -177,8 +177,9 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
s = &session{}
s.buildOnGoing.Store(true)
}
s.processes = processes.NewManager()
statusChan := make(chan *client.SolveStatus)
statusChan := make(chan *pb.StatusResponse)
s.statusChan = statusChan
inR, inW := io.Pipe()
defer inR.Close()
@@ -196,10 +197,12 @@ func (m *Server) Build(ctx context.Context, req *pb.BuildRequest) (*pb.BuildResp
m.sessionMu.Unlock()
}()
pw := pb.NewProgressWriter(statusChan)
// Build the specified request
ctx, cancel := context.WithCancel(ctx)
defer cancel()
resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, statusChan)
resp, res, buildErr := m.buildFunc(ctx, req.Options, inR, pw)
m.sessionMu.Lock()
if s, ok := m.session[ref]; ok {
// NOTE: buildFunc can return *build.ResultContext even on error (e.g. when it's implemented using (github.com/docker/buildx/controller/build).RunBuild).
@@ -237,7 +240,7 @@ func (m *Server) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer
}
// Wait and get status channel prepared by Build()
var statusChan <-chan *client.SolveStatus
var statusChan <-chan *pb.StatusResponse
for {
// TODO: timeout?
m.sessionMu.Lock()
@@ -256,8 +259,7 @@ func (m *Server) Status(req *pb.StatusRequest, stream pb.Controller_StatusServer
if ss == nil {
break
}
cs := toControlStatus(ss)
if err := stream.Send(cs); err != nil {
if err := stream.Send(ss); err != nil {
return err
}
}
@@ -437,51 +439,3 @@ func (m *Server) Invoke(srv pb.Controller_InvokeServer) error {
return eg.Wait()
}
func toControlStatus(s *client.SolveStatus) *pb.StatusResponse {
resp := pb.StatusResponse{}
for _, v := range s.Vertexes {
resp.Vertexes = append(resp.Vertexes, &controlapi.Vertex{
Digest: v.Digest,
Inputs: v.Inputs,
Name: v.Name,
Started: v.Started,
Completed: v.Completed,
Error: v.Error,
Cached: v.Cached,
ProgressGroup: v.ProgressGroup,
})
}
for _, v := range s.Statuses {
resp.Statuses = append(resp.Statuses, &controlapi.VertexStatus{
ID: v.ID,
Vertex: v.Vertex,
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: v.Timestamp,
Started: v.Started,
Completed: v.Completed,
})
}
for _, v := range s.Logs {
resp.Logs = append(resp.Logs, &controlapi.VertexLog{
Vertex: v.Vertex,
Stream: int64(v.Stream),
Msg: v.Data,
Timestamp: v.Timestamp,
})
}
for _, v := range s.Warnings {
resp.Warnings = append(resp.Warnings, &controlapi.VertexWarning{
Vertex: v.Vertex,
Level: int64(v.Level),
Short: v.Short,
Detail: v.Detail,
Url: v.URL,
Info: v.SourceInfo,
Ranges: v.Range,
})
}
return &resp
}

@@ -15,6 +15,7 @@ import (
controllererrors "github.com/docker/buildx/controller/errdefs"
controllerapi "github.com/docker/buildx/controller/pb"
"github.com/docker/buildx/util/ioset"
"github.com/docker/buildx/util/progress"
"github.com/moby/buildkit/identity"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -36,7 +37,7 @@ Available commands are:
`
// RunMonitor provides an interactive session for running and managing containers via specified IO.
func RunMonitor(ctx context.Context, curRef string, options *controllerapi.BuildOptions, invokeConfig controllerapi.InvokeConfig, c control.BuildxController, progressMode string, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File) error {
func RunMonitor(ctx context.Context, curRef string, options *controllerapi.BuildOptions, invokeConfig controllerapi.InvokeConfig, c control.BuildxController, stdin io.ReadCloser, stdout io.WriteCloser, stderr console.File, progress progress.Writer) error {
defer func() {
if err := c.Disconnect(ctx, curRef); err != nil {
logrus.Warnf("disconnect error: %v", err)
@@ -145,7 +146,7 @@ func RunMonitor(ctx context.Context, curRef string, options *controllerapi.Build
}
}
var resultUpdated bool
ref, _, err := c.Build(ctx, *bo, nil, stdout, stderr, progressMode) // TODO: support stdin, hold build ref
ref, _, err := c.Build(ctx, *bo, nil, progress) // TODO: support stdin, hold build ref
if err != nil {
var be *controllererrors.BuildError
if errors.As(err, &be) {
