From f1f81e28d537002e98a65353405d98531c68a44c Mon Sep 17 00:00:00 2001 From: CrazyMax Date: Fri, 16 Dec 2022 09:54:13 +0100 Subject: [PATCH] builder: load nodes once Signed-off-by: CrazyMax --- builder/builder.go | 22 ++++---- builder/node.go | 119 +++++++++++++++++++++++++----------------- commands/bake.go | 7 ++- commands/build.go | 8 +-- commands/create.go | 2 +- commands/diskusage.go | 6 ++- commands/inspect.go | 5 +- commands/ls.go | 12 +++-- commands/prune.go | 6 ++- commands/rm.go | 12 ++--- commands/stop.go | 3 +- 11 files changed, 118 insertions(+), 84 deletions(-) diff --git a/builder/builder.go b/builder/builder.go index 55476ea1..f29ad1b7 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -21,7 +21,7 @@ import ( type Builder struct { *store.NodeGroup driverFactory driverFactory - nodes []Node + nodesFactory nodesFactory opts builderOpts err error } @@ -144,12 +144,17 @@ func (b *Builder) ImageOpt() (imagetools.Opt, error) { // Boot bootstrap a builder func (b *Builder) Boot(ctx context.Context) (bool, error) { - toBoot := make([]int, 0, len(b.nodes)) - for idx, d := range b.nodes { - if d.Err != nil || d.Driver == nil || d.DriverInfo == nil { + // ensure nodes are loaded + if _, err := b.Nodes(ctx); err != nil { + return false, err + } + + toBoot := make([]int, 0, len(b.nodesFactory.nodes)) + for idx, n := range b.nodesFactory.nodes { + if n.Err != nil || n.Driver == nil || n.DriverInfo == nil { continue } - if d.DriverInfo.Status != driver.Running { + if n.DriverInfo.Status != driver.Running { toBoot = append(toBoot, idx) } } @@ -168,9 +173,8 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) { func(idx int) { eg.Go(func() error { pw := progress.WithPrefix(printer, b.NodeGroup.Nodes[idx].Name, len(toBoot) > 1) - _, err := driver.Boot(ctx, baseCtx, b.nodes[idx].Driver, pw) - if err != nil { - b.nodes[idx].Err = err + if _, err := driver.Boot(ctx, baseCtx, b.nodesFactory.nodes[idx].Driver, pw); err != nil { + b.nodesFactory.nodes[idx].Err = err } return nil }) @@ -188,7 +192,7 @@ func (b *Builder) Boot(ctx context.Context) (bool, error) { // Inactive checks if all nodes are inactive for this builder. func (b *Builder) Inactive() bool { - for _, d := range b.nodes { + for _, d := range b.nodesFactory.nodes { if d.DriverInfo != nil && d.DriverInfo.Status == driver.Running { return false } diff --git a/builder/node.go b/builder/node.go index f565738d..a385d2ec 100644 --- a/builder/node.go +++ b/builder/node.go @@ -2,6 +2,7 @@ package builder import ( "context" + "sync" "github.com/docker/buildx/driver" ctxkube "github.com/docker/buildx/driver/kubernetes/context" @@ -29,16 +30,27 @@ type Node struct { Err error } -// Nodes returns nodes for this builder. -func (b *Builder) Nodes() []Node { - return b.nodes +type nodesFactory struct { + nodes []Node + once sync.Once } -// LoadNodes loads and returns nodes for this builder. -// TODO: this should be a method on a Node object and lazy load data for each driver. -func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err error) { - eg, _ := errgroup.WithContext(ctx) - b.nodes = make([]Node, len(b.NodeGroup.Nodes)) +// Nodes loads and returns nodes for this builder. +func (b *Builder) Nodes(ctx context.Context) (_ []Node, err error) { + b.nodesFactory.once.Do(func() { + err = b.loadNodes(ctx) + }) + return b.nodesFactory.nodes, err +} + +// NodesWithData loads nodes with data for this builder. 
+func (b *Builder) NodesWithData(ctx context.Context) (_ []Node, err error) {
+	err = b.loadNodesWithData(ctx)
+	return b.nodesFactory.nodes, err
+}
+
+func (b *Builder) loadNodes(ctx context.Context) (err error) {
+	b.nodesFactory.nodes = make([]Node, len(b.NodeGroup.Nodes))
 
 	defer func() {
 		if b.err == nil && err != nil {
@@ -48,14 +60,15 @@ func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err e
 
 	factory, err := b.Factory(ctx)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	imageopt, err := b.ImageOpt()
 	if err != nil {
-		return nil, err
+		return err
 	}
 
+	eg, _ := errgroup.WithContext(ctx)
 	for i, n := range b.NodeGroup.Nodes {
 		func(i int, n store.Node) {
 			eg.Go(func() error {
@@ -64,7 +77,7 @@ func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err e
 					ProxyConfig: storeutil.GetProxyConfig(b.opts.dockerCli),
 				}
 				defer func() {
-					b.nodes[i] = node
+					b.nodesFactory.nodes[i] = node
 				}()
 
 				dockerapi, err := dockerutil.NewClientAPI(b.opts.dockerCli, n.Endpoint)
@@ -108,59 +121,71 @@ func (b *Builder) LoadNodes(ctx context.Context, withData bool) (_ []Node, err e
 					node.Err = err
 					return nil
 				}
+
 				node.Driver = d
 				node.ImageOpt = imageopt
-
-				if withData {
-					if err := node.loadData(ctx); err != nil {
-						node.Err = err
-					}
-				}
 				return nil
 			})
 		}(i, n)
 	}
 
-	if err := eg.Wait(); err != nil {
-		return nil, err
-	}
-
-	// TODO: This should be done in the routine loading driver data
-	if withData {
-		kubernetesDriverCount := 0
-		for _, d := range b.nodes {
-			if d.DriverInfo != nil && len(d.DriverInfo.DynamicNodes) > 0 {
-				kubernetesDriverCount++
-			}
-		}
-
-		isAllKubernetesDrivers := len(b.nodes) == kubernetesDriverCount
-		if isAllKubernetesDrivers {
-			var nodes []Node
-			var dynamicNodes []store.Node
-			for _, di := range b.nodes {
-				// dynamic nodes are used in Kubernetes driver.
-				// Kubernetes' pods are dynamically mapped to BuildKit Nodes.
-				if di.DriverInfo != nil && len(di.DriverInfo.DynamicNodes) > 0 {
-					for i := 0; i < len(di.DriverInfo.DynamicNodes); i++ {
-						diClone := di
-						if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
-							diClone.Platforms = pl
-						}
-						nodes = append(nodes, di)
-					}
-					dynamicNodes = append(dynamicNodes, di.DriverInfo.DynamicNodes...)
-				}
-			}
-
-			// not append (remove the static nodes in the store)
-			b.NodeGroup.Nodes = dynamicNodes
-			b.nodes = nodes
-			b.NodeGroup.Dynamic = true
-		}
-	}
-
-	return b.nodes, nil
+	return eg.Wait()
+}
+
+func (b *Builder) loadNodesWithData(ctx context.Context) (err error) {
+	// ensure nodes are loaded
+	if _, err = b.Nodes(ctx); err != nil {
+		return err
+	}
+
+	eg, _ := errgroup.WithContext(ctx)
+	for idx := range b.nodesFactory.nodes {
+		func(idx int) {
+			eg.Go(func() error {
+				// use a local err; writing the outer named return from
+				// concurrent goroutines would be a data race
+				if err := b.nodesFactory.nodes[idx].loadData(ctx); err != nil {
+					b.nodesFactory.nodes[idx].Err = err
+				}
+				return nil
+			})
+		}(idx)
+	}
+	if err := eg.Wait(); err != nil {
+		return err
+	}
+
+	kubernetesDriverCount := 0
+	for _, d := range b.nodesFactory.nodes {
+		if d.DriverInfo != nil && len(d.DriverInfo.DynamicNodes) > 0 {
+			kubernetesDriverCount++
+		}
+	}
+
+	isAllKubernetesDrivers := len(b.nodesFactory.nodes) == kubernetesDriverCount
+	if isAllKubernetesDrivers {
+		var nodes []Node
+		var dynamicNodes []store.Node
+		for _, di := range b.nodesFactory.nodes {
+			// dynamic nodes are used in Kubernetes driver.
+			// Kubernetes' pods are dynamically mapped to BuildKit Nodes.
+			if di.DriverInfo != nil && len(di.DriverInfo.DynamicNodes) > 0 {
+				for i := 0; i < len(di.DriverInfo.DynamicNodes); i++ {
+					diClone := di
+					if pl := di.DriverInfo.DynamicNodes[i].Platforms; len(pl) > 0 {
+						diClone.Platforms = pl
+					}
+					nodes = append(nodes, diClone)
+				}
+				dynamicNodes = append(dynamicNodes, di.DriverInfo.DynamicNodes...)
+			}
+		}
+
+		// not append (remove the static nodes in the store)
+		b.NodeGroup.Nodes = dynamicNodes
+		b.nodesFactory.nodes = nodes
+		b.NodeGroup.Dynamic = true
+	}
+
+	return nil
 }
 
 func (n *Node) loadData(ctx context.Context) error {
diff --git a/commands/bake.go b/commands/bake.go
index e36d36b9..bad6ca4d 100644
--- a/commands/bake.go
+++ b/commands/bake.go
@@ -111,13 +111,12 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
 		if err != nil {
 			return err
 		}
+		if nodes, err = b.Nodes(ctx); err != nil {
+			return err
+		}
 		if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
 			return errors.Wrapf(err, "failed to update builder last activity time")
 		}
-		nodes, err = b.LoadNodes(ctx, false)
-		if err != nil {
-			return err
-		}
 	}
 
 	if url != "" {
diff --git a/commands/build.go b/commands/build.go
index e8e3c231..4af5e3bf 100644
--- a/commands/build.go
+++ b/commands/build.go
@@ -263,13 +263,13 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
 	if err != nil {
 		return err
 	}
-	if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
-		return errors.Wrapf(err, "failed to update builder last activity time")
-	}
-	nodes, err := b.LoadNodes(ctx, false)
+	nodes, err := b.Nodes(ctx)
 	if err != nil {
 		return err
 	}
+	if err = updateLastActivity(dockerCli, b.NodeGroup); err != nil {
+		return errors.Wrapf(err, "failed to update builder last activity time")
+	}
 
 	imageID, res, err := buildTargets(ctx, dockerCli, nodes, map[string]build.Options{defaultTargetName: opts}, in.progress, in.metadataFile, in.invoke != "")
 	err = wrapBuildError(err, false)
diff --git a/commands/create.go b/commands/create.go
index 35f7ba5d..c38732bd 100644
--- a/commands/create.go
+++ b/commands/create.go
@@ -250,7 +250,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
 		timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
 		defer cancel()
 
-		nodes, err := b.LoadNodes(timeoutCtx, true)
+		nodes, err := b.NodesWithData(timeoutCtx)
 		if err != nil {
 			return err
 		}
diff --git a/commands/diskusage.go b/commands/diskusage.go
index d4bd7dda..907615d8 100644
--- a/commands/diskusage.go
+++ b/commands/diskusage.go
@@ -33,12 +33,14 @@ func runDiskUsage(dockerCli command.Cli, opts duOptions) error {
 		return err
 	}
 
-	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
+	b, err := builder.New(dockerCli,
+		builder.WithName(opts.builder),
+	)
 	if err != nil {
 		return err
 	}
 
-	nodes, err := b.LoadNodes(ctx, false)
+	nodes, err := b.Nodes(ctx)
 	if err != nil {
 		return err
 	}
diff --git a/commands/inspect.go b/commands/inspect.go
index fc4530b7..7951ae78 100644
--- a/commands/inspect.go
+++ b/commands/inspect.go
@@ -35,7 +35,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
 	timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
 	defer cancel()
 
-	nodes, err := b.LoadNodes(timeoutCtx, true)
+	nodes, err := b.NodesWithData(timeoutCtx)
 	if in.bootstrap {
 		var ok bool
 		ok, err = b.Boot(ctx)
@@ -43,7 +43,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
 			return err
 		}
 		if ok {
-			nodes, err = b.LoadNodes(timeoutCtx, true)
+			nodes, err = b.NodesWithData(timeoutCtx)
 		}
 	}
 
@@ -62,7 +62,6 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
 	if err == nil {
 		fmt.Fprintln(w, "")
 		fmt.Fprintln(w, "Nodes:")
-
 		for i, n := range nodes {
 			if i != 0 {
 				fmt.Fprintln(w, "")
diff --git a/commands/ls.go b/commands/ls.go
index 428de1b6..edadfbde 100644
--- a/commands/ls.go
+++ b/commands/ls.go
@@ -48,7 +48,7 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
 	for _, b := range builders {
 		func(b *builder.Builder) {
 			eg.Go(func() error {
-				_, _ = b.LoadNodes(timeoutCtx, true)
+				_, _ = b.NodesWithData(timeoutCtx)
 				return nil
 			})
 		}(b)
@@ -66,7 +66,7 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
 		if current.Name == b.Name {
 			b.Name += " *"
 		}
-		if ok := printBuilder(w, b); !ok {
+		if ok := printBuilder(ctx, w, b); !ok {
 			printErr = true
 		}
 	}
@@ -79,7 +79,8 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
 			if b.Err() != nil {
 				_, _ = fmt.Fprintf(dockerCli.Err(), "Cannot load builder %s: %s\n", b.Name, strings.TrimSpace(b.Err().Error()))
 			} else {
-				for _, d := range b.Nodes() {
+				nodes, _ := b.Nodes(ctx)
+				for _, d := range nodes {
 					if d.Err != nil {
 						_, _ = fmt.Fprintf(dockerCli.Err(), "Failed to get status for %s (%s): %s\n", b.Name, d.Name, strings.TrimSpace(d.Err.Error()))
 					}
@@ -91,7 +92,7 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
 	return nil
 }
 
-func printBuilder(w io.Writer, b *builder.Builder) (ok bool) {
+func printBuilder(ctx context.Context, w io.Writer, b *builder.Builder) (ok bool) {
 	ok = true
 	var err string
 	if b.Err() != nil {
@@ -100,7 +101,8 @@ func printBuilder(w io.Writer, b *builder.Builder) (ok bool) {
 	}
 	fmt.Fprintf(w, "%s\t%s\t%s\t\t\n", b.Name, b.Driver, err)
 	if b.Err() == nil {
-		for _, n := range b.Nodes() {
+		nodes, _ := b.Nodes(ctx)
+		for _, n := range nodes {
 			var status string
 			if n.DriverInfo != nil {
 				status = n.DriverInfo.Status.String()
diff --git a/commands/prune.go b/commands/prune.go
index 653ffd84..bda29ee0 100644
--- a/commands/prune.go
+++ b/commands/prune.go
@@ -54,12 +54,14 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
 		return nil
 	}
 
-	b, err := builder.New(dockerCli, builder.WithName(opts.builder))
+	b, err := builder.New(dockerCli,
+		builder.WithName(opts.builder),
+	)
 	if err != nil {
 		return err
 	}
 
-	nodes, err := b.LoadNodes(ctx, false)
+	nodes, err := b.Nodes(ctx)
 	if err != nil {
 		return err
 	}
diff --git a/commands/rm.go b/commands/rm.go
index caaecc83..499eebc6 100644
--- a/commands/rm.go
+++ b/commands/rm.go
@@ -54,15 +54,15 @@ func runRm(dockerCli command.Cli, in rmOptions) error {
 		return err
 	}
 
-	nodes, err := b.LoadNodes(ctx, false)
-	if err != nil {
-		return err
-	}
-
 	if cb := b.ContextName(); cb != "" {
 		return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb)
 	}
 
+	nodes, err := b.Nodes(ctx)
+	if err != nil {
+		return err
+	}
+
 	err1 := rm(ctx, nodes, in)
 	if err := txn.Remove(b.Name); err != nil {
 		return err
@@ -137,7 +137,7 @@ func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, i
 	for _, b := range builders {
 		func(b *builder.Builder) {
 			eg.Go(func() error {
-				nodes, err := b.LoadNodes(timeoutCtx, true)
+				nodes, err := b.NodesWithData(timeoutCtx)
 				if err != nil {
 					return errors.Wrapf(err, "cannot load %s", b.Name)
 				}
diff --git a/commands/stop.go b/commands/stop.go
index cebf1c2a..b7fd378a 100644
--- a/commands/stop.go
+++ b/commands/stop.go
@@ -24,7 +24,8 @@ func runStop(dockerCli command.Cli, in stopOptions) error {
 	if err != nil {
 		return err
 	}
-	nodes, err := b.LoadNodes(ctx, false)
+
+	nodes, err := b.Nodes(ctx)
 	if err != nil {
 		return err
 	}