Merge pull request #1753 from nicksieger/compose-go-v1.13.4

compose go v1.13.4
pull/1730/head
Justin Chadwell 2 years ago committed by GitHub
commit cb061b684c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -28,10 +28,14 @@ func ParseComposeFiles(fs []File) (*Config, error) {
} }
func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, error) { func ParseCompose(cfgs []compose.ConfigFile, envs map[string]string) (*Config, error) {
if envs == nil {
envs = make(map[string]string)
}
cfg, err := loader.Load(compose.ConfigDetails{ cfg, err := loader.Load(compose.ConfigDetails{
ConfigFiles: cfgs, ConfigFiles: cfgs,
Environment: envs, Environment: envs,
}, func(options *loader.Options) { }, func(options *loader.Options) {
options.SetProjectName("bake", false)
options.SkipNormalization = true options.SkipNormalization = true
}) })
if err != nil { if err != nil {
@ -145,6 +149,7 @@ func validateCompose(dt []byte, envs map[string]string) error {
}, },
Environment: envs, Environment: envs,
}, func(options *loader.Options) { }, func(options *loader.Options) {
options.SetProjectName("bake", false)
options.SkipNormalization = true options.SkipNormalization = true
// consistency is checked later in ParseCompose to ensure multiple // consistency is checked later in ParseCompose to ensure multiple
// compose files can be merged together // compose files can be merged together

@ -5,7 +5,7 @@ go 1.20
require ( require (
github.com/Masterminds/semver/v3 v3.2.0 github.com/Masterminds/semver/v3 v3.2.0
github.com/aws/aws-sdk-go-v2/config v1.15.5 github.com/aws/aws-sdk-go-v2/config v1.15.5
github.com/compose-spec/compose-go v1.9.0 github.com/compose-spec/compose-go v1.13.4
github.com/containerd/console v1.0.3 github.com/containerd/console v1.0.3
github.com/containerd/containerd v1.7.0 github.com/containerd/containerd v1.7.0
github.com/containerd/typeurl/v2 v2.1.0 github.com/containerd/typeurl/v2 v2.1.0
@ -85,7 +85,7 @@ require (
github.com/containerd/ttrpc v1.2.1 // indirect github.com/containerd/ttrpc v1.2.1 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/distribution/v3 v3.0.0-20221103125252-ebfa2a0ac0a9 // indirect github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-connections v0.4.0 // indirect
@ -111,7 +111,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/imdario/mergo v0.3.13 // indirect github.com/imdario/mergo v0.3.15 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jinzhu/gorm v1.9.2 // indirect github.com/jinzhu/gorm v1.9.2 // indirect
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect

@ -141,8 +141,8 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/compose-spec/compose-go v1.9.0 h1:oaewhNhUP/AClVs6ytHzcjw1xwK+2EMWuvHXj6tYvRc= github.com/compose-spec/compose-go v1.13.4 h1:O6xAsPqaY1s9KXteiO7wRCDTJLahv1XP/z/eUO9EfbI=
github.com/compose-spec/compose-go v1.9.0/go.mod h1:Tb5Ae2PsYN3GTqYqzl2IRbTPiJtPZZjMw8UKUvmehFk= github.com/compose-spec/compose-go v1.13.4/go.mod h1:rsiZ8uaOHJYJemDBzTe9UBpaq5ZFVEOO4TxM2G3SJxk=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
@ -168,8 +168,8 @@ github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/distribution/v3 v3.0.0-20221103125252-ebfa2a0ac0a9 h1:doprs/RuXCuN864IfxC3h2qocrt158wGv3A5mcqSZQw= github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa h1:L9Ay/slwQ4ERSPaurC+TVkZrM0K98GNrEEo1En3e8as=
github.com/distribution/distribution/v3 v3.0.0-20221103125252-ebfa2a0ac0a9/go.mod h1:6rIc5NMSjXjjnwzWWy3HAm9gDBu+X7aCzL8VrHIKgxM= github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI=
github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM= github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM=
github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli-docs-tool v0.5.1 h1:jIk/cCZurZERhALPVKhqlNxTQGxn2kcI+56gE57PQXg= github.com/docker/cli-docs-tool v0.5.1 h1:jIk/cCZurZERhALPVKhqlNxTQGxn2kcI+56gE57PQXg=
@ -347,8 +347,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl/v2 v2.8.2 h1:wmFle3D1vu0okesm8BTLVDyJ6/OL9DCLUwn0b2OptiY= github.com/hashicorp/hcl/v2 v2.8.2 h1:wmFle3D1vu0okesm8BTLVDyJ6/OL9DCLUwn0b2OptiY=
github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY= github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@ -954,7 +954,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=

@ -17,29 +17,65 @@
package cli package cli
import ( import (
"fmt" "bytes"
"io" "io"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/compose-spec/compose-go/consts" "github.com/compose-spec/compose-go/consts"
"github.com/compose-spec/compose-go/dotenv" "github.com/compose-spec/compose-go/dotenv"
"github.com/compose-spec/compose-go/errdefs" "github.com/compose-spec/compose-go/errdefs"
"github.com/compose-spec/compose-go/loader" "github.com/compose-spec/compose-go/loader"
"github.com/compose-spec/compose-go/types" "github.com/compose-spec/compose-go/types"
"github.com/compose-spec/compose-go/utils" "github.com/compose-spec/compose-go/utils"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
) )
// ProjectOptions groups the command line options recommended for a Compose implementation // ProjectOptions provides common configuration for loading a project.
type ProjectOptions struct { type ProjectOptions struct {
Name string // Name is a valid Compose project name to be used or empty.
WorkingDir string //
// If empty, the project loader will automatically infer a reasonable
// project name if possible.
Name string
// WorkingDir is a file path to use as the project directory or empty.
//
// If empty, the project loader will automatically infer a reasonable
// working directory if possible.
WorkingDir string
// ConfigPaths are file paths to one or more Compose files.
//
// These are applied in order by the loader following the merge logic
// as described in the spec.
//
// The first entry is required and is the primary Compose file.
// For convenience, WithConfigFileEnv and WithDefaultConfigPath
// are provided to populate this in a predictable manner.
ConfigPaths []string ConfigPaths []string
// Environment are additional environment variables to make available
// for interpolation.
//
// NOTE: For security, the loader does not automatically expose any
// process environment variables. For convenience, WithOsEnv can be
// used if appropriate.
Environment map[string]string Environment map[string]string
EnvFile string
// EnvFiles are file paths to ".env" files with additional environment
// variable data.
//
// These are loaded in-order, so it is possible to override variables or
// in subsequent files.
//
// This field is optional, but any file paths that are included here must
// exist or an error will be returned during load.
EnvFiles []string
loadOptions []func(*loader.Options) loadOptions []func(*loader.Options)
} }
@ -63,8 +99,12 @@ func NewProjectOptions(configs []string, opts ...ProjectOptionsFn) (*ProjectOpti
// WithName defines ProjectOptions' name // WithName defines ProjectOptions' name
func WithName(name string) ProjectOptionsFn { func WithName(name string) ProjectOptionsFn {
return func(o *ProjectOptions) error { return func(o *ProjectOptions) error {
// a project (once loaded) cannot have an empty name
// however, on the options object, the name is optional: if unset,
// a name will be inferred by the loader, so it's legal to set the
// name to an empty string here
if name != loader.NormalizeProjectName(name) { if name != loader.NormalizeProjectName(name) {
return fmt.Errorf("%q is not a valid project name", name) return loader.InvalidProjectNameErr(name)
} }
o.Name = name o.Name = name
return nil return nil
@ -187,9 +227,19 @@ func WithOsEnv(o *ProjectOptions) error {
} }
// WithEnvFile set an alternate env file // WithEnvFile set an alternate env file
// deprecated - use WithEnvFiles
func WithEnvFile(file string) ProjectOptionsFn { func WithEnvFile(file string) ProjectOptionsFn {
var files []string
if file != "" {
files = []string{file}
}
return WithEnvFiles(files...)
}
// WithEnvFiles set alternate env files
func WithEnvFiles(file ...string) ProjectOptionsFn {
return func(options *ProjectOptions) error { return func(options *ProjectOptions) error {
options.EnvFile = file options.EnvFiles = file
return nil return nil
} }
} }
@ -200,7 +250,7 @@ func WithDotEnv(o *ProjectOptions) error {
if err != nil { if err != nil {
return err return err
} }
envMap, err := GetEnvFromFile(o.Environment, wd, o.EnvFile) envMap, err := GetEnvFromFile(o.Environment, wd, o.EnvFiles)
if err != nil { if err != nil {
return err return err
} }
@ -213,55 +263,63 @@ func WithDotEnv(o *ProjectOptions) error {
return nil return nil
} }
func GetEnvFromFile(currentEnv map[string]string, workingDir string, filename string) (map[string]string, error) { func GetEnvFromFile(currentEnv map[string]string, workingDir string, filenames []string) (map[string]string, error) {
envMap := make(map[string]string) envMap := make(map[string]string)
dotEnvFile := filename dotEnvFiles := filenames
if dotEnvFile == "" { if len(dotEnvFiles) == 0 {
dotEnvFile = filepath.Join(workingDir, ".env") dotEnvFiles = append(dotEnvFiles, filepath.Join(workingDir, ".env"))
} }
abs, err := filepath.Abs(dotEnvFile) for _, dotEnvFile := range dotEnvFiles {
if err != nil { abs, err := filepath.Abs(dotEnvFile)
return envMap, err if err != nil {
} return envMap, err
dotEnvFile = abs }
dotEnvFile = abs
s, err := os.Stat(dotEnvFile) s, err := os.Stat(dotEnvFile)
if os.IsNotExist(err) { if os.IsNotExist(err) {
if filename != "" { if len(filenames) == 0 {
return nil, errors.Errorf("Couldn't find env file: %s", filename) return envMap, nil
}
return envMap, errors.Errorf("Couldn't find env file: %s", dotEnvFile)
}
if err != nil {
return envMap, err
} }
return envMap, nil
}
if err != nil {
return envMap, err
}
if s.IsDir() { if s.IsDir() {
if filename == "" { if len(filenames) == 0 {
return envMap, nil return envMap, nil
}
return envMap, errors.Errorf("%s is a directory", dotEnvFile)
} }
return envMap, errors.Errorf("%s is a directory", dotEnvFile)
}
file, err := os.Open(dotEnvFile) b, err := os.ReadFile(dotEnvFile)
if err != nil { if os.IsNotExist(err) {
return envMap, errors.Wrapf(err, "failed to read %s", dotEnvFile) return nil, errors.Errorf("Couldn't read env file: %s", dotEnvFile)
} }
defer file.Close() if err != nil {
return envMap, err
}
env, err := dotenv.ParseWithLookup(file, func(k string) (string, bool) { env, err := dotenv.ParseWithLookup(bytes.NewReader(b), func(k string) (string, bool) {
v, ok := currentEnv[k] v, ok := envMap[k]
if !ok { if ok {
return "", false return v, true
}
v, ok = currentEnv[k]
if !ok {
return "", false
}
return v, true
})
if err != nil {
return envMap, errors.Wrapf(err, "failed to read %s", dotEnvFile)
}
for k, v := range env {
envMap[k] = v
} }
return v, true
})
if err != nil {
return envMap, errors.Wrapf(err, "failed to read %s", dotEnvFile)
}
for k, v := range env {
envMap[k] = v
} }
return envMap, nil return envMap, nil
@ -393,7 +451,10 @@ func withNamePrecedenceLoad(absWorkingDir string, options *ProjectOptions) func(
} else if nameFromEnv, ok := options.Environment[consts.ComposeProjectName]; ok && nameFromEnv != "" { } else if nameFromEnv, ok := options.Environment[consts.ComposeProjectName]; ok && nameFromEnv != "" {
opts.SetProjectName(nameFromEnv, true) opts.SetProjectName(nameFromEnv, true)
} else { } else {
opts.SetProjectName(filepath.Base(absWorkingDir), false) opts.SetProjectName(
loader.NormalizeProjectName(filepath.Base(absWorkingDir)),
false,
)
} }
} }
} }

@ -20,4 +20,5 @@ const (
ComposeProjectName = "COMPOSE_PROJECT_NAME" ComposeProjectName = "COMPOSE_PROJECT_NAME"
ComposePathSeparator = "COMPOSE_PATH_SEPARATOR" ComposePathSeparator = "COMPOSE_PATH_SEPARATOR"
ComposeFilePath = "COMPOSE_FILE" ComposeFilePath = "COMPOSE_FILE"
ComposeProfiles = "COMPOSE_PROFILES"
) )

@ -111,8 +111,13 @@ func Read(filenames ...string) (map[string]string, error) {
// UnmarshalBytesWithLookup parses env file from byte slice of chars, returning a map of keys and values. // UnmarshalBytesWithLookup parses env file from byte slice of chars, returning a map of keys and values.
func UnmarshalBytesWithLookup(src []byte, lookupFn LookupFn) (map[string]string, error) { func UnmarshalBytesWithLookup(src []byte, lookupFn LookupFn) (map[string]string, error) {
return UnmarshalWithLookup(string(src), lookupFn)
}
// UnmarshalWithLookup parses env file from string, returning a map of keys and values.
func UnmarshalWithLookup(src string, lookupFn LookupFn) (map[string]string, error) {
out := make(map[string]string) out := make(map[string]string)
err := newParser().parseBytes(src, out, lookupFn) err := newParser().parse(src, out, lookupFn)
return out, err return out, err
} }

@ -1,7 +1,6 @@
package dotenv package dotenv
import ( import (
"bytes"
"errors" "errors"
"fmt" "fmt"
"regexp" "regexp"
@ -31,14 +30,14 @@ func newParser() *parser {
} }
} }
func (p *parser) parseBytes(src []byte, out map[string]string, lookupFn LookupFn) error { func (p *parser) parse(src string, out map[string]string, lookupFn LookupFn) error {
cutset := src cutset := src
if lookupFn == nil { if lookupFn == nil {
lookupFn = noLookupFn lookupFn = noLookupFn
} }
for { for {
cutset = p.getStatementStart(cutset) cutset = p.getStatementStart(cutset)
if cutset == nil { if cutset == "" {
// reached end of file // reached end of file
break break
} }
@ -75,10 +74,10 @@ func (p *parser) parseBytes(src []byte, out map[string]string, lookupFn LookupFn
// getStatementPosition returns position of statement begin. // getStatementPosition returns position of statement begin.
// //
// It skips any comment line or non-whitespace character. // It skips any comment line or non-whitespace character.
func (p *parser) getStatementStart(src []byte) []byte { func (p *parser) getStatementStart(src string) string {
pos := p.indexOfNonSpaceChar(src) pos := p.indexOfNonSpaceChar(src)
if pos == -1 { if pos == -1 {
return nil return ""
} }
src = src[pos:] src = src[pos:]
@ -87,70 +86,69 @@ func (p *parser) getStatementStart(src []byte) []byte {
} }
// skip comment section // skip comment section
pos = bytes.IndexFunc(src, isCharFunc('\n')) pos = strings.IndexFunc(src, isCharFunc('\n'))
if pos == -1 { if pos == -1 {
return nil return ""
} }
return p.getStatementStart(src[pos:]) return p.getStatementStart(src[pos:])
} }
// locateKeyName locates and parses key name and returns rest of slice // locateKeyName locates and parses key name and returns rest of slice
func (p *parser) locateKeyName(src []byte) (string, []byte, bool, error) { func (p *parser) locateKeyName(src string) (string, string, bool, error) {
var key string var key string
var inherited bool var inherited bool
// trim "export" and space at beginning // trim "export" and space at beginning
src = bytes.TrimLeftFunc(exportRegex.ReplaceAll(src, nil), isSpace) src = strings.TrimLeftFunc(exportRegex.ReplaceAllString(src, ""), isSpace)
// locate key name end and validate it in single loop // locate key name end and validate it in single loop
offset := 0 offset := 0
loop: loop:
for i, char := range src { for i, rune := range src {
rchar := rune(char) if isSpace(rune) {
if isSpace(rchar) {
continue continue
} }
switch char { switch rune {
case '=', ':', '\n': case '=', ':', '\n':
// library also supports yaml-style value declaration // library also supports yaml-style value declaration
key = string(src[0:i]) key = string(src[0:i])
offset = i + 1 offset = i + 1
inherited = char == '\n' inherited = rune == '\n'
break loop break loop
case '_', '.', '-', '[', ']': case '_', '.', '-', '[', ']':
default: default:
// variable name should match [A-Za-z0-9_.-] // variable name should match [A-Za-z0-9_.-]
if unicode.IsLetter(rchar) || unicode.IsNumber(rchar) { if unicode.IsLetter(rune) || unicode.IsNumber(rune) {
continue continue
} }
return "", nil, inherited, fmt.Errorf( return "", "", inherited, fmt.Errorf(
`line %d: unexpected character %q in variable name`, `line %d: unexpected character %q in variable name`,
p.line, string(char)) p.line, string(rune))
} }
} }
if len(src) == 0 { if src == "" {
return "", nil, inherited, errors.New("zero length string") return "", "", inherited, errors.New("zero length string")
} }
// trim whitespace // trim whitespace
key = strings.TrimRightFunc(key, unicode.IsSpace) key = strings.TrimRightFunc(key, unicode.IsSpace)
cutset := bytes.TrimLeftFunc(src[offset:], isSpace) cutset := strings.TrimLeftFunc(src[offset:], isSpace)
return key, cutset, inherited, nil return key, cutset, inherited, nil
} }
// extractVarValue extracts variable value and returns rest of slice // extractVarValue extracts variable value and returns rest of slice
func (p *parser) extractVarValue(src []byte, envMap map[string]string, lookupFn LookupFn) (string, []byte, error) { func (p *parser) extractVarValue(src string, envMap map[string]string, lookupFn LookupFn) (string, string, error) {
quote, isQuoted := hasQuotePrefix(src) quote, isQuoted := hasQuotePrefix(src)
if !isQuoted { if !isQuoted {
// unquoted value - read until new line // unquoted value - read until new line
value, rest, _ := bytes.Cut(src, []byte("\n")) value, rest, _ := strings.Cut(src, "\n")
p.line++ p.line++
// Remove inline comments on unquoted lines // Remove inline comments on unquoted lines
value, _, _ = bytes.Cut(value, []byte(" #")) value, _, _ = strings.Cut(value, " #")
value = bytes.TrimRightFunc(value, unicode.IsSpace) value = strings.TrimRightFunc(value, unicode.IsSpace)
retVal, err := expandVariables(string(value), envMap, lookupFn) retVal, err := expandVariables(string(value), envMap, lookupFn)
return retVal, rest, err return retVal, rest, err
} }
@ -176,7 +174,7 @@ func (p *parser) extractVarValue(src []byte, envMap map[string]string, lookupFn
// variables on the result // variables on the result
retVal, err := expandVariables(expandEscapes(value), envMap, lookupFn) retVal, err := expandVariables(expandEscapes(value), envMap, lookupFn)
if err != nil { if err != nil {
return "", nil, err return "", "", err
} }
value = retVal value = retVal
} }
@ -185,12 +183,12 @@ func (p *parser) extractVarValue(src []byte, envMap map[string]string, lookupFn
} }
// return formatted error if quoted string is not terminated // return formatted error if quoted string is not terminated
valEndIndex := bytes.IndexFunc(src, isCharFunc('\n')) valEndIndex := strings.IndexFunc(src, isCharFunc('\n'))
if valEndIndex == -1 { if valEndIndex == -1 {
valEndIndex = len(src) valEndIndex = len(src)
} }
return "", nil, fmt.Errorf("line %d: unterminated quoted value %s", p.line, src[:valEndIndex]) return "", "", fmt.Errorf("line %d: unterminated quoted value %s", p.line, src[:valEndIndex])
} }
func expandEscapes(str string) string { func expandEscapes(str string) string {
@ -225,8 +223,8 @@ func expandEscapes(str string) string {
return out return out
} }
func (p *parser) indexOfNonSpaceChar(src []byte) int { func (p *parser) indexOfNonSpaceChar(src string) int {
return bytes.IndexFunc(src, func(r rune) bool { return strings.IndexFunc(src, func(r rune) bool {
if r == '\n' { if r == '\n' {
p.line++ p.line++
} }
@ -235,8 +233,8 @@ func (p *parser) indexOfNonSpaceChar(src []byte) int {
} }
// hasQuotePrefix reports whether charset starts with single or double quote and returns quote character // hasQuotePrefix reports whether charset starts with single or double quote and returns quote character
func hasQuotePrefix(src []byte) (byte, bool) { func hasQuotePrefix(src string) (byte, bool) {
if len(src) == 0 { if src == "" {
return 0, false return 0, false
} }

@ -72,7 +72,7 @@ func recursiveInterpolate(value interface{}, path Path, opts Options) (interface
switch value := value.(type) { switch value := value.(type) {
case string: case string:
newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue)) newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue))
if err != nil || newValue == value { if err != nil {
return value, newPathError(path, err) return value, newPathError(path, err)
} }
caster, ok := opts.getCasterForPath(path) caster, ok := opts.getCasterForPath(path)

@ -1,7 +1,13 @@
name: Full_Example_project_name name: full_example_project_name
services: services:
foo:
bar:
build:
dockerfile_inline: |
FROM alpine
RUN echo "hello" > /world.txt
foo:
build: build:
context: ./dir context: ./dir
dockerfile: Dockerfile dockerfile: Dockerfile
@ -15,6 +21,8 @@ services:
- foo - foo
- bar - bar
labels: [FOO=BAR] labels: [FOO=BAR]
additional_contexts:
foo: /bar
secrets: secrets:
- secret1 - secret1
- source: secret2 - source: secret2

@ -22,6 +22,7 @@ import (
interp "github.com/compose-spec/compose-go/interpolation" interp "github.com/compose-spec/compose-go/interpolation"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus"
) )
var interpolateTypeCastMapping = map[interp.Path]interp.Cast{ var interpolateTypeCastMapping = map[interp.Path]interp.Cast{
@ -114,9 +115,15 @@ func toFloat32(value string) (interface{}, error) {
// should match http://yaml.org/type/bool.html // should match http://yaml.org/type/bool.html
func toBoolean(value string) (interface{}, error) { func toBoolean(value string) (interface{}, error) {
switch strings.ToLower(value) { switch strings.ToLower(value) {
case "y", "yes", "true", "on": case "true":
return true, nil return true, nil
case "n", "no", "false", "off": case "false":
return false, nil
case "y", "yes", "on":
logrus.Warnf("%q for boolean is not supported by YAML 1.2, please use `true`", value)
return true, nil
case "n", "no", "off":
logrus.Warnf("%q for boolean is not supported by YAML 1.2, please use `false`", value)
return false, nil return false, nil
default: default:
return nil, errors.Errorf("invalid boolean: %s", value) return nil, errors.Errorf("invalid boolean: %s", value)

@ -37,7 +37,7 @@ import (
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v3"
) )
// Options supported by Load // Options supported by Load
@ -69,7 +69,7 @@ type Options struct {
} }
func (o *Options) SetProjectName(name string, imperativelySet bool) { func (o *Options) SetProjectName(name string, imperativelySet bool) {
o.projectName = NormalizeProjectName(name) o.projectName = name
o.projectNameImperativelySet = imperativelySet o.projectNameImperativelySet = imperativelySet
} }
@ -138,6 +138,14 @@ func ParseYAML(source []byte) (map[string]interface{}, error) {
if err := yaml.Unmarshal(source, &cfg); err != nil { if err := yaml.Unmarshal(source, &cfg); err != nil {
return nil, err return nil, err
} }
stringMap, ok := cfg.(map[string]interface{})
if ok {
converted, err := convertToStringKeysRecursive(stringMap, "")
if err != nil {
return nil, err
}
return converted.(map[string]interface{}), nil
}
cfgMap, ok := cfg.(map[interface{}]interface{}) cfgMap, ok := cfg.(map[interface{}]interface{})
if !ok { if !ok {
return nil, errors.Errorf("Top-level object must be a mapping") return nil, errors.Errorf("Top-level object must be a mapping")
@ -185,7 +193,7 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
} }
dict, err := parseConfig(file.Content, opts) dict, err := parseConfig(file.Content, opts)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("parsing %s: %w", file.Filename, err)
} }
configDict = dict configDict = dict
file.Config = dict file.Config = dict
@ -194,7 +202,7 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
if !opts.SkipValidation { if !opts.SkipValidation {
if err := schema.Validate(configDict); err != nil { if err := schema.Validate(configDict); err != nil {
return nil, err return nil, fmt.Errorf("validating %s: %w", file.Filename, err)
} }
} }
@ -233,7 +241,7 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
} }
if !opts.SkipNormalization { if !opts.SkipNormalization {
err = normalize(project, opts.ResolvePaths) err = Normalize(project, opts.ResolvePaths)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -246,40 +254,82 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
} }
} }
if len(opts.Profiles) > 0 { if profiles, ok := project.Environment[consts.ComposeProfiles]; ok && len(opts.Profiles) == 0 {
project.ApplyProfiles(opts.Profiles) opts.Profiles = strings.Split(profiles, ",")
} }
project.ApplyProfiles(opts.Profiles)
err = project.ResolveServicesEnvironment(opts.discardEnvFiles) err = project.ResolveServicesEnvironment(opts.discardEnvFiles)
return project, err return project, err
} }
func InvalidProjectNameErr(v string) error {
return fmt.Errorf(
"%q is not a valid project name: it must contain only "+
"characters from [a-z0-9_-] and start with [a-z0-9]", v,
)
}
// projectName determines the canonical name to use for the project considering
// the loader Options as well as `name` fields in Compose YAML fields (which
// also support interpolation).
//
// TODO(milas): restructure loading so that we don't need to re-parse the YAML
// here, as it's both wasteful and makes this code error-prone.
func projectName(details types.ConfigDetails, opts *Options) (string, error) { func projectName(details types.ConfigDetails, opts *Options) (string, error) {
projectName, projectNameImperativelySet := opts.GetProjectName() projectName, projectNameImperativelySet := opts.GetProjectName()
var pjNameFromConfigFile string
for _, configFile := range details.ConfigFiles { // if user did NOT provide a name explicitly, then see if one is defined
yml, err := ParseYAML(configFile.Content) // in any of the config files
if err != nil { if !projectNameImperativelySet {
return "", nil var pjNameFromConfigFile string
for _, configFile := range details.ConfigFiles {
yml, err := ParseYAML(configFile.Content)
if err != nil {
// HACK: the way that loading is currently structured, this is
// a duplicative parse just for the `name`. if it fails, we
// give up but don't return the error, knowing that it'll get
// caught downstream for us
return "", nil
}
if val, ok := yml["name"]; ok && val != "" {
sVal, ok := val.(string)
if !ok {
// HACK: see above - this is a temporary parsed version
// that hasn't been schema-validated, but we don't want
// to be the ones to actually report that, so give up,
// knowing that it'll get caught downstream for us
return "", nil
}
pjNameFromConfigFile = sVal
}
}
if !opts.SkipInterpolation {
interpolated, err := interp.Interpolate(
map[string]interface{}{"name": pjNameFromConfigFile},
*opts.Interpolate,
)
if err != nil {
return "", err
}
pjNameFromConfigFile = interpolated["name"].(string)
} }
if val, ok := yml["name"]; ok && val != "" { pjNameFromConfigFile = NormalizeProjectName(pjNameFromConfigFile)
pjNameFromConfigFile = yml["name"].(string) if pjNameFromConfigFile != "" {
projectName = pjNameFromConfigFile
} }
} }
if !opts.SkipInterpolation {
interpolated, err := interp.Interpolate(map[string]interface{}{"name": pjNameFromConfigFile}, *opts.Interpolate) if projectName == "" {
if err != nil { return "", errors.New("project name must not be empty")
return "", err
}
pjNameFromConfigFile = interpolated["name"].(string)
} }
pjNameFromConfigFile = NormalizeProjectName(pjNameFromConfigFile)
if !projectNameImperativelySet && pjNameFromConfigFile != "" { if NormalizeProjectName(projectName) != projectName {
projectName = pjNameFromConfigFile return "", InvalidProjectNameErr(projectName)
} }
// TODO(milas): this should probably ALWAYS set (overriding any existing)
if _, ok := details.Environment[consts.ComposeProjectName]; !ok && projectName != "" { if _, ok := details.Environment[consts.ComposeProjectName]; !ok && projectName != "" {
details.Environment[consts.ComposeProjectName] = projectName details.Environment[consts.ComposeProjectName] = projectName
} }
@ -304,6 +354,8 @@ func parseConfig(b []byte, opts *Options) (map[string]interface{}, error) {
return yml, err return yml, err
} }
const extensions = "#extensions" // Using # prefix, we prevent risk to conflict with an actual yaml key
func groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interface{} { func groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interface{} {
extras := map[string]interface{}{} extras := map[string]interface{}{}
for key, value := range dict { for key, value := range dict {
@ -316,7 +368,7 @@ func groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interfac
} }
} }
if len(extras) > 0 { if len(extras) > 0 {
dict["extensions"] = extras dict[extensions] = extras
} }
return dict return dict
} }
@ -355,7 +407,7 @@ func loadSections(filename string, config map[string]interface{}, configDetails
if err != nil { if err != nil {
return nil, err return nil, err
} }
extensions := getSection(config, "extensions") extensions := getSection(config, extensions)
if len(extensions) > 0 { if len(extensions) > 0 {
cfg.Extensions = extensions cfg.Extensions = extensions
} }
@ -450,6 +502,22 @@ func createTransformHook(additionalTransformers ...Transformer) mapstructure.Dec
// keys need to be converted to strings for jsonschema // keys need to be converted to strings for jsonschema
func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) { func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) {
if mapping, ok := value.(map[string]interface{}); ok {
for key, entry := range mapping {
var newKeyPrefix string
if keyPrefix == "" {
newKeyPrefix = key
} else {
newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, key)
}
convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
if err != nil {
return nil, err
}
mapping[key] = convertedEntry
}
return mapping, nil
}
if mapping, ok := value.(map[interface{}]interface{}); ok { if mapping, ok := value.(map[interface{}]interface{}); ok {
dict := make(map[string]interface{}) dict := make(map[string]interface{})
for key, entry := range mapping { for key, entry := range mapping {
@ -501,7 +569,7 @@ func formatInvalidKeyError(keyPrefix string, key interface{}) error {
func LoadServices(filename string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options) ([]types.ServiceConfig, error) { func LoadServices(filename string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options) ([]types.ServiceConfig, error) {
var services []types.ServiceConfig var services []types.ServiceConfig
x, ok := servicesDict["extensions"] x, ok := servicesDict[extensions]
if ok { if ok {
// as a top-level attribute, "services" doesn't support extensions, and a service can be named `x-foo` // as a top-level attribute, "services" doesn't support extensions, and a service can be named `x-foo`
for k, v := range x.(map[string]interface{}) { for k, v := range x.(map[string]interface{}) {
@ -541,16 +609,17 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter
} }
if serviceConfig.Extends != nil && !opts.SkipExtends { if serviceConfig.Extends != nil && !opts.SkipExtends {
baseServiceName := *serviceConfig.Extends["service"] baseServiceName := serviceConfig.Extends.Service
var baseService *types.ServiceConfig var baseService *types.ServiceConfig
if file := serviceConfig.Extends["file"]; file == nil { file := serviceConfig.Extends.File
if file == "" {
baseService, err = loadServiceWithExtends(filename, baseServiceName, servicesDict, workingDir, lookupEnv, opts, ct) baseService, err = loadServiceWithExtends(filename, baseServiceName, servicesDict, workingDir, lookupEnv, opts, ct)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} else { } else {
// Resolve the path to the imported file, and load it. // Resolve the path to the imported file, and load it.
baseFilePath := absPath(workingDir, *file) baseFilePath := absPath(workingDir, file)
b, err := os.ReadFile(baseFilePath) b, err := os.ReadFile(baseFilePath)
if err != nil { if err != nil {
@ -569,10 +638,10 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter
} }
// Make paths relative to the importing Compose file. Note that we // Make paths relative to the importing Compose file. Note that we
// make the paths relative to `*file` rather than `baseFilePath` so // make the paths relative to `file` rather than `baseFilePath` so
// that the resulting paths won't be absolute if `*file` isn't an // that the resulting paths won't be absolute if `file` isn't an
// absolute path. // absolute path.
baseFileParent := filepath.Dir(*file) baseFileParent := filepath.Dir(file)
if baseService.Build != nil { if baseService.Build != nil {
baseService.Build.Context = resolveBuildContextPath(baseFileParent, baseService.Build.Context) baseService.Build.Context = resolveBuildContextPath(baseFileParent, baseService.Build.Context)
} }
@ -583,12 +652,17 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter
} }
baseService.Volumes[i].Source = resolveMaybeUnixPath(vol.Source, baseFileParent, lookupEnv) baseService.Volumes[i].Source = resolveMaybeUnixPath(vol.Source, baseFileParent, lookupEnv)
} }
for i, envFile := range baseService.EnvFile {
baseService.EnvFile[i] = resolveMaybeUnixPath(envFile, baseFileParent, lookupEnv)
}
} }
serviceConfig, err = _merge(baseService, serviceConfig) serviceConfig, err = _merge(baseService, serviceConfig)
if err != nil { if err != nil {
return nil, err return nil, err
} }
serviceConfig.Extends = nil
} }
return serviceConfig, nil return serviceConfig, nil
@ -996,14 +1070,15 @@ var transformDependsOnConfig TransformerFunc = func(data interface{}) (interface
} }
} }
var transformExtendsConfig TransformerFunc = func(data interface{}) (interface{}, error) { var transformExtendsConfig TransformerFunc = func(value interface{}) (interface{}, error) {
switch data.(type) { switch value.(type) {
case string: case string:
data = map[string]interface{}{ return map[string]interface{}{"service": value}, nil
"service": data, case map[string]interface{}:
} return value, nil
default:
return value, errors.Errorf("invalid type %T for extends", value)
} }
return transformMappingOrListFunc("=", true)(data)
} }
var transformServiceVolumeConfig TransformerFunc = func(data interface{}) (interface{}, error) { var transformServiceVolumeConfig TransformerFunc = func(data interface{}) (interface{}, error) {

@ -130,7 +130,7 @@ func _merge(baseService *types.ServiceConfig, overrideService *types.ServiceConf
if overrideService.Command != nil { if overrideService.Command != nil {
baseService.Command = overrideService.Command baseService.Command = overrideService.Command
} }
if overrideService.HealthCheck != nil { if overrideService.HealthCheck != nil && overrideService.HealthCheck.Test != nil {
baseService.HealthCheck.Test = overrideService.HealthCheck.Test baseService.HealthCheck.Test = overrideService.HealthCheck.Test
} }
if overrideService.Entrypoint != nil { if overrideService.Entrypoint != nil {

@ -20,6 +20,7 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"strings"
"github.com/compose-spec/compose-go/errdefs" "github.com/compose-spec/compose-go/errdefs"
"github.com/compose-spec/compose-go/types" "github.com/compose-spec/compose-go/types"
@ -27,8 +28,8 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
// normalize compose project by moving deprecated attributes to their canonical position and injecting implicit defaults // Normalize compose project by moving deprecated attributes to their canonical position and injecting implicit defaults
func normalize(project *types.Project, resolvePaths bool) error { func Normalize(project *types.Project, resolvePaths bool) error {
absWorkingDir, err := filepath.Abs(project.WorkingDir) absWorkingDir, err := filepath.Abs(project.WorkingDir)
if err != nil { if err != nil {
return err return err
@ -71,17 +72,26 @@ func normalize(project *types.Project, resolvePaths bool) error {
} }
if s.Build != nil { if s.Build != nil {
if s.Build.Dockerfile == "" { if s.Build.Dockerfile == "" && s.Build.DockerfileInline == "" {
s.Build.Dockerfile = "Dockerfile" s.Build.Dockerfile = "Dockerfile"
} }
localContext := absPath(project.WorkingDir, s.Build.Context) if resolvePaths {
if _, err := os.Stat(localContext); err == nil { // Build context might be a remote http/git context. Unfortunately supported "remote"
if resolvePaths { // syntax is highly ambiguous in moby/moby and not defined by compose-spec,
// so let's assume runtime will check
localContext := absPath(project.WorkingDir, s.Build.Context)
if _, err := os.Stat(localContext); err == nil {
s.Build.Context = localContext s.Build.Context = localContext
} }
// } else { for name, path := range s.Build.AdditionalContexts {
// might be a remote http/git context. Unfortunately supported "remote" syntax is highly ambiguous if strings.Contains(path, "://") { // `docker-image://` or any builder specific context type
// in moby/moby and not defined by compose-spec, so let's assume runtime will check continue
}
path = absPath(project.WorkingDir, path)
if _, err := os.Stat(path); err == nil {
s.Build.AdditionalContexts[name] = path
}
}
} }
s.Build.Args = s.Build.Args.Resolve(fn) s.Build.Args = s.Build.Args.Resolve(fn)
} }
@ -90,6 +100,41 @@ func normalize(project *types.Project, resolvePaths bool) error {
} }
s.Environment = s.Environment.Resolve(fn) s.Environment = s.Environment.Resolve(fn)
if s.Extends != nil && s.Extends.File != "" {
s.Extends.File = absPath(project.WorkingDir, s.Extends.File)
}
for _, link := range s.Links {
parts := strings.Split(link, ":")
if len(parts) == 2 {
link = parts[0]
}
s.DependsOn = setIfMissing(s.DependsOn, link, types.ServiceDependency{
Condition: types.ServiceConditionStarted,
Restart: true,
})
}
for _, namespace := range []string{s.NetworkMode, s.Ipc, s.Pid, s.Uts, s.Cgroup} {
if strings.HasPrefix(namespace, types.ServicePrefix) {
name := namespace[len(types.ServicePrefix):]
s.DependsOn = setIfMissing(s.DependsOn, name, types.ServiceDependency{
Condition: types.ServiceConditionStarted,
Restart: true,
})
}
}
for _, vol := range s.VolumesFrom {
if !strings.HasPrefix(vol, types.ContainerPrefix) {
spec := strings.Split(vol, ":")
s.DependsOn = setIfMissing(s.DependsOn, spec[0], types.ServiceDependency{
Condition: types.ServiceConditionStarted,
Restart: false,
})
}
}
err := relocateLogDriver(&s) err := relocateLogDriver(&s)
if err != nil { if err != nil {
return err return err
@ -126,9 +171,20 @@ func normalize(project *types.Project, resolvePaths bool) error {
return nil return nil
} }
// setIfMissing records dep as the depends_on entry for service unless one
// is already present, allocating the map on first use. The (possibly new)
// map is returned so callers can assign it back.
func setIfMissing(deps types.DependsOnConfig, service string, dep types.ServiceDependency) types.DependsOnConfig {
	if deps == nil {
		deps = make(types.DependsOnConfig)
	}
	_, exists := deps[service]
	if !exists {
		deps[service] = dep
	}
	return deps
}
func relocateScale(s *types.ServiceConfig) error { func relocateScale(s *types.ServiceConfig) error {
scale := uint64(s.Scale) scale := uint64(s.Scale)
if scale != 1 { if scale > 1 {
logrus.Warn("`scale` is deprecated. Use the `deploy.replicas` element") logrus.Warn("`scale` is deprecated. Use the `deploy.replicas` element")
if s.Deploy == nil { if s.Deploy == nil {
s.Deploy = &types.DeployConfig{} s.Deploy = &types.DeployConfig{}

@ -32,6 +32,28 @@ func checkConsistency(project *types.Project) error {
return errors.Wrapf(errdefs.ErrInvalid, "service %q has neither an image nor a build context specified", s.Name) return errors.Wrapf(errdefs.ErrInvalid, "service %q has neither an image nor a build context specified", s.Name)
} }
if s.Build != nil {
if s.Build.DockerfileInline != "" && s.Build.Dockerfile != "" {
return errors.Wrapf(errdefs.ErrInvalid, "service %q declares mutualy exclusive dockerfile and dockerfile_inline", s.Name)
}
if len(s.Build.Platforms) > 0 && s.Platform != "" {
var found bool
for _, platform := range s.Build.Platforms {
if platform == s.Platform {
found = true
break
}
}
if !found {
return errors.Wrapf(errdefs.ErrInvalid, "service.build.platforms MUST include service.platform %q ", s.Platform)
}
}
}
if s.NetworkMode != "" && len(s.Networks) > 0 {
return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %s declares mutually exclusive `network_mode` and `networks`", s.Name))
}
for network := range s.Networks { for network := range s.Networks {
if _, ok := project.Networks[network]; !ok { if _, ok := project.Networks[network]; !ok {
return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined network %s", s.Name, network)) return errors.Wrap(errdefs.ErrInvalid, fmt.Sprintf("service %q refers to undefined network %s", s.Name, network))

@ -44,7 +44,7 @@ func isAbs(path string) (b bool) {
// volumeNameLen returns length of the leading volume name on Windows. // volumeNameLen returns length of the leading volume name on Windows.
// It returns 0 elsewhere. // It returns 0 elsewhere.
//nolint: gocyclo // nolint: gocyclo
func volumeNameLen(path string) int { func volumeNameLen(path string) int {
if len(path) < 2 { if len(path) < 2 {
return 0 return 0

@ -13,6 +13,7 @@
"name": { "name": {
"type": "string", "type": "string",
"pattern": "^[a-z0-9][a-z0-9_-]*$",
"description": "define the Compose project name, until user defines one explicitly." "description": "define the Compose project name, until user defines one explicitly."
}, },
@ -90,12 +91,14 @@
"properties": { "properties": {
"context": {"type": "string"}, "context": {"type": "string"},
"dockerfile": {"type": "string"}, "dockerfile": {"type": "string"},
"dockerfile_inline": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"}, "args": {"$ref": "#/definitions/list_or_dict"},
"ssh": {"$ref": "#/definitions/list_or_dict"}, "ssh": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"}, "labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"type": "array", "items": {"type": "string"}}, "cache_from": {"type": "array", "items": {"type": "string"}},
"cache_to": {"type": "array", "items": {"type": "string"}}, "cache_to": {"type": "array", "items": {"type": "string"}},
"no_cache": {"type": "boolean"}, "no_cache": {"type": "boolean"},
"additional_contexts": {"$ref": "#/definitions/list_or_dict"},
"network": {"type": "string"}, "network": {"type": "string"},
"pull": {"type": "boolean"}, "pull": {"type": "boolean"},
"target": {"type": "string"}, "target": {"type": "string"},
@ -143,12 +146,7 @@
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup": {"type": "string", "enum": ["host", "private"]}, "cgroup": {"type": "string", "enum": ["host", "private"]},
"cgroup_parent": {"type": "string"}, "cgroup_parent": {"type": "string"},
"command": { "command": {"$ref": "#/definitions/command"},
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {"$ref": "#/definitions/service_config_or_secret"}, "configs": {"$ref": "#/definitions/service_config_or_secret"},
"container_name": {"type": "string"}, "container_name": {"type": "string"},
"cpu_count": {"type": "integer", "minimum": 0}, "cpu_count": {"type": "integer", "minimum": 0},
@ -181,6 +179,7 @@
"type": "object", "type": "object",
"additionalProperties": false, "additionalProperties": false,
"properties": { "properties": {
"restart": {"type": "boolean"},
"condition": { "condition": {
"type": "string", "type": "string",
"enum": ["service_started", "service_healthy", "service_completed_successfully"] "enum": ["service_started", "service_healthy", "service_completed_successfully"]
@ -198,12 +197,7 @@
"dns_opt": {"type": "array","items": {"type": "string"}, "uniqueItems": true}, "dns_opt": {"type": "array","items": {"type": "string"}, "uniqueItems": true},
"dns_search": {"$ref": "#/definitions/string_or_list"}, "dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"}, "domainname": {"type": "string"},
"entrypoint": { "entrypoint": {"$ref": "#/definitions/command"},
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"}, "env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"}, "environment": {"$ref": "#/definitions/list_or_dict"},
@ -734,6 +728,14 @@
"patternProperties": {"^x-": {}} "patternProperties": {"^x-": {}}
}, },
"command": {
"oneOf": [
{"type": "null"},
{"type": "string"},
{"type": "array","items": {"type": "string"}}
]
},
"string_or_list": { "string_or_list": {
"oneOf": [ "oneOf": [
{"type": "string"}, {"type": "string"},

@ -52,6 +52,7 @@ func init() {
} }
// Schema is the compose-spec JSON schema // Schema is the compose-spec JSON schema
//
//go:embed compose-spec.json //go:embed compose-spec.json
var Schema string var Schema string

@ -47,6 +47,19 @@ func (e InvalidTemplateError) Error() string {
return fmt.Sprintf("Invalid template: %#v", e.Template) return fmt.Sprintf("Invalid template: %#v", e.Template)
} }
// MissingRequiredError reports that a template referenced a required
// variable that has no value. Reason, when non-empty, carries the
// user-supplied message from the ${VAR:?reason} substitution form.
type MissingRequiredError struct {
	Variable string
	Reason   string
}

// Error implements the error interface, appending the Reason when one
// was provided.
func (e MissingRequiredError) Error() string {
	if e.Reason == "" {
		return fmt.Sprintf("required variable %s is missing a value", e.Variable)
	}
	return fmt.Sprintf("required variable %s is missing a value: %s", e.Variable, e.Reason)
}
// Mapping is a user-supplied function which maps from variable names to values. // Mapping is a user-supplied function which maps from variable names to values.
// Returns the value as a string and a bool indicating whether // Returns the value as a string and a bool indicating whether
// the value is present, to distinguish between an empty string // the value is present, to distinguish between an empty string
@ -351,8 +364,9 @@ func withRequired(substitution string, mapping Mapping, sep string, valid func(s
} }
value, ok := mapping(name) value, ok := mapping(name)
if !ok || !valid(value) { if !ok || !valid(value) {
return "", true, &InvalidTemplateError{ return "", true, &MissingRequiredError{
Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage), Reason: errorMessage,
Variable: name,
} }
} }
return value, true, nil return value, true, nil

@ -18,6 +18,7 @@ package types
import ( import (
"bytes" "bytes"
"encoding/json"
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
@ -28,6 +29,7 @@ import (
godigest "github.com/opencontainers/go-digest" godigest "github.com/opencontainers/go-digest"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"gopkg.in/yaml.v3"
) )
// Project is the result of loading a set of compose files // Project is the result of loading a set of compose files
@ -39,16 +41,17 @@ type Project struct {
Volumes Volumes `yaml:",omitempty" json:"volumes,omitempty"` Volumes Volumes `yaml:",omitempty" json:"volumes,omitempty"`
Secrets Secrets `yaml:",omitempty" json:"secrets,omitempty"` Secrets Secrets `yaml:",omitempty" json:"secrets,omitempty"`
Configs Configs `yaml:",omitempty" json:"configs,omitempty"` Configs Configs `yaml:",omitempty" json:"configs,omitempty"`
Extensions Extensions `yaml:",inline" json:"-"` // https://github.com/golang/go/issues/6213 Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"` // https://github.com/golang/go/issues/6213
ComposeFiles []string `yaml:"-" json:"-"` ComposeFiles []string `yaml:"-" json:"-"`
Environment Mapping `yaml:"-" json:"-"` Environment Mapping `yaml:"-" json:"-"`
// DisabledServices track services which have been disable as profile is not active // DisabledServices track services which have been disable as profile is not active
DisabledServices Services `yaml:"-" json:"-"` DisabledServices Services `yaml:"-" json:"-"`
Profiles []string `yaml:"-" json:"-"`
} }
// ServiceNames return names for all services in this Compose config // ServiceNames return names for all services in this Compose config
func (p Project) ServiceNames() []string { func (p *Project) ServiceNames() []string {
var names []string var names []string
for _, s := range p.Services { for _, s := range p.Services {
names = append(names, s.Name) names = append(names, s.Name)
@ -58,7 +61,7 @@ func (p Project) ServiceNames() []string {
} }
// VolumeNames return names for all volumes in this Compose config // VolumeNames return names for all volumes in this Compose config
func (p Project) VolumeNames() []string { func (p *Project) VolumeNames() []string {
var names []string var names []string
for k := range p.Volumes { for k := range p.Volumes {
names = append(names, k) names = append(names, k)
@ -68,7 +71,7 @@ func (p Project) VolumeNames() []string {
} }
// NetworkNames return names for all volumes in this Compose config // NetworkNames return names for all volumes in this Compose config
func (p Project) NetworkNames() []string { func (p *Project) NetworkNames() []string {
var names []string var names []string
for k := range p.Networks { for k := range p.Networks {
names = append(names, k) names = append(names, k)
@ -78,7 +81,7 @@ func (p Project) NetworkNames() []string {
} }
// SecretNames return names for all secrets in this Compose config // SecretNames return names for all secrets in this Compose config
func (p Project) SecretNames() []string { func (p *Project) SecretNames() []string {
var names []string var names []string
for k := range p.Secrets { for k := range p.Secrets {
names = append(names, k) names = append(names, k)
@ -88,7 +91,7 @@ func (p Project) SecretNames() []string {
} }
// ConfigNames return names for all configs in this Compose config // ConfigNames return names for all configs in this Compose config
func (p Project) ConfigNames() []string { func (p *Project) ConfigNames() []string {
var names []string var names []string
for k := range p.Configs { for k := range p.Configs {
names = append(names, k) names = append(names, k)
@ -98,7 +101,7 @@ func (p Project) ConfigNames() []string {
} }
// GetServices retrieve services by names, or return all services if no name specified // GetServices retrieve services by names, or return all services if no name specified
func (p Project) GetServices(names ...string) (Services, error) { func (p *Project) GetServices(names ...string) (Services, error) {
if len(names) == 0 { if len(names) == 0 {
return p.Services, nil return p.Services, nil
} }
@ -119,8 +122,18 @@ func (p Project) GetServices(names ...string) (Services, error) {
return services, nil return services, nil
} }
// GetDisabledService looks a service up by name among those disabled by
// the active profiles, returning an error when no disabled service
// matches.
func (p Project) GetDisabledService(name string) (ServiceConfig, error) {
	for _, svc := range p.DisabledServices {
		if svc.Name != name {
			continue
		}
		return svc, nil
	}
	return ServiceConfig{}, fmt.Errorf("no such service: %s", name)
}
// GetService retrieve a specific service by name // GetService retrieve a specific service by name
func (p Project) GetService(name string) (ServiceConfig, error) { func (p *Project) GetService(name string) (ServiceConfig, error) {
services, err := p.GetServices(name) services, err := p.GetServices(name)
if err != nil { if err != nil {
return ServiceConfig{}, err return ServiceConfig{}, err
@ -131,7 +144,7 @@ func (p Project) GetService(name string) (ServiceConfig, error) {
return services[0], nil return services[0], nil
} }
func (p Project) AllServices() Services { func (p *Project) AllServices() Services {
var all Services var all Services
all = append(all, p.Services...) all = append(all, p.Services...)
all = append(all, p.DisabledServices...) all = append(all, p.DisabledServices...)
@ -140,12 +153,16 @@ func (p Project) AllServices() Services {
type ServiceFunc func(service ServiceConfig) error type ServiceFunc func(service ServiceConfig) error
// WithServices run ServiceFunc on each service and dependencies in dependency order // WithServices run ServiceFunc on each service and dependencies according to DependencyPolicy
func (p Project) WithServices(names []string, fn ServiceFunc) error { func (p *Project) WithServices(names []string, fn ServiceFunc, options ...DependencyOption) error {
return p.withServices(names, fn, map[string]bool{}) if len(options) == 0 {
// backward compatibility
options = []DependencyOption{IncludeDependencies}
}
return p.withServices(names, fn, map[string]bool{}, options)
} }
func (p Project) withServices(names []string, fn ServiceFunc, seen map[string]bool) error { func (p *Project) withServices(names []string, fn ServiceFunc, seen map[string]bool, options []DependencyOption) error {
services, err := p.GetServices(names...) services, err := p.GetServices(names...)
if err != nil { if err != nil {
return err return err
@ -155,9 +172,21 @@ func (p Project) withServices(names []string, fn ServiceFunc, seen map[string]bo
continue continue
} }
seen[service.Name] = true seen[service.Name] = true
dependencies := service.GetDependencies() var dependencies []string
for _, policy := range options {
switch policy {
case IncludeDependents:
dependencies = append(dependencies, p.GetDependentsForService(service)...)
case IncludeDependencies:
dependencies = append(dependencies, service.GetDependencies()...)
case IgnoreDependencies:
// Noop
default:
return fmt.Errorf("unsupported dependency policy %d", policy)
}
}
if len(dependencies) > 0 { if len(dependencies) > 0 {
err := p.withServices(dependencies, fn, seen) err := p.withServices(dependencies, fn, seen, options)
if err != nil { if err != nil {
return err return err
} }
@ -169,6 +198,18 @@ func (p Project) withServices(names []string, fn ServiceFunc, seen map[string]bo
return nil return nil
} }
// GetDependentsForService returns the names of every service that
// declares a depends_on relationship targeting s, in the order services
// appear in the project.
func (p *Project) GetDependentsForService(s ServiceConfig) []string {
	var dependent []string
	for _, service := range p.Services {
		// Map keys are unique, so a direct lookup replaces the original
		// linear scan over all DependsOn keys with identical results.
		if _, ok := service.DependsOn[s.Name]; ok {
			dependent = append(dependent, service.Name)
		}
	}
	return dependent
}
// RelativePath resolve a relative path based project's working directory // RelativePath resolve a relative path based project's working directory
func (p *Project) RelativePath(path string) string { func (p *Project) RelativePath(path string) string {
if path[0] == '~' { if path[0] == '~' {
@ -219,7 +260,7 @@ func (p *Project) ApplyProfiles(profiles []string) {
} }
} }
var enabled, disabled Services var enabled, disabled Services
for _, service := range p.Services { for _, service := range p.AllServices() {
if service.HasProfile(profiles) { if service.HasProfile(profiles) {
enabled = append(enabled, service) enabled = append(enabled, service)
} else { } else {
@ -228,6 +269,41 @@ func (p *Project) ApplyProfiles(profiles []string) {
} }
p.Services = enabled p.Services = enabled
p.DisabledServices = disabled p.DisabledServices = disabled
p.Profiles = profiles
}
// EnableServices ensure services are enabled and activate profiles accordingly
func (p *Project) EnableServices(names ...string) error {
if len(names) == 0 {
return nil
}
var enabled []string
for _, name := range names {
_, err := p.GetService(name)
if err == nil {
// already enabled
continue
}
def, err := p.GetDisabledService(name)
if err != nil {
return err
}
enabled = append(enabled, def.Profiles...)
}
profiles := p.Profiles
PROFILES:
for _, profile := range enabled {
for _, p := range profiles {
if p == profile {
continue PROFILES
}
}
profiles = append(profiles, profile)
}
p.ApplyProfiles(profiles)
return p.ResolveServicesEnvironment(true)
} }
// WithoutUnnecessaryResources drops networks/volumes/secrets/configs that are not referenced by active services // WithoutUnnecessaryResources drops networks/volumes/secrets/configs that are not referenced by active services
@ -292,8 +368,16 @@ func (p *Project) WithoutUnnecessaryResources() {
p.Configs = configs p.Configs = configs
} }
// ForServices restrict the project model to a subset of services type DependencyOption int
func (p *Project) ForServices(names []string) error {
const (
IncludeDependencies = iota
IncludeDependents
IgnoreDependencies
)
// ForServices restrict the project model to selected services and dependencies
func (p *Project) ForServices(names []string, options ...DependencyOption) error {
if len(names) == 0 { if len(names) == 0 {
// All services // All services
return nil return nil
@ -303,7 +387,7 @@ func (p *Project) ForServices(names []string) error {
err := p.WithServices(names, func(service ServiceConfig) error { err := p.WithServices(names, func(service ServiceConfig) error {
set[service.Name] = struct{}{} set[service.Name] = struct{}{}
return nil return nil
}) }, options...)
if err != nil { if err != nil {
return err return err
} }
@ -357,6 +441,44 @@ func (p *Project) ResolveImages(resolver func(named reference.Named) (godigest.D
return eg.Wait() return eg.Wait()
} }
// MarshalYAML renders the Project as a YAML document using 2-space
// indentation.
func (p *Project) MarshalYAML() ([]byte, error) {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	enc.SetIndent(2)
	// enc.CompactSeqIndent() FIXME https://github.com/go-yaml/yaml/pull/753
	if err := enc.Encode(p); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// MarshalJSON implements json.Marshaler for Project: name and services
// are always emitted, the other top-level sections only when non-empty,
// and extension (x-*) entries are inlined at the top level.
func (p *Project) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{
		"name":     p.Name,
		"services": p.Services,
	}
	if len(p.Networks) != 0 {
		out["networks"] = p.Networks
	}
	if len(p.Volumes) != 0 {
		out["volumes"] = p.Volumes
	}
	if len(p.Secrets) != 0 {
		out["secrets"] = p.Secrets
	}
	if len(p.Configs) != 0 {
		out["configs"] = p.Configs
	}
	for key, value := range p.Extensions {
		out[key] = value
	}
	return json.Marshal(out)
}
// ResolveServicesEnvironment parse env_files set for services to resolve the actual environment map for services // ResolveServicesEnvironment parse env_files set for services to resolve the actual environment map for services
func (p Project) ResolveServicesEnvironment(discardEnvFiles bool) error { func (p Project) ResolveServicesEnvironment(discardEnvFiles bool) error {
for i, service := range p.Services { for i, service := range p.Services {

@ -107,7 +107,7 @@ type ServiceConfig struct {
// Command for the service containers. // Command for the service containers.
// If set, overrides COMMAND from the image. // If set, overrides COMMAND from the image.
// //
// Set to `[]` or `''` to clear the command from the image. // Set to `[]` or an empty string to clear the command from the image.
Command ShellCommand `yaml:",omitempty" json:"command"` // NOTE: we can NOT omitempty for JSON! see ShellCommand type for details. Command ShellCommand `yaml:",omitempty" json:"command"` // NOTE: we can NOT omitempty for JSON! see ShellCommand type for details.
Configs []ServiceConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"` Configs []ServiceConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"`
@ -126,13 +126,13 @@ type ServiceConfig struct {
// Entrypoint for the service containers. // Entrypoint for the service containers.
// If set, overrides ENTRYPOINT from the image. // If set, overrides ENTRYPOINT from the image.
// //
// Set to `[]` or `''` to clear the entrypoint from the image. // Set to `[]` or an empty string to clear the entrypoint from the image.
Entrypoint ShellCommand `yaml:"entrypoint,omitempty" json:"entrypoint"` // NOTE: we can NOT omitempty for JSON! see ShellCommand type for details. Entrypoint ShellCommand `yaml:"entrypoint,omitempty" json:"entrypoint"` // NOTE: we can NOT omitempty for JSON! see ShellCommand type for details.
Environment MappingWithEquals `yaml:",omitempty" json:"environment,omitempty"` Environment MappingWithEquals `yaml:",omitempty" json:"environment,omitempty"`
EnvFile StringList `mapstructure:"env_file" yaml:"env_file,omitempty" json:"env_file,omitempty"` EnvFile StringList `mapstructure:"env_file" yaml:"env_file,omitempty" json:"env_file,omitempty"`
Expose StringOrNumberList `yaml:",omitempty" json:"expose,omitempty"` Expose StringOrNumberList `yaml:",omitempty" json:"expose,omitempty"`
Extends ExtendsConfig `yaml:"extends,omitempty" json:"extends,omitempty"` Extends *ExtendsConfig `yaml:"extends,omitempty" json:"extends,omitempty"`
ExternalLinks []string `mapstructure:"external_links" yaml:"external_links,omitempty" json:"external_links,omitempty"` ExternalLinks []string `mapstructure:"external_links" yaml:"external_links,omitempty" json:"external_links,omitempty"`
ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"`
GroupAdd []string `mapstructure:"group_add" yaml:"group_add,omitempty" json:"group_add,omitempty"` GroupAdd []string `mapstructure:"group_add" yaml:"group_add,omitempty" json:"group_add,omitempty"`
@ -186,7 +186,7 @@ type ServiceConfig struct {
VolumesFrom []string `mapstructure:"volumes_from" yaml:"volumes_from,omitempty" json:"volumes_from,omitempty"` VolumesFrom []string `mapstructure:"volumes_from" yaml:"volumes_from,omitempty" json:"volumes_from,omitempty"`
WorkingDir string `mapstructure:"working_dir" yaml:"working_dir,omitempty" json:"working_dir,omitempty"` WorkingDir string `mapstructure:"working_dir" yaml:"working_dir,omitempty" json:"working_dir,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// NetworksByPriority return the service networks IDs sorted according to Priority // NetworksByPriority return the service networks IDs sorted according to Priority
@ -254,37 +254,26 @@ const (
NetworkModeContainerPrefix = ContainerPrefix NetworkModeContainerPrefix = ContainerPrefix
) )
// GetDependencies retrieve all services this service depends on // GetDependencies retrieves all services this service depends on
func (s ServiceConfig) GetDependencies() []string { func (s ServiceConfig) GetDependencies() []string {
dependencies := make(set) var dependencies []string
for dependency := range s.DependsOn { for service := range s.DependsOn {
dependencies.append(dependency) dependencies = append(dependencies, service)
}
for _, link := range s.Links {
parts := strings.Split(link, ":")
if len(parts) == 2 {
dependencies.append(parts[0])
} else {
dependencies.append(link)
}
}
if strings.HasPrefix(s.NetworkMode, ServicePrefix) {
dependencies.append(s.NetworkMode[len(ServicePrefix):])
}
if strings.HasPrefix(s.Ipc, ServicePrefix) {
dependencies.append(s.Ipc[len(ServicePrefix):])
}
if strings.HasPrefix(s.Pid, ServicePrefix) {
dependencies.append(s.Pid[len(ServicePrefix):])
} }
for _, vol := range s.VolumesFrom { return dependencies
if !strings.HasPrefix(s.Pid, ContainerPrefix) { }
spec := strings.Split(vol, ":")
dependencies.append(spec[0]) // GetDependents retrieves all services which depend on this service
func (s ServiceConfig) GetDependents(p *Project) []string {
var dependent []string
for _, service := range p.Services {
for name := range service.DependsOn {
if name == s.Name {
dependent = append(dependent, service.Name)
}
} }
} }
return dependent
return dependencies.toSlice()
} }
type set map[string]struct{} type set map[string]struct{}
@ -305,25 +294,27 @@ func (s set) toSlice() []string {
// BuildConfig is a type for build // BuildConfig is a type for build
type BuildConfig struct { type BuildConfig struct {
Context string `yaml:",omitempty" json:"context,omitempty"` Context string `yaml:",omitempty" json:"context,omitempty"`
Dockerfile string `yaml:",omitempty" json:"dockerfile,omitempty"` Dockerfile string `yaml:",omitempty" json:"dockerfile,omitempty"`
Args MappingWithEquals `yaml:",omitempty" json:"args,omitempty"` DockerfileInline string `mapstructure:"dockerfile_inline,omitempty" yaml:"dockerfile_inline,omitempty" json:"dockerfile_inline,omitempty"`
SSH SSHConfig `yaml:"ssh,omitempty" json:"ssh,omitempty"` Args MappingWithEquals `yaml:",omitempty" json:"args,omitempty"`
Labels Labels `yaml:",omitempty" json:"labels,omitempty"` SSH SSHConfig `yaml:"ssh,omitempty" json:"ssh,omitempty"`
CacheFrom StringList `mapstructure:"cache_from" yaml:"cache_from,omitempty" json:"cache_from,omitempty"` Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
CacheTo StringList `mapstructure:"cache_to" yaml:"cache_to,omitempty" json:"cache_to,omitempty"` CacheFrom StringList `mapstructure:"cache_from" yaml:"cache_from,omitempty" json:"cache_from,omitempty"`
NoCache bool `mapstructure:"no_cache" yaml:"no_cache,omitempty" json:"no_cache,omitempty"` CacheTo StringList `mapstructure:"cache_to" yaml:"cache_to,omitempty" json:"cache_to,omitempty"`
Pull bool `mapstructure:"pull" yaml:"pull,omitempty" json:"pull,omitempty"` NoCache bool `mapstructure:"no_cache" yaml:"no_cache,omitempty" json:"no_cache,omitempty"`
ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` AdditionalContexts Mapping `mapstructure:"additional_contexts" yaml:"additional_contexts,omitempty" json:"additional_contexts,omitempty"`
Isolation string `yaml:",omitempty" json:"isolation,omitempty"` Pull bool `mapstructure:"pull" yaml:"pull,omitempty" json:"pull,omitempty"`
Network string `yaml:",omitempty" json:"network,omitempty"` ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"`
Target string `yaml:",omitempty" json:"target,omitempty"` Isolation string `yaml:",omitempty" json:"isolation,omitempty"`
Secrets []ServiceSecretConfig `yaml:",omitempty" json:"secrets,omitempty"` Network string `yaml:",omitempty" json:"network,omitempty"`
Tags StringList `mapstructure:"tags" yaml:"tags,omitempty" json:"tags,omitempty"` Target string `yaml:",omitempty" json:"target,omitempty"`
Platforms StringList `mapstructure:"platforms" yaml:"platforms,omitempty" json:"platforms,omitempty"` Secrets []ServiceSecretConfig `yaml:",omitempty" json:"secrets,omitempty"`
Privileged bool `yaml:",omitempty" json:"privileged,omitempty"` Tags StringList `mapstructure:"tags" yaml:"tags,omitempty" json:"tags,omitempty"`
Platforms StringList `mapstructure:"platforms" yaml:"platforms,omitempty" json:"platforms,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Privileged bool `yaml:",omitempty" json:"privileged,omitempty"`
Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// BlkioConfig define blkio config // BlkioConfig define blkio config
@ -335,7 +326,7 @@ type BlkioConfig struct {
DeviceWriteBps []ThrottleDevice `mapstructure:"device_write_bps" yaml:",omitempty" json:"device_write_bps,omitempty"` DeviceWriteBps []ThrottleDevice `mapstructure:"device_write_bps" yaml:",omitempty" json:"device_write_bps,omitempty"`
DeviceWriteIOps []ThrottleDevice `mapstructure:"device_write_iops" yaml:",omitempty" json:"device_write_iops,omitempty"` DeviceWriteIOps []ThrottleDevice `mapstructure:"device_write_iops" yaml:",omitempty" json:"device_write_iops,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// WeightDevice is a structure that holds device:weight pair // WeightDevice is a structure that holds device:weight pair
@ -343,34 +334,34 @@ type WeightDevice struct {
Path string Path string
Weight uint16 Weight uint16
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// ThrottleDevice is a structure that holds device:rate_per_second pair // ThrottleDevice is a structure that holds device:rate_per_second pair
type ThrottleDevice struct { type ThrottleDevice struct {
Path string Path string
Rate uint64 Rate UnitBytes
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// ShellCommand is a string or list of string args. // ShellCommand is a string or list of string args.
// //
// When marshaled to YAML, nil command fields will be omitted if `omitempty` // When marshaled to YAML, nil command fields will be omitted if `omitempty`
// is specified as a struct tag. Explicitly empty commands (i.e. `[]` or `''`) // is specified as a struct tag. Explicitly empty commands (i.e. `[]` or
// will serialize to an empty array (`[]`). // empty string will serialize to an empty array (`[]`).
// //
// When marshaled to JSON, the `omitempty` struct must NOT be specified. // When marshaled to JSON, the `omitempty` struct must NOT be specified.
// If the command field is nil, it will be serialized as `null`. // If the command field is nil, it will be serialized as `null`.
// Explicitly empty commands (i.e. `[]` or `''`) will serialize to an empty // Explicitly empty commands (i.e. `[]` or empty string) will serialize to
// array (`[]`). // an empty array (`[]`).
// //
// The distinction between nil and explicitly empty is important to distinguish // The distinction between nil and explicitly empty is important to distinguish
// between an unset value and a provided, but empty, value, which should be // between an unset value and a provided, but empty, value, which should be
// preserved so that it can override any base value (e.g. container entrypoint). // preserved so that it can override any base value (e.g. container entrypoint).
// //
// The different semantics between YAML and JSON are due to limitations with // The different semantics between YAML and JSON are due to limitations with
// JSON marshaling + `omitempty` in the Go stdlib, while gopkg.in/yaml.v2 gives // JSON marshaling + `omitempty` in the Go stdlib, while gopkg.in/yaml.v3 gives
// us more flexibility via the yaml.IsZeroer interface. // us more flexibility via the yaml.IsZeroer interface.
// //
// In the future, it might make sense to make fields of this type be // In the future, it might make sense to make fields of this type be
@ -394,7 +385,7 @@ func (s ShellCommand) IsZero() bool {
// accurately if the `omitempty` struct tag is omitted/forgotten. // accurately if the `omitempty` struct tag is omitted/forgotten.
// //
// A similar MarshalJSON() implementation is not needed because the Go stdlib // A similar MarshalJSON() implementation is not needed because the Go stdlib
// already serializes nil slices to `null`, whereas gopkg.in/yaml.v2 by default // already serializes nil slices to `null`, whereas gopkg.in/yaml.v3 by default
// serializes nil slices to `[]`. // serializes nil slices to `[]`.
func (s ShellCommand) MarshalYAML() (interface{}, error) { func (s ShellCommand) MarshalYAML() (interface{}, error) {
if s == nil { if s == nil {
@ -574,7 +565,7 @@ type LoggingConfig struct {
Driver string `yaml:",omitempty" json:"driver,omitempty"` Driver string `yaml:",omitempty" json:"driver,omitempty"`
Options map[string]string `yaml:",omitempty" json:"options,omitempty"` Options map[string]string `yaml:",omitempty" json:"options,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// DeployConfig the deployment configuration for a service // DeployConfig the deployment configuration for a service
@ -589,7 +580,7 @@ type DeployConfig struct {
Placement Placement `yaml:",omitempty" json:"placement,omitempty"` Placement Placement `yaml:",omitempty" json:"placement,omitempty"`
EndpointMode string `mapstructure:"endpoint_mode" yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"` EndpointMode string `mapstructure:"endpoint_mode" yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// HealthCheckConfig the healthcheck configuration for a service // HealthCheckConfig the healthcheck configuration for a service
@ -601,7 +592,7 @@ type HealthCheckConfig struct {
StartPeriod *Duration `mapstructure:"start_period" yaml:"start_period,omitempty" json:"start_period,omitempty"` StartPeriod *Duration `mapstructure:"start_period" yaml:"start_period,omitempty" json:"start_period,omitempty"`
Disable bool `yaml:",omitempty" json:"disable,omitempty"` Disable bool `yaml:",omitempty" json:"disable,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// HealthCheckTest is the command run to test the health of a service // HealthCheckTest is the command run to test the health of a service
@ -616,7 +607,7 @@ type UpdateConfig struct {
MaxFailureRatio float32 `mapstructure:"max_failure_ratio" yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"` MaxFailureRatio float32 `mapstructure:"max_failure_ratio" yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"`
Order string `yaml:",omitempty" json:"order,omitempty"` Order string `yaml:",omitempty" json:"order,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// Resources the resource limits and reservations // Resources the resource limits and reservations
@ -624,7 +615,7 @@ type Resources struct {
Limits *Resource `yaml:",omitempty" json:"limits,omitempty"` Limits *Resource `yaml:",omitempty" json:"limits,omitempty"`
Reservations *Resource `yaml:",omitempty" json:"reservations,omitempty"` Reservations *Resource `yaml:",omitempty" json:"reservations,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// Resource is a resource to be limited or reserved // Resource is a resource to be limited or reserved
@ -636,7 +627,7 @@ type Resource struct {
Devices []DeviceRequest `mapstructure:"devices" yaml:"devices,omitempty" json:"devices,omitempty"` Devices []DeviceRequest `mapstructure:"devices" yaml:"devices,omitempty" json:"devices,omitempty"`
GenericResources []GenericResource `mapstructure:"generic_resources" yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"` GenericResources []GenericResource `mapstructure:"generic_resources" yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
type DeviceRequest struct { type DeviceRequest struct {
@ -651,7 +642,7 @@ type DeviceRequest struct {
type GenericResource struct { type GenericResource struct {
DiscreteResourceSpec *DiscreteGenericResource `mapstructure:"discrete_resource_spec" yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"` DiscreteResourceSpec *DiscreteGenericResource `mapstructure:"discrete_resource_spec" yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// DiscreteGenericResource represents a "user defined" resource which is defined // DiscreteGenericResource represents a "user defined" resource which is defined
@ -662,7 +653,7 @@ type DiscreteGenericResource struct {
Kind string `json:"kind"` Kind string `json:"kind"`
Value int64 `json:"value"` Value int64 `json:"value"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// UnitBytes is the bytes type // UnitBytes is the bytes type
@ -685,7 +676,7 @@ type RestartPolicy struct {
MaxAttempts *uint64 `mapstructure:"max_attempts" yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"` MaxAttempts *uint64 `mapstructure:"max_attempts" yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"`
Window *Duration `yaml:",omitempty" json:"window,omitempty"` Window *Duration `yaml:",omitempty" json:"window,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// Placement constraints for the service // Placement constraints for the service
@ -694,14 +685,14 @@ type Placement struct {
Preferences []PlacementPreferences `yaml:",omitempty" json:"preferences,omitempty"` Preferences []PlacementPreferences `yaml:",omitempty" json:"preferences,omitempty"`
MaxReplicas uint64 `mapstructure:"max_replicas_per_node" yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"` MaxReplicas uint64 `mapstructure:"max_replicas_per_node" yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// PlacementPreferences is the preferences for a service placement // PlacementPreferences is the preferences for a service placement
type PlacementPreferences struct { type PlacementPreferences struct {
Spread string `yaml:",omitempty" json:"spread,omitempty"` Spread string `yaml:",omitempty" json:"spread,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// ServiceNetworkConfig is the network configuration for a service // ServiceNetworkConfig is the network configuration for a service
@ -712,7 +703,7 @@ type ServiceNetworkConfig struct {
Ipv6Address string `mapstructure:"ipv6_address" yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"` Ipv6Address string `mapstructure:"ipv6_address" yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"`
LinkLocalIPs []string `mapstructure:"link_local_ips" yaml:"link_local_ips,omitempty" json:"link_local_ips,omitempty"` LinkLocalIPs []string `mapstructure:"link_local_ips" yaml:"link_local_ips,omitempty" json:"link_local_ips,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// ServicePortConfig is the port configuration for a service // ServicePortConfig is the port configuration for a service
@ -723,7 +714,7 @@ type ServicePortConfig struct {
Published string `yaml:",omitempty" json:"published,omitempty"` Published string `yaml:",omitempty" json:"published,omitempty"`
Protocol string `yaml:",omitempty" json:"protocol,omitempty"` Protocol string `yaml:",omitempty" json:"protocol,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// ParsePortConfig parse short syntax for service port configuration // ParsePortConfig parse short syntax for service port configuration
@ -776,7 +767,7 @@ type ServiceVolumeConfig struct {
Volume *ServiceVolumeVolume `yaml:",omitempty" json:"volume,omitempty"` Volume *ServiceVolumeVolume `yaml:",omitempty" json:"volume,omitempty"`
Tmpfs *ServiceVolumeTmpfs `yaml:",omitempty" json:"tmpfs,omitempty"` Tmpfs *ServiceVolumeTmpfs `yaml:",omitempty" json:"tmpfs,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// String render ServiceVolumeConfig as a volume string, one can parse back using loader.ParseVolume // String render ServiceVolumeConfig as a volume string, one can parse back using loader.ParseVolume
@ -820,7 +811,7 @@ type ServiceVolumeBind struct {
Propagation string `yaml:",omitempty" json:"propagation,omitempty"` Propagation string `yaml:",omitempty" json:"propagation,omitempty"`
CreateHostPath bool `mapstructure:"create_host_path" yaml:"create_host_path,omitempty" json:"create_host_path,omitempty"` CreateHostPath bool `mapstructure:"create_host_path" yaml:"create_host_path,omitempty" json:"create_host_path,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// SELinux represents the SELinux re-labeling options. // SELinux represents the SELinux re-labeling options.
@ -851,7 +842,7 @@ const (
type ServiceVolumeVolume struct { type ServiceVolumeVolume struct {
NoCopy bool `mapstructure:"nocopy" yaml:"nocopy,omitempty" json:"nocopy,omitempty"` NoCopy bool `mapstructure:"nocopy" yaml:"nocopy,omitempty" json:"nocopy,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// ServiceVolumeTmpfs are options for a service volume of type tmpfs // ServiceVolumeTmpfs are options for a service volume of type tmpfs
@ -860,7 +851,7 @@ type ServiceVolumeTmpfs struct {
Mode uint32 `yaml:",omitempty" json:"mode,omitempty"` Mode uint32 `yaml:",omitempty" json:"mode,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// FileReferenceConfig for a reference to a swarm file object // FileReferenceConfig for a reference to a swarm file object
@ -871,7 +862,7 @@ type FileReferenceConfig struct {
GID string `yaml:",omitempty" json:"gid,omitempty"` GID string `yaml:",omitempty" json:"gid,omitempty"`
Mode *uint32 `yaml:",omitempty" json:"mode,omitempty"` Mode *uint32 `yaml:",omitempty" json:"mode,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// ServiceConfigObjConfig is the config obj configuration for a service // ServiceConfigObjConfig is the config obj configuration for a service
@ -886,7 +877,7 @@ type UlimitsConfig struct {
Soft int `yaml:",omitempty" json:"soft,omitempty"` Soft int `yaml:",omitempty" json:"soft,omitempty"`
Hard int `yaml:",omitempty" json:"hard,omitempty"` Hard int `yaml:",omitempty" json:"hard,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// MarshalYAML makes UlimitsConfig implement yaml.Marshaller // MarshalYAML makes UlimitsConfig implement yaml.Marshaller
@ -894,7 +885,13 @@ func (u *UlimitsConfig) MarshalYAML() (interface{}, error) {
if u.Single != 0 { if u.Single != 0 {
return u.Single, nil return u.Single, nil
} }
return u, nil return struct {
Soft int
Hard int
}{
Soft: u.Soft,
Hard: u.Hard,
}, nil
} }
// MarshalJSON makes UlimitsConfig implement json.Marshaller // MarshalJSON makes UlimitsConfig implement json.Marshaller
@ -908,23 +905,23 @@ func (u *UlimitsConfig) MarshalJSON() ([]byte, error) {
// NetworkConfig for a network // NetworkConfig for a network
type NetworkConfig struct { type NetworkConfig struct {
Name string `yaml:",omitempty" json:"name,omitempty"` Name string `yaml:",omitempty" json:"name,omitempty"`
Driver string `yaml:",omitempty" json:"driver,omitempty"` Driver string `yaml:",omitempty" json:"driver,omitempty"`
DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
Ipam IPAMConfig `yaml:",omitempty" json:"ipam,omitempty"` Ipam IPAMConfig `yaml:",omitempty" json:"ipam,omitempty"`
External External `yaml:",omitempty" json:"external,omitempty"` External External `yaml:",omitempty" json:"external,omitempty"`
Internal bool `yaml:",omitempty" json:"internal,omitempty"` Internal bool `yaml:",omitempty" json:"internal,omitempty"`
Attachable bool `yaml:",omitempty" json:"attachable,omitempty"` Attachable bool `yaml:",omitempty" json:"attachable,omitempty"`
Labels Labels `yaml:",omitempty" json:"labels,omitempty"` Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
EnableIPv6 bool `mapstructure:"enable_ipv6" yaml:"enable_ipv6,omitempty" json:"enable_ipv6,omitempty"` EnableIPv6 bool `mapstructure:"enable_ipv6" yaml:"enable_ipv6,omitempty" json:"enable_ipv6,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// IPAMConfig for a network // IPAMConfig for a network
type IPAMConfig struct { type IPAMConfig struct {
Driver string `yaml:",omitempty" json:"driver,omitempty"` Driver string `yaml:",omitempty" json:"driver,omitempty"`
Config []*IPAMPool `yaml:",omitempty" json:"config,omitempty"` Config []*IPAMPool `yaml:",omitempty" json:"config,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// IPAMPool for a network // IPAMPool for a network
@ -938,21 +935,21 @@ type IPAMPool struct {
// VolumeConfig for a volume // VolumeConfig for a volume
type VolumeConfig struct { type VolumeConfig struct {
Name string `yaml:",omitempty" json:"name,omitempty"` Name string `yaml:",omitempty" json:"name,omitempty"`
Driver string `yaml:",omitempty" json:"driver,omitempty"` Driver string `yaml:",omitempty" json:"driver,omitempty"`
DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
External External `yaml:",omitempty" json:"external,omitempty"` External External `yaml:",omitempty" json:"external,omitempty"`
Labels Labels `yaml:",omitempty" json:"labels,omitempty"` Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// External identifies a Volume or Network as a reference to a resource that is // External identifies a Volume or Network as a reference to a resource that is
// not managed, and should already exist. // not managed, and should already exist.
// External.name is deprecated and replaced by Volume.name // External.name is deprecated and replaced by Volume.name
type External struct { type External struct {
Name string `yaml:",omitempty" json:"name,omitempty"` Name string `yaml:",omitempty" json:"name,omitempty"`
External bool `yaml:",omitempty" json:"external,omitempty"` External bool `yaml:",omitempty" json:"external,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// MarshalYAML makes External implement yaml.Marshaller // MarshalYAML makes External implement yaml.Marshaller
@ -973,23 +970,23 @@ func (e External) MarshalJSON() ([]byte, error) {
// CredentialSpecConfig for credential spec on Windows // CredentialSpecConfig for credential spec on Windows
type CredentialSpecConfig struct { type CredentialSpecConfig struct {
Config string `yaml:",omitempty" json:"config,omitempty"` // Config was added in API v1.40 Config string `yaml:",omitempty" json:"config,omitempty"` // Config was added in API v1.40
File string `yaml:",omitempty" json:"file,omitempty"` File string `yaml:",omitempty" json:"file,omitempty"`
Registry string `yaml:",omitempty" json:"registry,omitempty"` Registry string `yaml:",omitempty" json:"registry,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
// FileObjectConfig is a config type for a file used by a service // FileObjectConfig is a config type for a file used by a service
type FileObjectConfig struct { type FileObjectConfig struct {
Name string `yaml:",omitempty" json:"name,omitempty"` Name string `yaml:",omitempty" json:"name,omitempty"`
File string `yaml:",omitempty" json:"file,omitempty"` File string `yaml:",omitempty" json:"file,omitempty"`
Environment string `yaml:",omitempty" json:"environment,omitempty"` Environment string `yaml:",omitempty" json:"environment,omitempty"`
External External `yaml:",omitempty" json:"external,omitempty"` External External `yaml:",omitempty" json:"external,omitempty"`
Labels Labels `yaml:",omitempty" json:"labels,omitempty"` Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
Driver string `yaml:",omitempty" json:"driver,omitempty"` Driver string `yaml:",omitempty" json:"driver,omitempty"`
DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
TemplateDriver string `mapstructure:"template_driver" yaml:"template_driver,omitempty" json:"template_driver,omitempty"` TemplateDriver string `mapstructure:"template_driver" yaml:"template_driver,omitempty" json:"template_driver,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
const ( const (
@ -1006,11 +1003,15 @@ const (
type DependsOnConfig map[string]ServiceDependency type DependsOnConfig map[string]ServiceDependency
type ServiceDependency struct { type ServiceDependency struct {
Condition string `yaml:",omitempty" json:"condition,omitempty"` Condition string `yaml:",omitempty" json:"condition,omitempty"`
Extensions map[string]interface{} `yaml:",inline" json:"-"` Restart bool `yaml:",omitempty" json:"restart,omitempty"`
Extensions Extensions `mapstructure:"#extensions" yaml:",inline" json:"-"`
} }
type ExtendsConfig MappingWithEquals type ExtendsConfig struct {
File string `yaml:",omitempty" json:"file,omitempty"`
Service string `yaml:",omitempty" json:"service,omitempty"`
}
// SecretConfig for a secret // SecretConfig for a secret
type SecretConfig FileObjectConfig type SecretConfig FileObjectConfig

@ -1,247 +0,0 @@
package digestset
import (
"errors"
"sort"
"strings"
"sync"
digest "github.com/opencontainers/go-digest"
)
var (
// ErrDigestNotFound is used when a matching digest
// could not be found in a set.
ErrDigestNotFound = errors.New("digest not found")
// ErrDigestAmbiguous is used when multiple digests
// are found in a set. None of the matching digests
// should be considered valid matches.
ErrDigestAmbiguous = errors.New("ambiguous digest string")
)
// Set is used to hold a unique set of digests which
// may be easily referenced by a string
// representation of the digest as well as by a short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
// appropriately long short code should be used.
type Set struct {
	mutex sync.RWMutex // guards entries for concurrent use
	// entries is kept ordered by (val, alg) — see Add — so that
	// Lookup can binary-search with sort.Search.
	entries digestEntries
}

// NewSet creates an empty set of digests
// which may have digests added.
func NewSet() *Set {
	return &Set{
		entries: digestEntries{},
	}
}
// checkShortMatch reports whether the short form (shortAlg, shortHex)
// could match the full digest (alg, hex). This does not test equality:
// shortHex may be a prefix of hex, and an empty shortAlg matches any
// algorithm.
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
	// A prefix check subsumes the equal-length case: when the two hex
	// strings have the same length, HasPrefix holds exactly when they
	// are equal.
	if !strings.HasPrefix(hex, shortHex) {
		return false
	}
	// When an algorithm constraint is given it must match exactly.
	return shortAlg == "" || string(alg) == shortAlg
}
// Lookup looks for a digest matching the given string representation.
// If no digests could be found ErrDigestNotFound will be returned
// with an empty digest value. If multiple matches are found
// ErrDigestAmbiguous will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (digest.Digest, error) {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	if len(dst.entries) == 0 {
		return "", ErrDigestNotFound
	}
	var (
		searchFunc func(int) bool
		alg        digest.Algorithm
		hex        string
	)
	dgst, err := digest.Parse(d)
	if err == digest.ErrDigestInvalidFormat {
		// Not a fully-qualified digest: treat d as a short hex prefix
		// with no algorithm constraint.
		hex = d
		searchFunc = func(i int) bool {
			return dst.entries[i].val >= d
		}
	} else {
		// Fully-qualified digest: search by hex value, breaking ties on
		// algorithm, mirroring the (val, alg) order maintained by Add.
		hex = dgst.Hex()
		alg = dgst.Algorithm()
		searchFunc = func(i int) bool {
			if dst.entries[i].val == hex {
				return dst.entries[i].alg >= alg
			}
			return dst.entries[i].val >= hex
		}
	}
	// Binary search for the first candidate entry.
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
		return "", ErrDigestNotFound
	}
	// An exact (alg, hex) match wins outright.
	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
		return dst.entries[idx].digest, nil
	}
	// Because entries are sorted, any other entry matching the short
	// form must be the immediate neighbor; a second match means the
	// short representation is ambiguous.
	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
		return "", ErrDigestAmbiguous
	}
	return dst.entries[idx].digest, nil
}
// Add adds the given digest to the set. An error will be returned
// if the given digest is invalid. If the digest already exists in the
// set, this operation will be a no-op.
func (dst *Set) Add(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	// Find the insertion point that keeps entries ordered by (val, alg).
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) {
		// Sorts after every existing entry: append at the end.
		dst.entries = append(dst.entries, entry)
		return nil
	} else if dst.entries[idx].digest == d {
		// Already present: no-op.
		return nil
	}
	// Insert at idx: grow by one slot, shift the tail right, then
	// place the new entry.
	entries := append(dst.entries, nil)
	copy(entries[idx+1:], entries[idx:len(entries)-1])
	entries[idx] = entry
	dst.entries = entries
	return nil
}
// Remove removes the given digest from the set. An err will be
// returned if the given digest is invalid. If the digest does
// not exist in the set, this operation will be a no-op.
func (dst *Set) Remove(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	target := digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	// Locate the candidate position in the (val, alg)-ordered entries.
	pos := sort.Search(len(dst.entries), func(i int) bool {
		if dst.entries[i].val == target.val {
			return dst.entries[i].alg >= target.alg
		}
		return dst.entries[i].val >= target.val
	})
	// No-op when the digest is not present.
	if pos == len(dst.entries) || dst.entries[pos].digest != d {
		return nil
	}
	// Remove the entry in place, shifting the tail left by one.
	dst.entries = append(dst.entries[:pos], dst.entries[pos+1:]...)
	return nil
}
// All returns all the digests in the set
func (dst *Set) All() []digest.Digest {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	// Copy out the digests so callers never alias internal state.
	digests := make([]digest.Digest, 0, len(dst.entries))
	for _, e := range dst.entries {
		digests = append(digests, e.digest)
	}
	return digests
}
// ShortCodeTable returns a map of Digest to unique short codes. The
// length represents the minimum value, the maximum length may be the
// entire value of digest if uniqueness cannot be achieved without the
// full value. This function will attempt to make short codes as short
// as possible to be unique.
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	m := make(map[digest.Digest]string, len(dst.entries))
	// l is the current candidate short-code length; it grows past
	// `length` only while collisions are being resolved.
	l := length
	// resetIdx tracks the last entry known to collide at the current
	// length, so l can be reset once the loop moves past it.
	resetIdx := 0
	for i := 0; i < len(dst.entries); i++ {
		var short string
		extended := true
		for extended {
			extended = false
			if len(dst.entries[i].val) <= l {
				// The whole hex value fits within l characters; use the
				// full digest string, which is unique by construction.
				short = dst.entries[i].digest.String()
			} else {
				short = dst.entries[i].val[:l]
				// Entries are sorted, so any entry sharing this prefix
				// immediately follows entry i.
				for j := i + 1; j < len(dst.entries); j++ {
					if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
						if j > resetIdx {
							resetIdx = j
						}
						extended = true
					} else {
						break
					}
				}
				if extended {
					// Prefix collides with a neighbor; retry with one
					// more character.
					l++
				}
			}
		}
		m[dst.entries[i].digest] = short
		if i >= resetIdx {
			// Past the last known collision: shrink back to the
			// requested minimum length.
			l = length
		}
	}
	return m
}
// digestEntry is the internal representation of a digest in a Set,
// split into algorithm and hex value for prefix matching.
type digestEntry struct {
	alg    digest.Algorithm
	val    string
	digest digest.Digest
}

// digestEntries implements sort.Interface ordered by (val, alg).
type digestEntries []*digestEntry

func (d digestEntries) Len() int { return len(d) }

func (d digestEntries) Less(i, j int) bool {
	// Order primarily by hex value, with algorithm as the tie-breaker.
	if d[i].val == d[j].val {
		return d[i].alg < d[j].alg
	}
	return d[i].val < d[j].val
}

func (d digestEntries) Swap(i, j int) { d[i], d[j] = d[j], d[i] }

@ -1,12 +0,0 @@
// +build gofuzz

package reference

// fuzzParseNormalizedNamed implements a fuzzer
// that targets ParseNormalizedNamed
// Export before building the fuzzer.
// nolint:deadcode
func fuzzParseNormalizedNamed(data []byte) int {
	_, _ = ParseNormalizedNamed(string(data))
	// NOTE(review): returning 1 appears to follow the go-fuzz convention
	// of marking the input as interesting — confirm against go-fuzz docs.
	return 1
}

@ -32,7 +32,7 @@ func FamiliarString(ref Reference) string {
} }
// FamiliarMatch reports whether ref matches the specified pattern. // FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns. // See [path.Match] for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) { func FamiliarMatch(pattern string, ref Reference) (bool, error) {
matched, err := path.Match(pattern, FamiliarString(ref)) matched, err := path.Match(pattern, FamiliarString(ref))
if namedRef, isNamed := ref.(Named); isNamed && !matched { if namedRef, isNamed := ref.(Named); isNamed && !matched {

@ -4,15 +4,39 @@ import (
"fmt" "fmt"
"strings" "strings"
"github.com/distribution/distribution/v3/digestset"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
) )
var ( const (
// legacyDefaultDomain is the legacy domain for Docker Hub (which was
// originally named "the Docker Index"). This domain is still used for
// authentication and image search, which were part of the "v1" Docker
// registry specification.
//
// This domain will continue to be supported, but there are plans to consolidate
// legacy domains to new "canonical" domains. Once those domains are decided
// on, we must update the normalization functions, but preserve compatibility
// with existing installs, clients, and user configuration.
legacyDefaultDomain = "index.docker.io" legacyDefaultDomain = "index.docker.io"
defaultDomain = "docker.io"
officialRepoName = "library" // defaultDomain is the default domain used for images on Docker Hub.
defaultTag = "latest" // It is used to normalize "familiar" names to canonical names, for example,
// to convert "ubuntu" to "docker.io/library/ubuntu:latest".
//
// Note that actual domain of Docker Hub's registry is registry-1.docker.io.
// This domain will continue to be supported, but there are plans to consolidate
// legacy domains to new "canonical" domains. Once those domains are decided
// on, we must update the normalization functions, but preserve compatibility
// with existing installs, clients, and user configuration.
defaultDomain = "docker.io"
// officialRepoPrefix is the namespace used for official images on Docker Hub.
// It is used to normalize "familiar" names to canonical names, for example,
// to convert "ubuntu" to "docker.io/library/ubuntu:latest".
officialRepoPrefix = "library/"
// defaultTag is the default tag if no tag is provided.
defaultTag = "latest"
) )
// normalizedNamed represents a name which has been // normalizedNamed represents a name which has been
@ -34,14 +58,14 @@ func ParseNormalizedNamed(s string) (Named, error) {
return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
} }
domain, remainder := splitDockerDomain(s) domain, remainder := splitDockerDomain(s)
var remoteName string var remote string
if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
remoteName = remainder[:tagSep] remote = remainder[:tagSep]
} else { } else {
remoteName = remainder remote = remainder
} }
if strings.ToLower(remoteName) != remoteName { if strings.ToLower(remote) != remote {
return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remoteName) return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remote)
} }
ref, err := Parse(domain + "/" + remainder) ref, err := Parse(domain + "/" + remainder)
@ -55,41 +79,53 @@ func ParseNormalizedNamed(s string) (Named, error) {
return named, nil return named, nil
} }
// ParseDockerRef normalizes the image reference following the docker convention. This is added // namedTaggedDigested is a reference that has both a tag and a digest.
// mainly for backward compatibility. type namedTaggedDigested interface {
// The reference returned can only be either tagged or digested. For reference contains both tag NamedTagged
// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ Digested
// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as }
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
// ParseDockerRef normalizes the image reference following the docker convention,
// which allows for references to contain both a tag and a digest. It returns a
// reference that is either tagged or digested. For references containing both
// a tag and a digest, it returns a digested reference. For example, the following
// reference:
//
// docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
//
// Is returned as a digested reference (with the ":latest" tag removed):
//
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
//
// References that are already "tagged" or "digested" are returned unmodified:
//
// // Already a digested reference
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
//
// // Already a named reference
// docker.io/library/busybox:latest
func ParseDockerRef(ref string) (Named, error) { func ParseDockerRef(ref string) (Named, error) {
named, err := ParseNormalizedNamed(ref) named, err := ParseNormalizedNamed(ref)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if _, ok := named.(NamedTagged); ok { if canonical, ok := named.(namedTaggedDigested); ok {
if canonical, ok := named.(Canonical); ok { // The reference is both tagged and digested; only return digested.
// The reference is both tagged and digested, only newNamed, err := WithName(canonical.Name())
// return digested. if err != nil {
newNamed, err := WithName(canonical.Name()) return nil, err
if err != nil {
return nil, err
}
newCanonical, err := WithDigest(newNamed, canonical.Digest())
if err != nil {
return nil, err
}
return newCanonical, nil
} }
return WithDigest(newNamed, canonical.Digest())
} }
return TagNameOnly(named), nil return TagNameOnly(named), nil
} }
// splitDockerDomain splits a repository name to domain and remotename string. // splitDockerDomain splits a repository name to domain and remote-name.
// If no valid domain is found, the default domain is used. Repository name // If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before. // needs to be already validated before.
func splitDockerDomain(name string) (domain, remainder string) { func splitDockerDomain(name string) (domain, remainder string) {
i := strings.IndexRune(name, '/') i := strings.IndexRune(name, '/')
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost" && strings.ToLower(name[:i]) == name[:i]) { if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) {
domain, remainder = defaultDomain, name domain, remainder = defaultDomain, name
} else { } else {
domain, remainder = name[:i], name[i+1:] domain, remainder = name[:i], name[i+1:]
@ -98,7 +134,7 @@ func splitDockerDomain(name string) (domain, remainder string) {
domain = defaultDomain domain = defaultDomain
} }
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
remainder = officialRepoName + "/" + remainder remainder = officialRepoPrefix + remainder
} }
return return
} }
@ -118,8 +154,15 @@ func familiarizeName(named namedRepository) repository {
if repo.domain == defaultDomain { if repo.domain == defaultDomain {
repo.domain = "" repo.domain = ""
// Handle official repositories which have the pattern "library/<official repo name>" // Handle official repositories which have the pattern "library/<official repo name>"
if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { if strings.HasPrefix(repo.path, officialRepoPrefix) {
repo.path = split[1] // TODO(thaJeztah): this check may be too strict, as it assumes the
// "library/" namespace does not have nested namespaces. While this
// is true (currently), technically it would be possible for Docker
// Hub to use those (e.g. "library/distros/ubuntu:latest").
// See https://github.com/distribution/distribution/pull/3769#issuecomment-1302031785.
if remainder := strings.TrimPrefix(repo.path, officialRepoPrefix); !strings.ContainsRune(remainder, '/') {
repo.path = remainder
}
} }
} }
return repo return repo
@ -179,20 +222,3 @@ func ParseAnyReference(ref string) (Reference, error) {
return ParseNormalizedNamed(ref) return ParseNormalizedNamed(ref)
} }
// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
dgst, err := ds.Lookup(ref)
if err == nil {
return digestReference(dgst), nil
}
} else {
if dgst, err := digest.Parse(ref); err == nil {
return digestReference(dgst), nil
}
}
return ParseNormalizedNamed(ref)
}

@ -3,15 +3,16 @@
// //
// Grammar // Grammar
// //
// reference := name [ ":" tag ] [ "@" digest ] // reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]* // name := [domain '/'] remote-name
// domain := host [':' port-number] // domain := host [':' port-number]
// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A // host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A
// domain-name := domain-component ['.' domain-component]* // domain-name := domain-component ['.' domain-component]*
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/ // port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]* // path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/ // path (or "remote-name") := path-component ['/' path-component]*
// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/ // separator := /[_.]|__|[-]*/
// //
// tag := /[\w][\w.-]{0,127}/ // tag := /[\w][\w.-]{0,127}/
@ -23,7 +24,6 @@
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
// //
// identifier := /[a-f0-9]{64}/ // identifier := /[a-f0-9]{64}/
// short-identifier := /[a-f0-9]{6,64}/
package reference package reference
import ( import (
@ -147,7 +147,7 @@ type namedRepository interface {
Path() string Path() string
} }
// Domain returns the domain part of the Named reference // Domain returns the domain part of the [Named] reference.
func Domain(named Named) string { func Domain(named Named) string {
if r, ok := named.(namedRepository); ok { if r, ok := named.(namedRepository); ok {
return r.Domain() return r.Domain()
@ -156,7 +156,7 @@ func Domain(named Named) string {
return domain return domain
} }
// Path returns the name without the domain part of the Named reference // Path returns the name without the domain part of the [Named] reference.
func Path(named Named) (name string) { func Path(named Named) (name string) {
if r, ok := named.(namedRepository); ok { if r, ok := named.(namedRepository); ok {
return r.Path() return r.Path()
@ -177,7 +177,8 @@ func splitDomain(name string) (string, string) {
// hostname and name string. If no valid hostname is // hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value // found, the hostname is empty and the full value
// is returned as name // is returned as name
// DEPRECATED: Use Domain or Path //
// Deprecated: Use [Domain] or [Path].
func SplitHostname(named Named) (string, string) { func SplitHostname(named Named) (string, string) {
if r, ok := named.(namedRepository); ok { if r, ok := named.(namedRepository); ok {
return r.Domain(), r.Path() return r.Domain(), r.Path()
@ -187,7 +188,6 @@ func SplitHostname(named Named) (string, string) {
// Parse parses s and returns a syntactically valid Reference. // Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference. // If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) { func Parse(s string) (Reference, error) {
matches := ReferenceRegexp.FindStringSubmatch(s) matches := ReferenceRegexp.FindStringSubmatch(s)
if matches == nil { if matches == nil {
@ -239,7 +239,6 @@ func Parse(s string) (Reference, error) {
// the Named interface. The reference must have a name and be in the canonical // the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned. // form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference. // If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) { func ParseNamed(s string) (Named, error) {
named, err := ParseNormalizedNamed(s) named, err := ParseNormalizedNamed(s)
if err != nil { if err != nil {
@ -322,11 +321,13 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
// TrimNamed removes any tag or digest from the named reference. // TrimNamed removes any tag or digest from the named reference.
func TrimNamed(ref Named) Named { func TrimNamed(ref Named) Named {
domain, path := SplitHostname(ref) repo := repository{}
return repository{ if r, ok := ref.(namedRepository); ok {
domain: domain, repo.domain, repo.path = r.Domain(), r.Path()
path: path, } else {
repo.domain, repo.path = splitDomain(ref.Name())
} }
return repo
} }
func getBestReferenceType(ref reference) Reference { func getBestReferenceType(ref reference) Reference {

@ -1,48 +1,102 @@
package reference package reference
import "regexp" import (
"regexp"
"strings"
)
var ( // DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
// alphaNumeric defines the alpha numeric atom, typically a var DigestRegexp = regexp.MustCompile(digestPat)
// DomainRegexp matches hostname or IP-addresses, optionally including a port
// number. It defines the structure of potential domain components that may be
// part of image names. This is purposely a subset of what is allowed by DNS to
// ensure backwards compatibility with Docker image names. It may be a subset of
// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between
// square brackets (excluding zone identifiers as defined by [RFC 6874] or special
// addresses such as IPv4-Mapped).
//
// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874.
var DomainRegexp = regexp.MustCompile(domainAndPort)
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
var IdentifierRegexp = regexp.MustCompile(identifier)
// NameRegexp is the format for the name component of references, including
// an optional domain and port, but without tag or digest suffix.
var NameRegexp = regexp.MustCompile(namePat)
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
var ReferenceRegexp = regexp.MustCompile(referencePat)
// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go].
//
// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28
var TagRegexp = regexp.MustCompile(tag)
const (
// alphanumeric defines the alphanumeric atom, typically a
// component of names. This only allows lower case characters and digits. // component of names. This only allows lower case characters and digits.
alphaNumeric = `[a-z0-9]+` alphanumeric = `[a-z0-9]+`
// separator defines the separators allowed to be embedded in name // separator defines the separators allowed to be embedded in name
// components. This allow one period, one or two underscore and multiple // components. This allows one period, one or two underscore and multiple
// dashes. Repeated dashes and underscores are intentionally treated // dashes. Repeated dashes and underscores are intentionally treated
// differently. In order to support valid hostnames as name components, // differently. In order to support valid hostnames as name components,
// supporting repeated dash was added. Additionally double underscore is // supporting repeated dash was added. Additionally double underscore is
// now allowed as a separator to loosen the restriction for previously // now allowed as a separator to loosen the restriction for previously
// supported names. // supported names.
separator = `(?:[._]|__|[-]*)` separator = `(?:[._]|__|[-]+)`
// nameComponent restricts registry path component names to start // localhost is treated as a special value for domain-name. Any other
// with at least one letter or number, with following parts able to be // domain-name without a "." or a ":port" are considered a path component.
// separated by one period, one or two underscore and multiple dashes. localhost = `localhost`
nameComponent = expression(
alphaNumeric,
optional(repeated(separator, alphaNumeric)))
// domainNameComponent restricts the registry domain component of a // domainNameComponent restricts the registry domain component of a
// repository name to start with a component as defined by DomainRegexp. // repository name to start with a component as defined by DomainRegexp.
domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`
// optionalPort matches an optional port-number including the port separator
// (e.g. ":80").
optionalPort = `(?::[0-9]+)?`
// tag matches valid tag names. From docker/docker:graph/tags.go.
tag = `[\w][\w.-]{0,127}`
// digestPat matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
//
// TODO(thaJeztah): this should follow the same rules as https://pkg.go.dev/github.com/opencontainers/go-digest@v1.0.0#DigestRegexp
// so that go-digest defines the canonical format. Note that the go-digest is
// more relaxed:
// - it allows multiple algorithms (e.g. "sha256+b64:<encoded>") to allow
// future expansion of supported algorithms.
// - it allows the "<encoded>" value to use urlsafe base64 encoding as defined
// in [rfc4648, section 5].
//
// [rfc4648, section 5]: https://www.rfc-editor.org/rfc/rfc4648#section-5.
digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
// identifier is the format for a content addressable identifier using sha256.
// These identifiers are like digests without the algorithm, since sha256 is used.
identifier = `([a-f0-9]{64})`
// ipv6address are enclosed between square brackets and may be represented // ipv6address are enclosed between square brackets and may be represented
// in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format // in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format
// are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as // are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as
// IPv4-Mapped are deliberately excluded. // IPv4-Mapped are deliberately excluded.
ipv6address = expression( ipv6address = `\[(?:[a-fA-F0-9:]+)\]`
literal(`[`), `(?:[a-fA-F0-9:]+)`, literal(`]`), )
)
var (
// domainName defines the structure of potential domain components // domainName defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is // that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image // allowed by DNS to ensure backwards compatibility with Docker image
// names. This includes IPv4 addresses on decimal format. // names. This includes IPv4 addresses on decimal format.
domainName = expression( domainName = domainNameComponent + anyTimes(`\.`+domainNameComponent)
domainNameComponent,
optional(repeated(literal(`.`), domainNameComponent)),
)
// host defines the structure of potential domains based on the URI // host defines the structure of potential domains based on the URI
// Host subcomponent on rfc3986. It may be a subset of DNS domain name, // Host subcomponent on rfc3986. It may be a subset of DNS domain name,
@ -53,129 +107,57 @@ var (
// allowed by the URI Host subcomponent on rfc3986 to ensure backwards // allowed by the URI Host subcomponent on rfc3986 to ensure backwards
// compatibility with Docker image names. // compatibility with Docker image names.
domain = expression( domainAndPort = host + optionalPort
host,
optional(literal(`:`), `[0-9]+`))
// DomainRegexp defines the structure of potential domain components
// that may be part of image names. This is purposely a subset of what is
// allowed by DNS to ensure backwards compatibility with Docker image
// names.
DomainRegexp = regexp.MustCompile(domain)
tag = `[\w][\w.-]{0,127}`
// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
TagRegexp = regexp.MustCompile(tag)
anchoredTag = anchored(tag)
// anchoredTagRegexp matches valid tag names, anchored at the start and // anchoredTagRegexp matches valid tag names, anchored at the start and
// end of the matched string. // end of the matched string.
anchoredTagRegexp = regexp.MustCompile(anchoredTag) anchoredTagRegexp = regexp.MustCompile(anchored(tag))
digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
// DigestRegexp matches valid digests.
DigestRegexp = regexp.MustCompile(digestPat)
anchoredDigest = anchored(digestPat)
// anchoredDigestRegexp matches valid digests, anchored at the start and // anchoredDigestRegexp matches valid digests, anchored at the start and
// end of the matched string. // end of the matched string.
anchoredDigestRegexp = regexp.MustCompile(anchoredDigest) anchoredDigestRegexp = regexp.MustCompile(anchored(digestPat))
namePat = expression( // pathComponent restricts path-components to start with an alphanumeric
optional(domain, literal(`/`)), // character, with following parts able to be separated by a separator
nameComponent, // (one period, one or two underscore and multiple dashes).
optional(repeated(literal(`/`), nameComponent))) pathComponent = alphanumeric + anyTimes(separator+alphanumeric)
// NameRegexp is the format for the name component of references. The
// regexp has capturing groups for the domain and name part omitting // remoteName matches the remote-name of a repository. It consists of one
// the separating forward slash from either. // or more forward slash (/) delimited path-components:
NameRegexp = regexp.MustCompile(namePat) //
// pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu"
anchoredName = anchored( remoteName = pathComponent + anyTimes(`/`+pathComponent)
optional(capture(domain), literal(`/`)), namePat = optional(domainAndPort+`/`) + remoteName
capture(nameComponent,
optional(repeated(literal(`/`), nameComponent))))
// anchoredNameRegexp is used to parse a name value, capturing the // anchoredNameRegexp is used to parse a name value, capturing the
// domain and trailing components. // domain and trailing components.
anchoredNameRegexp = regexp.MustCompile(anchoredName) anchoredNameRegexp = regexp.MustCompile(anchored(optional(capture(domainAndPort), `/`), capture(remoteName)))
referencePat = anchored(capture(namePat), referencePat = anchored(capture(namePat), optional(`:`, capture(tag)), optional(`@`, capture(digestPat)))
optional(literal(":"), capture(tag)),
optional(literal("@"), capture(digestPat)))
// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
ReferenceRegexp = regexp.MustCompile(referencePat)
identifier = `([a-f0-9]{64})`
// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
IdentifierRegexp = regexp.MustCompile(identifier)
shortIdentifier = `([a-f0-9]{6,64})`
// ShortIdentifierRegexp is the format used to represent a prefix
// of an identifier. A prefix may be used to match a sha256 identifier
// within a list of trusted identifiers.
ShortIdentifierRegexp = regexp.MustCompile(shortIdentifier)
anchoredIdentifier = anchored(identifier)
// anchoredIdentifierRegexp is used to check or match an // anchoredIdentifierRegexp is used to check or match an
// identifier value, anchored at start and end of string. // identifier value, anchored at start and end of string.
anchoredIdentifierRegexp = regexp.MustCompile(anchoredIdentifier) anchoredIdentifierRegexp = regexp.MustCompile(anchored(identifier))
anchoredShortIdentifier = anchored(shortIdentifier)
// anchoredShortIdentifierRegexp is used to check if a value
// is a possible identifier prefix, anchored at start and end
// of string.
anchoredShortIdentifierRegexp = regexp.MustCompile(anchoredShortIdentifier)
) )
// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) string {
re := regexp.MustCompile(regexp.QuoteMeta(s))
if _, complete := re.LiteralPrefix(); !complete {
panic("must be a literal")
}
return re.String()
}
// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...string) string {
	combined := ""
	for i := 0; i < len(res); i++ {
		combined += res[i]
	}
	return combined
}
// optional wraps the expression in a non-capturing group and makes the // optional wraps the expression in a non-capturing group and makes the
// production optional. // production optional.
func optional(res ...string) string { func optional(res ...string) string {
return group(expression(res...)) + `?` return `(?:` + strings.Join(res, "") + `)?`
}
// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...string) string {
	// Concatenate the sub-expressions, then group and apply the `+` quantifier
	// (equivalent to group(expression(res...)) + `+`).
	var joined string
	for _, part := range res {
		joined += part
	}
	return `(?:` + joined + `)+`
}
// group wraps the regexp in a non-capturing group. // anyTimes wraps the expression in a non-capturing group that can occur
func group(res ...string) string { // any number of times.
return `(?:` + expression(res...) + `)` func anyTimes(res ...string) string {
return `(?:` + strings.Join(res, "") + `)*`
} }
// capture wraps the expression in a capturing group. // capture wraps the expression in a capturing group.
func capture(res ...string) string { func capture(res ...string) string {
return `(` + expression(res...) + `)` return `(` + strings.Join(res, "") + `)`
} }
// anchored anchors the regular expression by adding start and end delimiters. // anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...string) string { func anchored(res ...string) string {
return `^` + expression(res...) + `$` return `^` + strings.Join(res, "") + `$`
} }

@ -0,0 +1,75 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reference
import (
"sort"
)
// Sort sorts string references preferring higher information references.
//
// The precedence is as follows:
//
//  1. [Named] + [Tagged] + [Digested] (e.g., "docker.io/library/busybox:latest@sha256:<digest>")
//  2. [Named] + [Tagged] (e.g., "docker.io/library/busybox:latest")
//  3. [Named] + [Digested] (e.g., "docker.io/library/busybo@sha256:<digest>")
//  4. [Named] (e.g., "docker.io/library/busybox")
//  5. [Digested] (e.g., "docker.io@sha256:<digest>")
//  6. Parse error
func Sort(references []string) []string {
	var (
		parsed      []Reference // references that parsed successfully
		unparseable []string    // references that failed to parse
	)
	for _, s := range references {
		if r, err := ParseAnyReference(s); err == nil {
			parsed = append(parsed, r)
		} else {
			unparseable = append(unparseable, s)
		}
	}
	// Order parsed references by rank first (lower rank = more information),
	// breaking ties lexicographically for a deterministic result.
	sort.Slice(parsed, func(i, j int) bool {
		ri, rj := refRank(parsed[i]), refRank(parsed[j])
		if ri != rj {
			return ri < rj
		}
		return parsed[i].String() < parsed[j].String()
	})
	sort.Strings(unparseable)
	var out []string
	for _, r := range parsed {
		out = append(out, r.String())
	}
	// Unparseable references sort last.
	return append(out, unparseable...)
}
// refRank returns the sort precedence of ref: lower values carry more
// information (see Sort). Unnamed, undigested references rank last.
func refRank(ref Reference) uint8 {
	_, named := ref.(Named)
	_, tagged := ref.(Tagged)
	_, digested := ref.(Digested)
	switch {
	case named && tagged && digested:
		return 1
	case named && tagged:
		return 2
	case named && digested:
		return 3
	case named:
		return 4
	default:
		return 5
	}
}

@ -0,0 +1,112 @@
<!-- omit in toc -->
# Contributing to mergo
First off, thanks for taking the time to contribute! ❤️
All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
> - Star the project
> - Tweet about it
> - Refer this project in your project's readme
> - Mention the project at local meetups and tell your friends/colleagues
<!-- omit in toc -->
## Table of Contents
- [Code of Conduct](#code-of-conduct)
- [I Have a Question](#i-have-a-question)
- [I Want To Contribute](#i-want-to-contribute)
- [Reporting Bugs](#reporting-bugs)
- [Suggesting Enhancements](#suggesting-enhancements)
## Code of Conduct
This project and everyone participating in it is governed by the
[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md).
By participating, you are expected to uphold this code. Please report unacceptable behavior
to the project maintainers.
## I Have a Question
> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
If you then still feel the need to ask a question and need clarification, we recommend the following:
- Open an [Issue](https://github.com/imdario/mergo/issues/new).
- Provide as much context as you can about what you're running into.
- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
We will then take care of the issue as soon as possible.
## I Want To Contribute
> ### Legal Notice <!-- omit in toc -->
> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
### Reporting Bugs
<!-- omit in toc -->
#### Before Submitting a Bug Report
A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
- Make sure that you are using the latest version.
- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](https://pkg.go.dev/github.com/imdario/mergo). If you are looking for support, you might want to check [this section](#i-have-a-question)).
- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
- Collect information about the bug:
- Stack trace (Traceback)
- OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
- Possibly your input and the output
- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
<!-- omit in toc -->
#### How Do I Submit a Good Bug Report?
> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be reported via the [Tidelift security contact](https://tidelift.com/security).
<!-- You may add a PGP key to allow the messages to be sent encrypted as well. -->
We use GitHub issues to track bugs and errors. If you run into an issue with the project:
- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
- Explain the behavior you would expect and the actual behavior.
- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
- Provide the information you collected in the previous section.
Once it's filed:
- The project team will label the issue accordingly.
- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.
### Suggesting Enhancements
This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
<!-- omit in toc -->
#### Before Submitting an Enhancement
- Make sure that you are using the latest version.
- Read the [documentation](https://pkg.go.dev/github.com/imdario/mergo) carefully and find out if the functionality is already covered, maybe by an individual configuration.
- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
<!-- omit in toc -->
#### How Do I Submit a Good Enhancement Suggestion?
Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues).
- Use a **clear and descriptive title** for the issue to identify the suggestion.
- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. <!-- this should only be included if the project has a GUI -->
- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
<!-- omit in toc -->
## Attribution
This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!

@ -1,6 +1,5 @@
# Mergo # Mergo
[![GoDoc][3]][4] [![GoDoc][3]][4]
[![GitHub release][5]][6] [![GitHub release][5]][6]
[![GoCard][7]][8] [![GoCard][7]][8]
@ -9,6 +8,7 @@
[![Sourcegraph][11]][12] [![Sourcegraph][11]][12]
[![FOSSA Status][13]][14] [![FOSSA Status][13]][14]
[![Become my sponsor][15]][16] [![Become my sponsor][15]][16]
[![Tidelift][17]][18]
[1]: https://travis-ci.org/imdario/mergo.png [1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo [2]: https://travis-ci.org/imdario/mergo
@ -26,6 +26,8 @@
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
[15]: https://img.shields.io/github/sponsors/imdario [15]: https://img.shields.io/github/sponsors/imdario
[16]: https://github.com/sponsors/imdario [16]: https://github.com/sponsors/imdario
[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
@ -55,7 +57,6 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
### Mergo in the wild ### Mergo in the wild
- [cli/cli](https://github.com/cli/cli)
- [moby/moby](https://github.com/moby/moby) - [moby/moby](https://github.com/moby/moby)
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
- [vmware/dispatch](https://github.com/vmware/dispatch) - [vmware/dispatch](https://github.com/vmware/dispatch)

@ -0,0 +1,14 @@
# Security Policy
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| 0.3.x | :white_check_mark: |
| < 0.3 | :x: |
## Security contact information
To report a security vulnerability, please use the
[Tidelift security contact](https://tidelift.com/security).
Tidelift will coordinate the fix and disclosure.

@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
} }
} }
// Remember, remember... // Remember, remember...
visited[h] = &visit{addr, typ, seen} visited[h] = &visit{typ, seen, addr}
} }
zeroValue := reflect.Value{} zeroValue := reflect.Value{}
switch dst.Kind() { switch dst.Kind() {
@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
} }
fieldName := field.Name fieldName := field.Name
fieldName = changeInitialCase(fieldName, unicode.ToLower) fieldName = changeInitialCase(fieldName, unicode.ToLower)
if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
dstMap[fieldName] = src.Field(i).Interface() dstMap[fieldName] = src.Field(i).Interface()
} }
} }
@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
func _map(dst, src interface{}, opts ...func(*Config)) error { func _map(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
return ErrNonPointerAgument return ErrNonPointerArgument
} }
var ( var (
vDst, vSrc reflect.Value vDst, vSrc reflect.Value

@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool {
} }
type Config struct { type Config struct {
Transformers Transformers
Overwrite bool Overwrite bool
ShouldNotDereference bool
AppendSlice bool AppendSlice bool
TypeCheck bool TypeCheck bool
Transformers Transformers
overwriteWithEmptyValue bool overwriteWithEmptyValue bool
overwriteSliceWithEmptyValue bool overwriteSliceWithEmptyValue bool
sliceDeepCopy bool sliceDeepCopy bool
@ -76,7 +77,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
} }
} }
// Remember, remember... // Remember, remember...
visited[h] = &visit{addr, typ, seen} visited[h] = &visit{typ, seen, addr}
} }
if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
@ -95,7 +96,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
} }
} }
} else { } else {
if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) {
dst.Set(src) dst.Set(src)
} }
} }
@ -110,7 +111,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
} }
if src.Kind() != reflect.Map { if src.Kind() != reflect.Map {
if overwrite { if overwrite && dst.CanSet() {
dst.Set(src) dst.Set(src)
} }
return return
@ -162,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dstSlice = reflect.ValueOf(dstElement.Interface()) dstSlice = reflect.ValueOf(dstElement.Interface())
} }
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
if typeCheck && srcSlice.Type() != dstSlice.Type() { if typeCheck && srcSlice.Type() != dstSlice.Type() {
return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
} }
@ -194,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dst.SetMapIndex(key, dstSlice) dst.SetMapIndex(key, dstSlice)
} }
} }
if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
continue if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) {
if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice {
continue
}
if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map {
continue
}
} }
if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) {
if dst.IsNil() { if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type())) dst.Set(reflect.MakeMap(dst.Type()))
} }
dst.SetMapIndex(key, srcElement) dst.SetMapIndex(key, srcElement)
} }
} }
// Ensure that all keys in dst are deleted if they are not in src.
if overwriteWithEmptySrc {
for _, key := range dst.MapKeys() {
srcElement := src.MapIndex(key)
if !srcElement.IsValid() {
dst.SetMapIndex(key, reflect.Value{})
}
}
}
case reflect.Slice: case reflect.Slice:
if !dst.CanSet() { if !dst.CanSet() {
break break
} }
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
dst.Set(src) dst.Set(src)
} else if config.AppendSlice { } else if config.AppendSlice {
if src.Type() != dst.Type() { if src.Type() != dst.Type() {
@ -244,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
if src.Kind() != reflect.Interface { if src.Kind() != reflect.Interface {
if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
if dst.CanSet() && (overwrite || isEmptyValue(dst)) { if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
dst.Set(src) dst.Set(src)
} }
} else if src.Kind() == reflect.Ptr { } else if src.Kind() == reflect.Ptr {
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { if !config.ShouldNotDereference {
return if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
}
} else {
if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
dst.Set(src)
}
} }
} else if dst.Elem().Type() == src.Type() { } else if dst.Elem().Type() == src.Type() {
if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
@ -262,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
} }
if dst.IsNil() || overwrite { if dst.IsNil() || overwrite {
if dst.CanSet() && (overwrite || isEmptyValue(dst)) { if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
dst.Set(src) dst.Set(src)
} }
break break
@ -275,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
break break
} }
default: default:
mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc)
if mustSet { if mustSet {
if dst.CanSet() { if dst.CanSet() {
dst.Set(src) dst.Set(src)
@ -326,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) {
config.overwriteSliceWithEmptyValue = true config.overwriteSliceWithEmptyValue = true
} }
// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
// (i.e. a non-nil pointer is never considered empty).
func WithoutDereference(config *Config) {
config.ShouldNotDereference = true
}
// WithAppendSlice will make merge append slices instead of overwriting it. // WithAppendSlice will make merge append slices instead of overwriting it.
func WithAppendSlice(config *Config) { func WithAppendSlice(config *Config) {
config.AppendSlice = true config.AppendSlice = true
@ -344,7 +373,7 @@ func WithSliceDeepCopy(config *Config) {
func merge(dst, src interface{}, opts ...func(*Config)) error { func merge(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
return ErrNonPointerAgument return ErrNonPointerArgument
} }
var ( var (
vDst, vSrc reflect.Value vDst, vSrc reflect.Value

@ -20,7 +20,7 @@ var (
ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
ErrNonPointerAgument = errors.New("dst must be a pointer") ErrNonPointerArgument = errors.New("dst must be a pointer")
) )
// During deepMerge, must keep track of checks that are // During deepMerge, must keep track of checks that are
@ -28,13 +28,13 @@ var (
// checks in progress are true when it reencounters them. // checks in progress are true when it reencounters them.
// Visited are stored in a map indexed by 17 * a1 + a2; // Visited are stored in a map indexed by 17 * a1 + a2;
type visit struct { type visit struct {
ptr uintptr
typ reflect.Type typ reflect.Type
next *visit next *visit
ptr uintptr
} }
// From src/pkg/encoding/json/encode.go. // From src/pkg/encoding/json/encode.go.
func isEmptyValue(v reflect.Value) bool { func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
switch v.Kind() { switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String: case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0 return v.Len() == 0
@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool {
if v.IsNil() { if v.IsNil() {
return true return true
} }
return isEmptyValue(v.Elem()) if shouldDereference {
return isEmptyValue(v.Elem(), shouldDereference)
}
return false
case reflect.Func: case reflect.Func:
return v.IsNil() return v.IsNil()
case reflect.Invalid: case reflect.Invalid:

@ -144,8 +144,8 @@ github.com/cenkalti/backoff/v4
github.com/cespare/xxhash/v2 github.com/cespare/xxhash/v2
# github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e # github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e
## explicit ## explicit
# github.com/compose-spec/compose-go v1.9.0 # github.com/compose-spec/compose-go v1.13.4
## explicit; go 1.18 ## explicit; go 1.19
github.com/compose-spec/compose-go/cli github.com/compose-spec/compose-go/cli
github.com/compose-spec/compose-go/consts github.com/compose-spec/compose-go/consts
github.com/compose-spec/compose-go/dotenv github.com/compose-spec/compose-go/dotenv
@ -208,9 +208,8 @@ github.com/cyphar/filepath-securejoin
# github.com/davecgh/go-spew v1.1.1 # github.com/davecgh/go-spew v1.1.1
## explicit ## explicit
github.com/davecgh/go-spew/spew github.com/davecgh/go-spew/spew
# github.com/distribution/distribution/v3 v3.0.0-20221103125252-ebfa2a0ac0a9 # github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa
## explicit; go 1.18 ## explicit; go 1.18
github.com/distribution/distribution/v3/digestset
github.com/distribution/distribution/v3/reference github.com/distribution/distribution/v3/reference
# github.com/docker/cli v23.0.1+incompatible # github.com/docker/cli v23.0.1+incompatible
## explicit ## explicit
@ -450,7 +449,7 @@ github.com/hashicorp/hcl/v2/hclparse
github.com/hashicorp/hcl/v2/hclsyntax github.com/hashicorp/hcl/v2/hclsyntax
github.com/hashicorp/hcl/v2/hclwrite github.com/hashicorp/hcl/v2/hclwrite
github.com/hashicorp/hcl/v2/json github.com/hashicorp/hcl/v2/json
# github.com/imdario/mergo v0.3.13 # github.com/imdario/mergo v0.3.15
## explicit; go 1.13 ## explicit; go 1.13
github.com/imdario/mergo github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.0.1 # github.com/inconshreveable/mousetrap v1.0.1

Loading…
Cancel
Save