Merge pull request #1971 from glours/bump-compose-go-v1.17.0

bump compose-go version to v1.17.0 to fix issue with depends_on
Committed by CrazyMax (via GitHub)
commit 14747a490a
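Editor's note (illustrative, not part of this diff): the regression behind this bump is that compose files using the short list form of depends_on could fail to load through buildx bake with compose-go v1.14.0; v1.17.0 expands each entry to a full dependency with condition: service_started and the new required: true default (see transformDependsOnConfig and the schema change further down). The Go sketch below shows that behaviour through compose-go's public loader; import paths and options mirror code elsewhere in this diff, while the project name "demo" and the printed output are assumptions.

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/loader"
	"github.com/compose-spec/compose-go/types"
)

func main() {
	yml := []byte(`
services:
  foo:
    build:
      context: .
    depends_on:
      - bar
  bar:
    build:
      context: .
`)
	// Load the file with compose-go; with v1.17.0 the short depends_on syntax
	// is normalized to {condition: service_started, required: true}.
	project, err := loader.Load(types.ConfigDetails{
		WorkingDir:  ".",
		ConfigFiles: []types.ConfigFile{{Filename: "compose.yml", Content: yml}},
		Environment: map[string]string{},
	}, func(o *loader.Options) {
		o.SetProjectName("demo", true)
	})
	if err != nil {
		panic(err)
	}
	foo, _ := project.GetService("foo")
	fmt.Printf("%+v\n", foo.DependsOn["bar"])
}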

@@ -303,7 +303,7 @@ services:
require.True(t, ok)
require.Equal(t, "Dockerfile.webapp", *m["webapp"].Dockerfile)
-require.Equal(t, ".", *m["webapp"].Context)
+require.Equal(t, "/src/bake", *m["webapp"].Context)
require.Equal(t, ptrstr("1"), m["webapp"].Args["buildno"])
require.Equal(t, ptrstr("12"), m["webapp"].Args["buildno2"])
@@ -364,7 +364,7 @@ services:
_, ok = m["web_app"]
require.True(t, ok)
require.Equal(t, "Dockerfile.webapp", *m["web_app"].Dockerfile)
-require.Equal(t, ".", *m["web_app"].Context)
+require.Equal(t, "/src/bake", *m["web_app"].Context)
require.Equal(t, ptrstr("1"), m["web_app"].Args["buildno"])
require.Equal(t, ptrstr("12"), m["web_app"].Args["buildno2"])
@@ -555,7 +555,7 @@ services:
require.Equal(t, "Dockerfile", *m["app1"].Dockerfile)
require.Equal(t, ".", *m["app1"].Context)
require.Equal(t, "Dockerfile", *m["app2"].Dockerfile)
-require.Equal(t, ".", *m["app2"].Context)
+require.Equal(t, "/src/bake", *m["app2"].Context)
}
func TestReadContextFromTargetChain(t *testing.T) {

@@ -62,11 +62,11 @@ secrets:
return c.Targets[i].Name < c.Targets[j].Name
})
require.Equal(t, "db", c.Targets[0].Name)
-require.Equal(t, "./db", *c.Targets[0].Context)
+require.Equal(t, "/src/bake/db", *c.Targets[0].Context)
require.Equal(t, []string{"docker.io/tonistiigi/db"}, c.Targets[0].Tags)
require.Equal(t, "webapp", c.Targets[1].Name)
-require.Equal(t, "./dir", *c.Targets[1].Context)
+require.Equal(t, "/src/bake/dir", *c.Targets[1].Context)
require.Equal(t, map[string]string{"foo": "/bar"}, c.Targets[1].Contexts)
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
require.Equal(t, 1, len(c.Targets[1].Args))
@@ -80,7 +80,7 @@ secrets:
}, c.Targets[1].Secrets)
require.Equal(t, "webapp2", c.Targets[2].Name)
-require.Equal(t, "./dir", *c.Targets[2].Context)
+require.Equal(t, "/src/bake/dir", *c.Targets[2].Context)
require.Equal(t, "FROM alpine\n", *c.Targets[2].DockerfileInline)
}
@@ -656,6 +656,66 @@ services:
require.Equal(t, map[string]*string{"bar": ptrstr("baz")}, c.Targets[0].Args)
}
func TestDependsOn(t *testing.T) {
var dt = []byte(`
services:
foo:
build:
context: .
ports:
- 3306:3306
depends_on:
- bar
bar:
build:
context: .
`)
_, err := ParseCompose([]compose.ConfigFile{{Content: dt}}, nil)
require.NoError(t, err)
}
func TestInclude(t *testing.T) {
tmpdir := t.TempDir()
err := os.WriteFile(filepath.Join(tmpdir, "compose-foo.yml"), []byte(`
services:
foo:
build:
context: .
target: buildfoo
ports:
- 3306:3306
`), 0644)
require.NoError(t, err)
var dt = []byte(`
include:
- compose-foo.yml
services:
bar:
build:
context: .
target: buildbar
`)
chdir(t, tmpdir)
c, err := ParseComposeFiles([]File{{
Name: "compose.yml",
Data: dt,
}})
require.NoError(t, err)
require.Equal(t, 2, len(c.Targets))
sort.Slice(c.Targets, func(i, j int) bool {
return c.Targets[i].Name < c.Targets[j].Name
})
require.Equal(t, "bar", c.Targets[0].Name)
require.Equal(t, "buildbar", *c.Targets[0].Target)
require.Equal(t, "foo", c.Targets[1].Name)
require.Equal(t, "buildfoo", *c.Targets[1].Target)
}
// chdir changes the current working directory to the named directory,
// and then restore the original working directory at the end of the test.
func chdir(t *testing.T, dir string) {

@@ -5,7 +5,7 @@ go 1.20
require (
github.com/Masterminds/semver/v3 v3.2.1
github.com/aws/aws-sdk-go-v2/config v1.18.16
-github.com/compose-spec/compose-go v1.14.0
+github.com/compose-spec/compose-go v1.17.0
github.com/containerd/console v1.0.3
github.com/containerd/containerd v1.7.2
github.com/containerd/continuity v0.4.1
@@ -39,8 +39,8 @@ require (
github.com/zclconf/go-cty v1.10.0
go.opentelemetry.io/otel v1.14.0
go.opentelemetry.io/otel/trace v1.14.0
-golang.org/x/mod v0.9.0
-golang.org/x/sync v0.2.0
+golang.org/x/mod v0.11.0
+golang.org/x/sync v0.3.0
golang.org/x/term v0.8.0
google.golang.org/grpc v1.53.0
gopkg.in/yaml.v3 v3.0.1
@@ -105,7 +105,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
-github.com/imdario/mergo v0.3.15 // indirect
+github.com/imdario/mergo v0.3.16 // indirect
github.com/in-toto/in-toto-golang v0.5.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jinzhu/gorm v1.9.2 // indirect
@@ -157,6 +157,7 @@ require (
go.opentelemetry.io/otel/sdk v1.14.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
golang.org/x/crypto v0.2.0 // indirect
+golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.5.0 // indirect
golang.org/x/sys v0.8.0 // indirect

@@ -122,8 +122,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
-github.com/compose-spec/compose-go v1.14.0 h1:/+tQxBEPIrfsi87Qh7/VjMzcJN3BRNER/RO71ku+u6E=
-github.com/compose-spec/compose-go v1.14.0/go.mod h1:m0o4G6MQDHjjz9rY7No9FpnNi+9sKic262rzrwuCqic=
+github.com/compose-spec/compose-go v1.17.0 h1:cvje90CU94dQyTnJoHJYjx9yE4Iggse1XmGcO3Qi5ts=
+github.com/compose-spec/compose-go v1.17.0/go.mod h1:zR2tP1+kZHi5vJz7PjpW6oMoDji/Js3GHjP+hfjf70Q=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
@@ -316,8 +316,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl/v2 v2.8.2 h1:wmFle3D1vu0okesm8BTLVDyJ6/OL9DCLUwn0b2OptiY=
github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
-github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -565,6 +565,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
+golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -585,8 +587,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
-golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
+golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -638,8 +640,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
-golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@@ -17,7 +17,6 @@
package cli
import (
-"bytes"
"io"
"os"
"path/filepath"
@@ -250,7 +249,7 @@ func WithDotEnv(o *ProjectOptions) error {
if err != nil {
return err
}
-envMap, err := GetEnvFromFile(o.Environment, wd, o.EnvFiles)
+envMap, err := dotenv.GetEnvFromFile(o.Environment, wd, o.EnvFiles)
if err != nil {
return err
}
@@ -262,65 +261,6 @@ func WithDotEnv(o *ProjectOptions) error {
return nil
}
func GetEnvFromFile(currentEnv map[string]string, workingDir string, filenames []string) (map[string]string, error) {
envMap := make(map[string]string)
dotEnvFiles := filenames
if len(dotEnvFiles) == 0 {
dotEnvFiles = append(dotEnvFiles, filepath.Join(workingDir, ".env"))
}
for _, dotEnvFile := range dotEnvFiles {
abs, err := filepath.Abs(dotEnvFile)
if err != nil {
return envMap, err
}
dotEnvFile = abs
s, err := os.Stat(dotEnvFile)
if os.IsNotExist(err) {
if len(filenames) == 0 {
return envMap, nil
}
return envMap, errors.Errorf("Couldn't find env file: %s", dotEnvFile)
}
if err != nil {
return envMap, err
}
if s.IsDir() {
if len(filenames) == 0 {
return envMap, nil
}
return envMap, errors.Errorf("%s is a directory", dotEnvFile)
}
b, err := os.ReadFile(dotEnvFile)
if os.IsNotExist(err) {
return nil, errors.Errorf("Couldn't read env file: %s", dotEnvFile)
}
if err != nil {
return envMap, err
}
env, err := dotenv.ParseWithLookup(bytes.NewReader(b), func(k string) (string, bool) {
v, ok := currentEnv[k]
if ok {
return v, true
}
v, ok = envMap[k]
return v, ok
})
if err != nil {
return envMap, errors.Wrapf(err, "failed to read %s", dotEnvFile)
}
for k, v := range env {
envMap[k] = v
}
}
return envMap, nil
}
// WithInterpolation set ProjectOptions to enable/skip interpolation
func WithInterpolation(interpolation bool) ProjectOptionsFn {
return func(o *ProjectOptions) error {

@@ -0,0 +1,84 @@
/*
Copyright 2020 The Compose Specification Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dotenv
import (
"bytes"
"os"
"path/filepath"
"github.com/pkg/errors"
)
func GetEnvFromFile(currentEnv map[string]string, workingDir string, filenames []string) (map[string]string, error) {
envMap := make(map[string]string)
dotEnvFiles := filenames
if len(dotEnvFiles) == 0 {
dotEnvFiles = append(dotEnvFiles, filepath.Join(workingDir, ".env"))
}
for _, dotEnvFile := range dotEnvFiles {
abs, err := filepath.Abs(dotEnvFile)
if err != nil {
return envMap, err
}
dotEnvFile = abs
s, err := os.Stat(dotEnvFile)
if os.IsNotExist(err) {
if len(filenames) == 0 {
return envMap, nil
}
return envMap, errors.Errorf("Couldn't find env file: %s", dotEnvFile)
}
if err != nil {
return envMap, err
}
if s.IsDir() {
if len(filenames) == 0 {
return envMap, nil
}
return envMap, errors.Errorf("%s is a directory", dotEnvFile)
}
b, err := os.ReadFile(dotEnvFile)
if os.IsNotExist(err) {
return nil, errors.Errorf("Couldn't read env file: %s", dotEnvFile)
}
if err != nil {
return envMap, err
}
env, err := ParseWithLookup(bytes.NewReader(b), func(k string) (string, bool) {
v, ok := currentEnv[k]
if ok {
return v, true
}
v, ok = envMap[k]
return v, ok
})
if err != nil {
return envMap, errors.Wrapf(err, "failed to read %s", dotEnvFile)
}
for k, v := range env {
envMap[k] = v
}
}
return envMap, nil
}
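// Editor's note (illustrative, not part of this diff): GetEnvFromFile keeps the
// signature it had in the cli package, so .env resolution can now be reused from
// the dotenv package directly, as WithDotEnv does above. A minimal sketch; the
// example environment values are assumptions.
package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/dotenv"
)

func main() {
	// With no explicit file names, the helper looks for <workingDir>/.env and
	// returns an empty map if it does not exist; the current environment is only
	// consulted for ${VAR} lookups while parsing the file.
	env, err := dotenv.GetEnvFromFile(map[string]string{"FOO": "from-process"}, ".", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(env)
}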

@@ -123,8 +123,8 @@ loop:
}
return "", "", inherited, fmt.Errorf(
-`line %d: unexpected character %q in variable name`,
-p.line, string(rune))
+`line %d: unexpected character %q in variable name %q`,
+p.line, string(rune), strings.Split(src, "\n")[0])
}
}
@@ -153,17 +153,24 @@ func (p *parser) extractVarValue(src string, envMap map[string]string, lookupFn
return retVal, rest, err
}
+previousCharIsEscape := false
// lookup quoted string terminator
for i := 1; i < len(src); i++ {
if src[i] == '\n' {
p.line++
}
if char := src[i]; char != quote {
+if !previousCharIsEscape && char == '\\' {
+previousCharIsEscape = true
+} else {
+previousCharIsEscape = false
+}
continue
}
// skip escaped quote symbol (\" or \', depends on quote)
-if prevChar := src[i-1]; prevChar == '\\' {
+if previousCharIsEscape {
+previousCharIsEscape = false
continue
}

@@ -24,7 +24,7 @@ services:
- bar
labels: [FOO=BAR]
additional_contexts:
-foo: /bar
+foo: ./bar
secrets:
- secret1
- source: secret2
@@ -181,6 +181,7 @@ services:
timeout: 1s
retries: 5
start_period: 15s
+start_interval: 5s
# Any valid image reference - repo, tag, id, sha
image: redis

@ -0,0 +1,120 @@
/*
Copyright 2020 The Compose Specification Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package loader
import (
"fmt"
"path/filepath"
"github.com/compose-spec/compose-go/dotenv"
"github.com/compose-spec/compose-go/types"
"github.com/pkg/errors"
)
// LoadIncludeConfig parse the require config from raw yaml
func LoadIncludeConfig(source []interface{}) ([]types.IncludeConfig, error) {
var requires []types.IncludeConfig
err := Transform(source, &requires)
return requires, err
}
var transformIncludeConfig TransformerFunc = func(data interface{}) (interface{}, error) {
switch value := data.(type) {
case string:
return map[string]interface{}{"path": value}, nil
case map[string]interface{}:
return value, nil
default:
return data, errors.Errorf("invalid type %T for `include` configuration", value)
}
}
func loadInclude(configDetails types.ConfigDetails, model *types.Config, options *Options, loaded []string) (*types.Config, error) {
for _, r := range model.Include {
for i, p := range r.Path {
if !filepath.IsAbs(p) {
r.Path[i] = filepath.Join(configDetails.WorkingDir, p)
}
}
if r.ProjectDirectory == "" {
r.ProjectDirectory = filepath.Dir(r.Path[0])
}
loadOptions := options.clone()
loadOptions.SetProjectName(model.Name, true)
loadOptions.ResolvePaths = true
loadOptions.SkipNormalization = true
loadOptions.SkipConsistencyCheck = true
env, err := dotenv.GetEnvFromFile(configDetails.Environment, r.ProjectDirectory, r.EnvFile)
if err != nil {
return nil, err
}
imported, err := load(types.ConfigDetails{
WorkingDir: r.ProjectDirectory,
ConfigFiles: types.ToConfigFiles(r.Path),
Environment: env,
}, loadOptions, loaded)
if err != nil {
return nil, err
}
err = importResources(model, imported, r.Path)
if err != nil {
return nil, err
}
}
model.Include = nil
return model, nil
}
// importResources import into model all resources defined by imported, and report error on conflict
func importResources(model *types.Config, imported *types.Project, path []string) error {
services := mapByName(model.Services)
for _, service := range imported.Services {
if _, ok := services[service.Name]; ok {
return fmt.Errorf("imported compose file %s defines conflicting service %s", path, service.Name)
}
model.Services = append(model.Services, service)
}
for n, network := range imported.Networks {
if _, ok := model.Networks[n]; ok {
return fmt.Errorf("imported compose file %s defines conflicting network %s", path, n)
}
model.Networks[n] = network
}
for n, volume := range imported.Volumes {
if _, ok := model.Volumes[n]; ok {
return fmt.Errorf("imported compose file %s defines conflicting volume %s", path, n)
}
model.Volumes[n] = volume
}
for n, secret := range imported.Secrets {
if _, ok := model.Secrets[n]; ok {
return fmt.Errorf("imported compose file %s defines conflicting secret %s", path, n)
}
model.Secrets[n] = secret
}
for n, config := range imported.Configs {
if _, ok := model.Configs[n]; ok {
return fmt.Errorf("imported compose file %s defines conflicting config %s", path, n)
}
model.Configs[n] = config
}
return nil
}
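// Editor's note (illustrative, not part of this diff): end-to-end sketch of the
// new `include` support using the public loader API shown in this file. The
// temporary directory layout and project name are assumptions.
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/compose-spec/compose-go/loader"
	"github.com/compose-spec/compose-go/types"
)

func main() {
	dir, _ := os.MkdirTemp("", "include-demo")
	defer os.RemoveAll(dir)

	// Sub-project pulled in through the top-level `include` element.
	sub := []byte("services:\n  foo:\n    image: alpine\n")
	if err := os.WriteFile(filepath.Join(dir, "compose-foo.yml"), sub, 0o644); err != nil {
		panic(err)
	}

	root := []byte("include:\n  - compose-foo.yml\nservices:\n  bar:\n    image: alpine\n")
	project, err := loader.Load(types.ConfigDetails{
		WorkingDir:  dir,
		ConfigFiles: []types.ConfigFile{{Filename: filepath.Join(dir, "compose.yml"), Content: root}},
		Environment: map[string]string{},
	}, func(o *loader.Options) {
		o.SetProjectName("demo", true)
	})
	if err != nil {
		panic(err)
	}
	for _, s := range project.Services {
		fmt.Println(s.Name) // both bar and foo, merged by importResources
	}
}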

@@ -56,6 +56,8 @@ type Options struct {
SkipConsistencyCheck bool
// Skip extends
SkipExtends bool
+// SkipInclude will ignore `include` and only load model from file(s) set by ConfigDetails
+SkipInclude bool
// Interpolation options
Interpolate *interp.Options
// Discard 'env_file' entries after resolving to 'environment' section
@@ -68,6 +70,24 @@ type Options struct {
Profiles []string
}
func (o *Options) clone() *Options {
return &Options{
SkipValidation: o.SkipValidation,
SkipInterpolation: o.SkipInterpolation,
SkipNormalization: o.SkipNormalization,
ResolvePaths: o.ResolvePaths,
ConvertWindowsPaths: o.ConvertWindowsPaths,
SkipConsistencyCheck: o.SkipConsistencyCheck,
SkipExtends: o.SkipExtends,
SkipInclude: o.SkipInclude,
Interpolate: o.Interpolate,
discardEnvFiles: o.discardEnvFiles,
projectName: o.projectName,
projectNameImperativelySet: o.projectNameImperativelySet,
Profiles: o.Profiles,
}
}
func (o *Options) SetProjectName(name string, imperativelySet bool) {
o.projectName = name
o.projectNameImperativelySet = imperativelySet
@@ -185,6 +205,7 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
LookupValue: configDetails.LookupEnv,
TypeCastMapping: interpolateTypeCastMapping,
},
+ResolvePaths: true,
}
for _, op := range options {
@@ -195,8 +216,22 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
if err != nil {
return nil, err
}
opts.projectName = projectName
return load(configDetails, opts, nil)
}
func load(configDetails types.ConfigDetails, opts *Options, loaded []string) (*types.Project, error) {
var model *types.Config
mainFile := configDetails.ConfigFiles[0].Filename
for _, f := range loaded {
if f == mainFile {
loaded = append(loaded, mainFile)
return nil, errors.Errorf("include cycle detected:\n%s\n include %s", loaded[0], strings.Join(loaded[1:], "\n include "))
}
}
loaded = append(loaded, mainFile)
for i, file := range configDetails.ConfigFiles {
var postProcessor PostProcessor
configDict := file.Config
@@ -231,10 +266,18 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
return nil, err
}
+if !opts.SkipInclude {
+cfg, err = loadInclude(configDetails, cfg, opts, loaded)
+if err != nil {
+return nil, err
+}
+}
if i == 0 {
model = cfg
continue
}
merged, err := merge([]*types.Config{model, cfg})
if err != nil {
return nil, err
@@ -248,16 +291,8 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
model = merged
}
for _, s := range model.Services {
var newEnvFiles types.StringList
for _, ef := range s.EnvFile {
newEnvFiles = append(newEnvFiles, absPath(configDetails.WorkingDir, ef))
}
s.EnvFile = newEnvFiles
}
project := &types.Project{
-Name: projectName,
+Name: opts.projectName,
WorkingDir: configDetails.WorkingDir,
Services: model.Services,
Networks: model.Networks,
@@ -269,14 +304,30 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
}
if !opts.SkipNormalization {
-err = Normalize(project, opts.ResolvePaths)
+err := Normalize(project)
if err != nil {
return nil, err
}
}
if opts.ResolvePaths {
err := ResolveRelativePaths(project)
if err != nil {
return nil, err
}
}
if opts.ConvertWindowsPaths {
for i, service := range project.Services {
for j, volume := range service.Volumes {
service.Volumes[j] = convertVolumePath(volume)
}
project.Services[i] = service
}
}
if !opts.SkipConsistencyCheck {
-err = checkConsistency(project)
+err := checkConsistency(project)
if err != nil {
return nil, err
}
@@ -287,7 +338,7 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
}
project.ApplyProfiles(opts.Profiles)
-err = project.ResolveServicesEnvironment(opts.discardEnvFiles)
+err := project.ResolveServicesEnvironment(opts.discardEnvFiles)
return project, err
}
@@ -419,7 +470,6 @@ func loadSections(filename string, config map[string]interface{}, configDetails
if err != nil {
return nil, err
}
cfg.Networks, err = LoadNetworks(getSection(config, "networks"))
if err != nil {
return nil, err
@@ -428,11 +478,15 @@ func loadSections(filename string, config map[string]interface{}, configDetails
if err != nil {
return nil, err
}
-cfg.Secrets, err = LoadSecrets(getSection(config, "secrets"), configDetails, opts.ResolvePaths)
+cfg.Secrets, err = LoadSecrets(getSection(config, "secrets"))
if err != nil {
return nil, err
}
-cfg.Configs, err = LoadConfigObjs(getSection(config, "configs"), configDetails, opts.ResolvePaths)
+cfg.Configs, err = LoadConfigObjs(getSection(config, "configs"))
+if err != nil {
+return nil, err
+}
+cfg.Include, err = LoadIncludeConfig(getSequence(config, "include"))
if err != nil {
return nil, err
}
@@ -451,6 +505,14 @@ func getSection(config map[string]interface{}, key string) map[string]interface{
return section.(map[string]interface{})
}
func getSequence(config map[string]interface{}, key string) []interface{} {
section, ok := config[key]
if !ok {
return make([]interface{}, 0)
}
return section.([]interface{})
}
// ForbiddenPropertiesError is returned when there are properties in the Compose
// file that are forbidden.
type ForbiddenPropertiesError struct {
@@ -515,6 +577,7 @@ func createTransformHook(additionalTransformers ...Transformer) mapstructure.Dec
reflect.TypeOf(types.ExtendsConfig{}): transformExtendsConfig,
reflect.TypeOf(types.DeviceRequest{}): transformServiceDeviceRequest,
reflect.TypeOf(types.SSHConfig{}): transformSSHConfig,
+reflect.TypeOf(types.IncludeConfig{}): transformIncludeConfig,
}
for _, transformer := range additionalTransformers {
@@ -605,6 +668,7 @@ func LoadServices(filename string, servicesDict map[string]interface{}, workingD
for k, v := range x.(map[string]interface{}) {
servicesDict[k] = v
}
+delete(servicesDict, extensions)
}
for name := range servicesDict {
@@ -633,7 +697,7 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter
target = map[string]interface{}{}
}
-serviceConfig, err := LoadService(name, target.(map[string]interface{}), workingDir, lookupEnv, opts.ResolvePaths, opts.ConvertWindowsPaths)
+serviceConfig, err := LoadService(name, target.(map[string]interface{}))
if err != nil {
return nil, err
}
@@ -671,21 +735,9 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter
// make the paths relative to `file` rather than `baseFilePath` so
// that the resulting paths won't be absolute if `file` isn't an
// absolute path.
baseFileParent := filepath.Dir(file)
if baseService.Build != nil {
baseService.Build.Context = resolveBuildContextPath(baseFileParent, baseService.Build.Context)
}
for i, vol := range baseService.Volumes {
if vol.Type != types.VolumeTypeBind {
continue
}
baseService.Volumes[i].Source = resolveMaybeUnixPath(vol.Source, baseFileParent, lookupEnv)
}
-for i, envFile := range baseService.EnvFile {
-baseService.EnvFile[i] = resolveMaybeUnixPath(envFile, baseFileParent, lookupEnv)
-}
+baseFileParent := filepath.Dir(file)
+ResolveServiceRelativePaths(baseFileParent, baseService)
}
serviceConfig, err = _merge(baseService, serviceConfig)
@@ -698,22 +750,9 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter
return serviceConfig, nil
}
func resolveBuildContextPath(baseFileParent string, context string) string {
// Checks if the context is an HTTP(S) URL or a remote git repository URL
for _, prefix := range []string{"https://", "http://", "git://", "github.com/", "git@"} {
if strings.HasPrefix(context, prefix) {
return context
}
}
// Note that the Dockerfile is always defined relative to the
// build context, so there's no need to update the Dockerfile field.
return absPath(baseFileParent, context)
}
// LoadService produces a single ServiceConfig from a compose file Dict
// the serviceDict is not validated if directly used. Use Load() to enable validation
-func LoadService(name string, serviceDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, resolvePaths bool, convertPaths bool) (*types.ServiceConfig, error) {
+func LoadService(name string, serviceDict map[string]interface{}) (*types.ServiceConfig, error) {
serviceConfig := &types.ServiceConfig{
Scale: 1,
}
@@ -730,13 +769,6 @@ func LoadService(name string, serviceDict map[string]interface{}, workingDir str
return nil, errors.New(`invalid mount config for type "bind": field Source must not be empty`)
}
if resolvePaths || convertPaths {
volume = resolveVolumePath(volume, workingDir, lookupEnv)
}
if convertPaths {
volume = convertVolumePath(volume)
}
serviceConfig.Volumes[i] = volume
}
@@ -758,8 +790,8 @@ func convertVolumePath(volume types.ServiceVolumeConfig) types.ServiceVolumeConf
return volume
}
-func resolveMaybeUnixPath(path string, workingDir string, lookupEnv template.Mapping) string {
-filePath := expandUser(path, lookupEnv)
+func resolveMaybeUnixPath(workingDir string, path string) string {
+filePath := expandUser(path)
// Check if source is an absolute path (either Unix or Windows), to
// handle a Windows client with a Unix daemon or vice-versa.
//
@@ -772,20 +804,8 @@ func resolveMaybeUnixPath(path string, workingDir string, lookupEnv template.Map
return filePath
}
func resolveVolumePath(volume types.ServiceVolumeConfig, workingDir string, lookupEnv template.Mapping) types.ServiceVolumeConfig {
volume.Source = resolveMaybeUnixPath(volume.Source, workingDir, lookupEnv)
return volume
}
func resolveSecretsPath(secret types.SecretConfig, workingDir string, lookupEnv template.Mapping) types.SecretConfig {
if !secret.External.External && secret.File != "" {
secret.File = resolveMaybeUnixPath(secret.File, workingDir, lookupEnv)
}
return secret
}
// TODO: make this more robust
-func expandUser(path string, lookupEnv template.Mapping) string {
+func expandUser(path string) string {
if strings.HasPrefix(path, "~") {
home, err := os.UserHomeDir()
if err != nil {
@@ -885,44 +905,39 @@ func LoadVolumes(source map[string]interface{}) (map[string]types.VolumeConfig,
// LoadSecrets produces a SecretConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
-func LoadSecrets(source map[string]interface{}, details types.ConfigDetails, resolvePaths bool) (map[string]types.SecretConfig, error) {
+func LoadSecrets(source map[string]interface{}) (map[string]types.SecretConfig, error) {
secrets := make(map[string]types.SecretConfig)
if err := Transform(source, &secrets); err != nil {
return secrets, err
}
for name, secret := range secrets {
-obj, err := loadFileObjectConfig(name, "secret", types.FileObjectConfig(secret), details, false)
+obj, err := loadFileObjectConfig(name, "secret", types.FileObjectConfig(secret))
if err != nil {
return nil, err
}
-secretConfig := types.SecretConfig(obj)
-if resolvePaths {
-secretConfig = resolveSecretsPath(secretConfig, details.WorkingDir, details.LookupEnv)
-}
-secrets[name] = secretConfig
+secrets[name] = types.SecretConfig(obj)
}
return secrets, nil
}
// LoadConfigObjs produces a ConfigObjConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
-func LoadConfigObjs(source map[string]interface{}, details types.ConfigDetails, resolvePaths bool) (map[string]types.ConfigObjConfig, error) {
+func LoadConfigObjs(source map[string]interface{}) (map[string]types.ConfigObjConfig, error) {
configs := make(map[string]types.ConfigObjConfig)
if err := Transform(source, &configs); err != nil {
return configs, err
}
for name, config := range configs {
-obj, err := loadFileObjectConfig(name, "config", types.FileObjectConfig(config), details, resolvePaths)
+obj, err := loadFileObjectConfig(name, "config", types.FileObjectConfig(config))
if err != nil {
return nil, err
}
-configConfig := types.ConfigObjConfig(obj)
-configs[name] = configConfig
+configs[name] = types.ConfigObjConfig(obj)
}
return configs, nil
}
-func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfig, details types.ConfigDetails, resolvePaths bool) (types.FileObjectConfig, error) {
+func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfig) (types.FileObjectConfig, error) {
// if "external: true"
switch {
case obj.External.External:
@@ -942,26 +957,11 @@ func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfi
if obj.File != "" {
return obj, errors.Errorf("%[1]s %[2]s: %[1]s.driver and %[1]s.file conflict; only use %[1]s.driver", objType, name)
}
-default:
-if obj.File != "" && resolvePaths {
-obj.File = absPath(details.WorkingDir, obj.File)
-}
}
return obj, nil
}
func absPath(workingDir string, filePath string) string {
if strings.HasPrefix(filePath, "~") {
home, _ := os.UserHomeDir()
return filepath.Join(home, filePath[1:])
}
if filepath.IsAbs(filePath) {
return filePath
}
return filepath.Join(workingDir, filePath)
}
var transformMapStringString TransformerFunc = func(data interface{}) (interface{}, error) {
switch value := data.(type) {
case map[string]interface{}:
@@ -1088,13 +1088,24 @@ var transformDependsOnConfig TransformerFunc = func(data interface{}) (interface
for _, serviceIntf := range value {
service, ok := serviceIntf.(string)
if !ok {
-return data, errors.Errorf("invalid type %T for service depends_on elementn, expected string", value)
+return data, errors.Errorf("invalid type %T for service depends_on element, expected string", value)
}
-transformed[service] = map[string]interface{}{"condition": types.ServiceConditionStarted}
+transformed[service] = map[string]interface{}{"condition": types.ServiceConditionStarted, "required": true}
}
return transformed, nil
case map[string]interface{}:
-return groupXFieldsIntoExtensions(data.(map[string]interface{})), nil
+transformed := map[string]interface{}{}
for service, val := range value {
dependsConfigIntf, ok := val.(map[string]interface{})
if !ok {
return data, errors.Errorf("invalid type %T for service depends_on element", value)
}
if _, ok := dependsConfigIntf["required"]; !ok {
dependsConfigIntf["required"] = true
}
transformed[service] = dependsConfigIntf
}
return groupXFieldsIntoExtensions(transformed), nil
default:
return data, errors.Errorf("invalid type %T for service depends_on", value)
}

@@ -150,13 +150,12 @@ func unique(slice []string) []string {
return nil
}
uniqMap := make(map[string]struct{})
+var uniqSlice []string
for _, v := range slice {
-uniqMap[v] = struct{}{}
-}
-uniqSlice := make([]string, 0, len(uniqMap))
-for v := range uniqMap {
-uniqSlice = append(uniqSlice, v)
+if _, ok := uniqMap[v]; !ok {
+uniqSlice = append(uniqSlice, v)
+uniqMap[v] = struct{}{}
+}
}
return uniqSlice
}

@@ -18,8 +18,6 @@ package loader
import (
"fmt"
-"os"
-"path/filepath"
"strings"
"github.com/compose-spec/compose-go/errdefs"
@@ -29,19 +27,7 @@ import (
)
// Normalize compose project by moving deprecated attributes to their canonical position and injecting implicit defaults
-func Normalize(project *types.Project, resolvePaths bool) error {
+func Normalize(project *types.Project) error {
absWorkingDir, err := filepath.Abs(project.WorkingDir)
if err != nil {
return err
}
project.WorkingDir = absWorkingDir
absComposeFiles, err := absComposeFiles(project.ComposeFiles)
if err != nil {
return err
}
project.ComposeFiles = absComposeFiles
if project.Networks == nil {
project.Networks = make(map[string]types.NetworkConfig)
}
@@ -51,8 +37,7 @@ func Normalize(project *types.Project, resolvePaths bool) error {
project.Networks["default"] = types.NetworkConfig{}
}
-err = relocateExternalName(project)
-if err != nil {
+if err := relocateExternalName(project); err != nil {
return err
}
@@ -72,38 +57,16 @@ func Normalize(project *types.Project, resolvePaths bool) error {
}
if s.Build != nil {
-if s.Build.Context == "" {
-s.Build.Context = "."
-}
if s.Build.Dockerfile == "" && s.Build.DockerfileInline == "" {
s.Build.Dockerfile = "Dockerfile"
}
if resolvePaths {
// Build context might be a remote http/git context. Unfortunately supported "remote"
// syntax is highly ambiguous in moby/moby and not defined by compose-spec,
// so let's assume runtime will check
localContext := absPath(project.WorkingDir, s.Build.Context)
if _, err := os.Stat(localContext); err == nil {
s.Build.Context = localContext
}
for name, path := range s.Build.AdditionalContexts {
if strings.Contains(path, "://") { // `docker-image://` or any builder specific context type
continue
}
path = absPath(project.WorkingDir, path)
if _, err := os.Stat(path); err == nil {
s.Build.AdditionalContexts[name] = path
}
}
}
s.Build.Args = s.Build.Args.Resolve(fn)
}
for j, f := range s.EnvFile {
s.EnvFile[j] = absPath(project.WorkingDir, f)
}
s.Environment = s.Environment.Resolve(fn)
if s.Extends != nil && s.Extends.File != "" {
s.Extends.File = absPath(project.WorkingDir, s.Extends.File)
}
for _, link := range s.Links {
parts := strings.Split(link, ":")
if len(parts) == 2 {
@@ -112,6 +75,7 @@ func Normalize(project *types.Project, resolvePaths bool) error {
s.DependsOn = setIfMissing(s.DependsOn, link, types.ServiceDependency{
Condition: types.ServiceConditionStarted,
Restart: true,
+Required: true,
})
}
@@ -121,6 +85,7 @@ func Normalize(project *types.Project, resolvePaths bool) error {
s.DependsOn = setIfMissing(s.DependsOn, name, types.ServiceDependency{
Condition: types.ServiceConditionStarted,
Restart: true,
+Required: true,
})
}
}
@@ -131,6 +96,7 @@ func Normalize(project *types.Project, resolvePaths bool) error {
s.DependsOn = setIfMissing(s.DependsOn, spec[0], types.ServiceDependency{
Condition: types.ServiceConditionStarted,
Restart: false,
+Required: true,
})
}
}
@@ -160,14 +126,6 @@ func Normalize(project *types.Project, resolvePaths bool) error {
project.Services[i] = s
}
for name, config := range project.Volumes {
if config.Driver == "local" && config.DriverOpts["o"] == "bind" {
// This is actually a bind mount
config.DriverOpts["device"] = absPath(project.WorkingDir, config.DriverOpts["device"])
project.Volumes[name] = config
}
}
setNameFromKey(project)
return nil
@@ -223,6 +181,7 @@ func inferImplicitDependencies(service *types.ServiceConfig) {
if _, ok := service.DependsOn[d]; !ok {
service.DependsOn[d] = types.ServiceDependency{
Condition: types.ServiceConditionStarted,
+Required: true,
}
}
}
@@ -254,18 +213,6 @@ func relocateScale(s *types.ServiceConfig) error {
return nil
}
func absComposeFiles(composeFiles []string) ([]string, error) {
absComposeFiles := make([]string, len(composeFiles))
for i, composeFile := range composeFiles {
absComposefile, err := filepath.Abs(composeFile)
if err != nil {
return nil, err
}
absComposeFiles[i] = absComposefile
}
return absComposeFiles, nil
}
// Resources with no explicit name are actually named by their key in map
func setNameFromKey(project *types.Project) {
for i, n := range project.Networks {

@@ -0,0 +1,135 @@
/*
Copyright 2020 The Compose Specification Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package loader
import (
"os"
"path/filepath"
"strings"
"github.com/compose-spec/compose-go/types"
)
// ResolveRelativePaths resolves relative paths based on project WorkingDirectory
func ResolveRelativePaths(project *types.Project) error {
absWorkingDir, err := filepath.Abs(project.WorkingDir)
if err != nil {
return err
}
project.WorkingDir = absWorkingDir
absComposeFiles, err := absComposeFiles(project.ComposeFiles)
if err != nil {
return err
}
project.ComposeFiles = absComposeFiles
for i, s := range project.Services {
ResolveServiceRelativePaths(project.WorkingDir, &s)
project.Services[i] = s
}
for i, obj := range project.Configs {
if obj.File != "" {
obj.File = absPath(project.WorkingDir, obj.File)
project.Configs[i] = obj
}
}
for i, obj := range project.Secrets {
if obj.File != "" {
obj.File = resolveMaybeUnixPath(project.WorkingDir, obj.File)
project.Secrets[i] = obj
}
}
for name, config := range project.Volumes {
if config.Driver == "local" && config.DriverOpts["o"] == "bind" {
// This is actually a bind mount
config.DriverOpts["device"] = resolveMaybeUnixPath(project.WorkingDir, config.DriverOpts["device"])
project.Volumes[name] = config
}
}
return nil
}
func ResolveServiceRelativePaths(workingDir string, s *types.ServiceConfig) {
if s.Build != nil {
if !isRemoteContext(s.Build.Context) {
s.Build.Context = absPath(workingDir, s.Build.Context)
}
for name, path := range s.Build.AdditionalContexts {
if strings.Contains(path, "://") { // `docker-image://` or any builder specific context type
continue
}
if isRemoteContext(path) {
continue
}
s.Build.AdditionalContexts[name] = absPath(workingDir, path)
}
}
for j, f := range s.EnvFile {
s.EnvFile[j] = absPath(workingDir, f)
}
if s.Extends != nil && s.Extends.File != "" {
s.Extends.File = absPath(workingDir, s.Extends.File)
}
for i, vol := range s.Volumes {
if vol.Type != types.VolumeTypeBind {
continue
}
s.Volumes[i].Source = resolveMaybeUnixPath(workingDir, vol.Source)
}
}
func absPath(workingDir string, filePath string) string {
if strings.HasPrefix(filePath, "~") {
home, _ := os.UserHomeDir()
return filepath.Join(home, filePath[1:])
}
if filepath.IsAbs(filePath) {
return filePath
}
return filepath.Join(workingDir, filePath)
}
func absComposeFiles(composeFiles []string) ([]string, error) {
for i, composeFile := range composeFiles {
absComposefile, err := filepath.Abs(composeFile)
if err != nil {
return nil, err
}
composeFiles[i] = absComposefile
}
return composeFiles, nil
}
// isRemoteContext returns true if the value is a Git reference or HTTP(S) URL.
//
// Any other value is assumed to be a local filesystem path and returns false.
//
// See: https://github.com/moby/buildkit/blob/18fc875d9bfd6e065cd8211abc639434ba65aa56/frontend/dockerui/context.go#L76-L79
func isRemoteContext(maybeURL string) bool {
for _, prefix := range []string{"https://", "http://", "git://", "ssh://", "github.com/", "git@"} {
if strings.HasPrefix(maybeURL, prefix) {
return true
}
}
return false
}
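// Editor's note (illustrative, not part of this diff): a hypothetical in-package
// test showing what the helpers above are expected to do on a Unix-style
// filesystem; the concrete paths are assumptions.
package loader

import "testing"

func TestPathHelpersSketch(t *testing.T) {
	// Git and HTTP(S) references are left untouched by ResolveServiceRelativePaths.
	if !isRemoteContext("git@github.com:compose-spec/compose-go.git") {
		t.Error("expected a git@ reference to be treated as remote")
	}
	if isRemoteContext("./app") {
		t.Error("expected a relative path to be treated as local")
	}
	// Local paths are anchored to the project working directory.
	if got := absPath("/src/bake", "db"); got != "/src/bake/db" {
		t.Errorf("unexpected absPath result: %q", got)
	}
}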

@@ -1,5 +1,5 @@
{
-"$schema": "http://json-schema.org/draft/2019-09/schema#",
+"$schema": "https://json-schema.org/draft/2019-09/schema#",
"id": "compose_spec.json",
"type": "object",
"title": "Compose Specification",
@@ -17,6 +17,15 @@
"description": "define the Compose project name, until user defines one explicitly."
},
"include": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/include"
},
"description": "compose sub-projects to be included."
},
"services": { "services": {
"id": "#/properties/services", "id": "#/properties/services",
"type": "object", "type": "object",
@ -84,6 +93,7 @@
"properties": { "properties": {
"deploy": {"$ref": "#/definitions/deployment"}, "deploy": {"$ref": "#/definitions/deployment"},
"annotations": {"$ref": "#/definitions/list_or_dict"}, "annotations": {"$ref": "#/definitions/list_or_dict"},
"attach": {"type": "boolean"},
"build": { "build": {
"oneOf": [ "oneOf": [
{"type": "string"}, {"type": "string"},
@ -181,6 +191,10 @@
"additionalProperties": false, "additionalProperties": false,
"properties": { "properties": {
"restart": {"type": "boolean"}, "restart": {"type": "boolean"},
"required": {
"type": "boolean",
"default": true
},
"condition": { "condition": {
"type": "string", "type": "string",
"enum": ["service_started", "service_healthy", "service_completed_successfully"] "enum": ["service_started", "service_healthy", "service_completed_successfully"]
@ -443,7 +457,8 @@
] ]
}, },
"timeout": {"type": "string", "format": "duration"}, "timeout": {"type": "string", "format": "duration"},
"start_period": {"type": "string", "format": "duration"} "start_period": {"type": "string", "format": "duration"},
"start_interval": {"type": "string", "format": "duration"}
}, },
"additionalProperties": false, "additionalProperties": false,
"patternProperties": {"^x-": {}} "patternProperties": {"^x-": {}}
@ -588,6 +603,22 @@
} }
}, },
"include": {
"id": "#/definitions/include",
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"path": {"$ref": "#/definitions/string_or_list"},
"env_file": {"$ref": "#/definitions/string_or_list"},
"project_directory": {"type": "string"}
},
"additionalProperties": false
}
]
},
"network": { "network": {
"id": "#/definitions/network", "id": "#/definitions/network",
"type": ["object", "null"], "type": ["object", "null"],

@@ -17,19 +17,18 @@
package schema
import (
-// Enable support for embedded static resources
-_ "embed"
"fmt"
"strings"
"time"
"github.com/xeipuuv/gojsonschema"
+// Enable support for embedded static resources
+_ "embed"
)
type portsFormatChecker struct{}
-func (checker portsFormatChecker) IsFormat(input interface{}) bool {
+func (checker portsFormatChecker) IsFormat(_ interface{}) bool {
// TODO: implement this
return true
}

@@ -17,6 +17,7 @@
package template
import (
+"errors"
"fmt"
"regexp"
"sort"
@ -71,77 +72,148 @@ type Mapping func(string) (string, bool)
// the substitution and an error. // the substitution and an error.
type SubstituteFunc func(string, Mapping) (string, bool, error) type SubstituteFunc func(string, Mapping) (string, bool, error)
// SubstituteWith substitute variables in the string with their values. // ReplacementFunc is a user-supplied function that is apply to the matching
// It accepts additional substitute function. // substring. Returns the value as a string and an error.
func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) { type ReplacementFunc func(string, Mapping, *Config) (string, error)
var outerErr error
var returnErr error
result := pattern.ReplaceAllStringFunc(template, func(substring string) string { type Config struct {
_, subsFunc := getSubstitutionFunctionForTemplate(substring) pattern *regexp.Regexp
if len(subsFuncs) > 0 { substituteFunc SubstituteFunc
subsFunc = subsFuncs[0] replacementFunc ReplacementFunc
} logging bool
}
closingBraceIndex := getFirstBraceClosingIndex(substring) type Option func(*Config)
rest := ""
if closingBraceIndex > -1 {
rest = substring[closingBraceIndex+1:]
substring = substring[0 : closingBraceIndex+1]
}
matches := pattern.FindStringSubmatch(substring) func WithPattern(pattern *regexp.Regexp) Option {
groups := matchGroups(matches, pattern) return func(cfg *Config) {
if escaped := groups["escaped"]; escaped != "" { cfg.pattern = pattern
return escaped }
} }
braced := false func WithSubstitutionFunction(subsFunc SubstituteFunc) Option {
substitution := groups["named"] return func(cfg *Config) {
if substitution == "" { cfg.substituteFunc = subsFunc
substitution = groups["braced"] }
braced = true }
}
if substitution == "" { func WithReplacementFunction(replacementFunc ReplacementFunc) Option {
outerErr = &InvalidTemplateError{Template: template} return func(cfg *Config) {
if returnErr == nil { cfg.replacementFunc = replacementFunc
returnErr = outerErr }
} }
return ""
} func WithoutLogging(cfg *Config) {
cfg.logging = false
}
// SubstituteWithOptions substitutes variables in the string with their values.
// It accepts additional options such as a custom function or pattern.
func SubstituteWithOptions(template string, mapping Mapping, options ...Option) (string, error) {
var returnErr error
cfg := &Config{
pattern: defaultPattern,
replacementFunc: DefaultReplacementFunc,
logging: true,
}
for _, o := range options {
o(cfg)
}
if braced { result := cfg.pattern.ReplaceAllStringFunc(template, func(substring string) string {
var ( replacement, err := cfg.replacementFunc(substring, mapping, cfg)
value string if err != nil {
applied bool // Add the template for template errors
) var tmplErr *InvalidTemplateError
value, applied, outerErr = subsFunc(substitution, mapping) if errors.As(err, &tmplErr) {
if outerErr != nil { if tmplErr.Template == "" {
if returnErr == nil { tmplErr.Template = template
returnErr = outerErr
} }
return ""
} }
if applied { // Save the first error to be returned
interpolatedNested, err := SubstituteWith(rest, mapping, pattern) if returnErr == nil {
if err != nil { returnErr = err
return ""
}
return value + interpolatedNested
} }
}
value, ok := mapping(substitution)
if !ok {
logrus.Warnf("The %q variable is not set. Defaulting to a blank string.", substitution)
} }
return value return replacement
}) })
return result, returnErr return result, returnErr
} }
func DefaultReplacementFunc(substring string, mapping Mapping, cfg *Config) (string, error) {
value, _, err := DefaultReplacementAppliedFunc(substring, mapping, cfg)
return value, err
}
func DefaultReplacementAppliedFunc(substring string, mapping Mapping, cfg *Config) (string, bool, error) {
pattern := cfg.pattern
subsFunc := cfg.substituteFunc
if subsFunc == nil {
_, subsFunc = getSubstitutionFunctionForTemplate(substring)
}
closingBraceIndex := getFirstBraceClosingIndex(substring)
rest := ""
if closingBraceIndex > -1 {
rest = substring[closingBraceIndex+1:]
substring = substring[0 : closingBraceIndex+1]
}
matches := pattern.FindStringSubmatch(substring)
groups := matchGroups(matches, pattern)
if escaped := groups["escaped"]; escaped != "" {
return escaped, true, nil
}
braced := false
substitution := groups["named"]
if substitution == "" {
substitution = groups["braced"]
braced = true
}
if substitution == "" {
return "", false, &InvalidTemplateError{}
}
if braced {
value, applied, err := subsFunc(substitution, mapping)
if err != nil {
return "", false, err
}
if applied {
interpolatedNested, err := SubstituteWith(rest, mapping, pattern)
if err != nil {
return "", false, err
}
return value + interpolatedNested, true, nil
}
}
value, ok := mapping(substitution)
if !ok && cfg.logging {
logrus.Warnf("The %q variable is not set. Defaulting to a blank string.", substitution)
}
return value, ok, nil
}
// SubstituteWith substitutes variables in the string with their values.
// It accepts an additional substitute function.
func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) {
options := []Option{
WithPattern(pattern),
}
if len(subsFuncs) > 0 {
options = append(options, WithSubstitutionFunction(subsFuncs[0]))
}
return SubstituteWithOptions(template, mapping, options...)
}
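For orientation (not part of the diff): a minimal sketch of how a caller might use the new options API introduced above. The import path, mapping, and input string are assumptions for illustration; the exported names come straight from this hunk.

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/template"
)

func main() {
	env := map[string]string{"IMAGE": "nginx"}
	mapping := func(name string) (string, bool) {
		v, ok := env[name]
		return v, ok
	}
	// ${IMAGE} resolves via the mapping; ${TAG} is unset and falls back to "",
	// and WithoutLogging suppresses the usual "variable is not set" warning.
	out, err := template.SubstituteWithOptions("${IMAGE}:${TAG}", mapping, template.WithoutLogging)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // "nginx:"
}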
func getSubstitutionFunctionForTemplate(template string) (string, SubstituteFunc) { func getSubstitutionFunctionForTemplate(template string) (string, SubstituteFunc) {
interpolationMapping := []struct { interpolationMapping := []struct {
string string

@ -67,16 +67,24 @@ type ConfigFile struct {
Config map[string]interface{} Config map[string]interface{}
} }
func ToConfigFiles(path []string) (f []ConfigFile) {
for _, p := range path {
f = append(f, ConfigFile{Filename: p})
}
return
}
// Config is a full compose file configuration and model // Config is a full compose file configuration and model
type Config struct { type Config struct {
Filename string `yaml:"-" json:"-"` Filename string `yaml:"-" json:"-"`
Name string `yaml:",omitempty" json:"name,omitempty"` Name string `yaml:"name,omitempty" json:"name,omitempty"`
Services Services `json:"services"` Services Services `yaml:"services" json:"services"`
Networks Networks `yaml:",omitempty" json:"networks,omitempty"` Networks Networks `yaml:"networks,omitempty" json:"networks,omitempty"`
Volumes Volumes `yaml:",omitempty" json:"volumes,omitempty"` Volumes Volumes `yaml:"volumes,omitempty" json:"volumes,omitempty"`
Secrets Secrets `yaml:",omitempty" json:"secrets,omitempty"` Secrets Secrets `yaml:"secrets,omitempty" json:"secrets,omitempty"`
Configs Configs `yaml:",omitempty" json:"configs,omitempty"` Configs Configs `yaml:"configs,omitempty" json:"configs,omitempty"`
Extensions Extensions `yaml:",inline" json:"-"` Extensions Extensions `yaml:",inline" json:"-"`
Include []IncludeConfig `yaml:"include,omitempty" json:"include,omitempty"`
} }
// Volumes is a map of VolumeConfig // Volumes is a map of VolumeConfig

@ -24,6 +24,8 @@ import (
"path/filepath" "path/filepath"
"sort" "sort"
"github.com/compose-spec/compose-go/utils"
"github.com/compose-spec/compose-go/dotenv" "github.com/compose-spec/compose-go/dotenv"
"github.com/distribution/distribution/v3/reference" "github.com/distribution/distribution/v3/reference"
godigest "github.com/opencontainers/go-digest" godigest "github.com/opencontainers/go-digest"
@ -102,10 +104,19 @@ func (p *Project) ConfigNames() []string {
// GetServices retrieves services by name, or returns all services if no names are specified // GetServices retrieves services by name, or returns all services if no names are specified
func (p *Project) GetServices(names ...string) (Services, error) { func (p *Project) GetServices(names ...string) (Services, error) {
services, servicesNotFound := p.getServicesByNames(names...)
if len(servicesNotFound) > 0 {
return services, fmt.Errorf("no such service: %s", servicesNotFound[0])
}
return services, nil
}
func (p *Project) getServicesByNames(names ...string) (Services, []string) {
if len(names) == 0 { if len(names) == 0 {
return p.Services, nil return p.Services, nil
} }
services := Services{} services := Services{}
var servicesNotFound []string
for _, name := range names { for _, name := range names {
var serviceConfig *ServiceConfig var serviceConfig *ServiceConfig
for _, s := range p.Services { for _, s := range p.Services {
@ -115,11 +126,12 @@ func (p *Project) GetServices(names ...string) (Services, error) {
} }
} }
if serviceConfig == nil { if serviceConfig == nil {
return services, fmt.Errorf("no such service: %s", name) servicesNotFound = append(servicesNotFound, name)
continue
} }
services = append(services, *serviceConfig) services = append(services, *serviceConfig)
} }
return services, nil return services, servicesNotFound
} }
// GetDisabledService retrieves a disabled service by name // GetDisabledService retrieves a disabled service by name
@ -159,26 +171,30 @@ func (p *Project) WithServices(names []string, fn ServiceFunc, options ...Depend
// backward compatibility // backward compatibility
options = []DependencyOption{IncludeDependencies} options = []DependencyOption{IncludeDependencies}
} }
return p.withServices(names, fn, map[string]bool{}, options) return p.withServices(names, fn, map[string]bool{}, options, map[string]ServiceDependency{})
} }
func (p *Project) withServices(names []string, fn ServiceFunc, seen map[string]bool, options []DependencyOption) error { func (p *Project) withServices(names []string, fn ServiceFunc, seen map[string]bool, options []DependencyOption, dependencies map[string]ServiceDependency) error {
services, err := p.GetServices(names...) services, servicesNotFound := p.getServicesByNames(names...)
if err != nil { if len(servicesNotFound) > 0 {
return err for _, serviceNotFound := range servicesNotFound {
if dependency, ok := dependencies[serviceNotFound]; !ok || dependency.Required {
return fmt.Errorf("no such service: %s", serviceNotFound)
}
}
} }
for _, service := range services { for _, service := range services {
if seen[service.Name] { if seen[service.Name] {
continue continue
} }
seen[service.Name] = true seen[service.Name] = true
var dependencies []string var dependencies map[string]ServiceDependency
for _, policy := range options { for _, policy := range options {
switch policy { switch policy {
case IncludeDependents: case IncludeDependents:
dependencies = append(dependencies, p.GetDependentsForService(service)...) dependencies = utils.MapsAppend(dependencies, p.dependentsForService(service))
case IncludeDependencies: case IncludeDependencies:
dependencies = append(dependencies, service.GetDependencies()...) dependencies = utils.MapsAppend(dependencies, service.DependsOn)
case IgnoreDependencies: case IgnoreDependencies:
// Noop // Noop
default: default:
@ -186,7 +202,7 @@ func (p *Project) withServices(names []string, fn ServiceFunc, seen map[string]b
} }
} }
if len(dependencies) > 0 { if len(dependencies) > 0 {
err := p.withServices(dependencies, fn, seen, options) err := p.withServices(utils.MapKeys(dependencies), fn, seen, options, dependencies)
if err != nil { if err != nil {
return err return err
} }
@ -199,11 +215,15 @@ func (p *Project) withServices(names []string, fn ServiceFunc, seen map[string]b
} }
func (p *Project) GetDependentsForService(s ServiceConfig) []string { func (p *Project) GetDependentsForService(s ServiceConfig) []string {
var dependent []string return utils.MapKeys(p.dependentsForService(s))
}
func (p *Project) dependentsForService(s ServiceConfig) map[string]ServiceDependency {
dependent := make(map[string]ServiceDependency)
for _, service := range p.Services { for _, service := range p.Services {
for name := range service.DependsOn { for name, dependency := range service.DependsOn {
if name == s.Name { if name == s.Name {
dependent = append(dependent, service.Name) dependent[service.Name] = dependency
} }
} }
} }
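As a hedged illustration of what the new Required flag changes during dependency traversal (the project layout, service names, and the ServiceFunc signature used here are assumptions, not taken from this diff): a dependency marked Required: false is tolerated even when the target service is not declared.

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/types"
)

func main() {
	// "web" depends on a "metrics" service that is not declared in the project.
	// Because the dependency is marked Required: false, traversal no longer
	// fails with "no such service: metrics".
	p := &types.Project{
		Services: types.Services{
			{
				Name: "web",
				DependsOn: types.DependsOnConfig{
					"metrics": {Condition: types.ServiceConditionStarted, Required: false},
				},
			},
		},
	}
	err := p.WithServices(nil, func(svc types.ServiceConfig) error {
		fmt.Println("visiting", svc.Name)
		return nil
	})
	fmt.Println("err:", err) // err: <nil>
}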
@ -507,7 +527,7 @@ func (p Project) ResolveServicesEnvironment(discardEnvFiles bool) error {
fileVars, err := dotenv.ParseWithLookup(bytes.NewBuffer(b), resolve) fileVars, err := dotenv.ParseWithLookup(bytes.NewBuffer(b), resolve)
if err != nil { if err != nil {
return err return errors.Wrapf(err, "failed to read %s", envFile)
} }
environment.OverrideBy(Mapping(fileVars).ToMappingWithEquals()) environment.OverrideBy(Mapping(fileVars).ToMappingWithEquals())
} }

@ -89,6 +89,7 @@ type ServiceConfig struct {
Profiles []string `yaml:"profiles,omitempty" json:"profiles,omitempty"` Profiles []string `yaml:"profiles,omitempty" json:"profiles,omitempty"`
Annotations Mapping `yaml:"annotations,omitempty" json:"annotations,omitempty"` Annotations Mapping `yaml:"annotations,omitempty" json:"annotations,omitempty"`
Attach *bool `yaml:"attach,omitempty" json:"attach,omitempty"`
Build *BuildConfig `yaml:"build,omitempty" json:"build,omitempty"` Build *BuildConfig `yaml:"build,omitempty" json:"build,omitempty"`
BlkioConfig *BlkioConfig `yaml:"blkio_config,omitempty" json:"blkio_config,omitempty"` BlkioConfig *BlkioConfig `yaml:"blkio_config,omitempty" json:"blkio_config,omitempty"`
CapAdd []string `yaml:"cap_add,omitempty" json:"cap_add,omitempty"` CapAdd []string `yaml:"cap_add,omitempty" json:"cap_add,omitempty"`
@ -602,12 +603,13 @@ type DeployConfig struct {
// HealthCheckConfig the healthcheck configuration for a service // HealthCheckConfig the healthcheck configuration for a service
type HealthCheckConfig struct { type HealthCheckConfig struct {
Test HealthCheckTest `yaml:"test,omitempty" json:"test,omitempty"` Test HealthCheckTest `yaml:"test,omitempty" json:"test,omitempty"`
Timeout *Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"` Timeout *Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"`
Interval *Duration `yaml:"interval,omitempty" json:"interval,omitempty"` Interval *Duration `yaml:"interval,omitempty" json:"interval,omitempty"`
Retries *uint64 `yaml:"retries,omitempty" json:"retries,omitempty"` Retries *uint64 `yaml:"retries,omitempty" json:"retries,omitempty"`
StartPeriod *Duration `yaml:"start_period,omitempty" json:"start_period,omitempty"` StartPeriod *Duration `yaml:"start_period,omitempty" json:"start_period,omitempty"`
Disable bool `yaml:"disable,omitempty" json:"disable,omitempty"` StartInterval *Duration `yaml:"start_interval,omitempty" json:"start_interval,omitempty"`
Disable bool `yaml:"disable,omitempty" json:"disable,omitempty"`
Extensions Extensions `yaml:"#extensions,inline" json:"-"` Extensions Extensions `yaml:"#extensions,inline" json:"-"`
} }
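For illustration only, the new StartInterval field next to the existing healthcheck fields (values are arbitrary; per the compose spec, start_interval is the probe interval applied while the container is still inside its start period):

package main

import (
	"fmt"
	"time"

	"github.com/compose-spec/compose-go/types"
)

func main() {
	interval := types.Duration(30 * time.Second)
	startPeriod := types.Duration(2 * time.Minute)
	startInterval := types.Duration(5 * time.Second)

	hc := types.HealthCheckConfig{
		Test:          types.HealthCheckTest{"CMD", "curl", "-f", "http://localhost/"},
		Interval:      &interval,
		StartPeriod:   &startPeriod,
		StartInterval: &startInterval, // field added in this change
	}
	fmt.Printf("%+v\n", hc)
}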
@ -815,6 +817,8 @@ const (
VolumeTypeTmpfs = "tmpfs" VolumeTypeTmpfs = "tmpfs"
// VolumeTypeNamedPipe is the type for mounting Windows named pipes // VolumeTypeNamedPipe is the type for mounting Windows named pipes
VolumeTypeNamedPipe = "npipe" VolumeTypeNamedPipe = "npipe"
// VolumeTypeCluster is the type for mounting container storage interface (CSI) volumes
VolumeTypeCluster = "cluster"
// SElinuxShared share the volume content // SElinuxShared share the volume content
SElinuxShared = "z" SElinuxShared = "z"
@ -1023,6 +1027,7 @@ type ServiceDependency struct {
Condition string `yaml:"condition,omitempty" json:"condition,omitempty"` Condition string `yaml:"condition,omitempty" json:"condition,omitempty"`
Restart bool `yaml:"restart,omitempty" json:"restart,omitempty"` Restart bool `yaml:"restart,omitempty" json:"restart,omitempty"`
Extensions Extensions `yaml:"#extensions,inline" json:"-"` Extensions Extensions `yaml:"#extensions,inline" json:"-"`
Required bool `yaml:"required" json:"required"`
} }
type ExtendsConfig struct { type ExtendsConfig struct {
@ -1035,3 +1040,9 @@ type SecretConfig FileObjectConfig
// ConfigObjConfig is the config for the swarm "Config" object // ConfigObjConfig is the config for the swarm "Config" object
type ConfigObjConfig FileObjectConfig type ConfigObjConfig FileObjectConfig
type IncludeConfig struct {
Path StringList `yaml:"path,omitempty" json:"path,omitempty"`
ProjectDirectory string `yaml:"project_directory,omitempty" json:"project_directory,omitempty"`
EnvFile StringList `yaml:"env_file,omitempty" json:"env_file,omitempty"`
}
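A small sketch of the new IncludeConfig in use, mirroring the include entry added to the schema earlier in this commit (the paths and project name are invented):

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/types"
)

func main() {
	inc := types.IncludeConfig{
		Path:             types.StringList{"modules/db/compose.yaml"},
		ProjectDirectory: "modules/db",
		EnvFile:          types.StringList{"modules/db/.env"},
	}
	cfg := types.Config{
		Name:    "demo",
		Include: []types.IncludeConfig{inc},
	}
	fmt.Printf("%+v\n", cfg.Include)
}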

@ -0,0 +1,51 @@
/*
Copyright 2020 The Compose Specification Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import "golang.org/x/exp/slices"
func MapKeys[T comparable, U any](theMap map[T]U) []T {
var result []T
for key := range theMap {
result = append(result, key)
}
return result
}
func MapsAppend[T comparable, U any](target map[T]U, source map[T]U) map[T]U {
if target == nil {
return source
}
if source == nil {
return target
}
for key, value := range source {
if _, ok := target[key]; !ok {
target[key] = value
}
}
return target
}
func ArrayContains[T comparable](source []T, toCheck []T) bool {
for _, value := range toCheck {
if !slices.Contains(source, value) {
return false
}
}
return true
}
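A rough usage sketch of the three generic helpers added here (values are arbitrary). Note that MapsAppend keeps the target's entry when a key exists in both maps, and MapKeys returns keys in no particular order.

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/utils"
)

func main() {
	target := map[string]int{"web": 1}
	source := map[string]int{"web": 2, "db": 3}

	merged := utils.MapsAppend(target, source)
	fmt.Println(merged["web"], merged["db"]) // 1 3 (existing keys are not overwritten)

	keys := utils.MapKeys(merged) // e.g. ["web" "db"], order not guaranteed
	fmt.Println(len(keys))        // 2

	fmt.Println(utils.ArrayContains(keys, []string{"db"}))          // true
	fmt.Println(utils.ArrayContains(keys, []string{"db", "cache"})) // false
}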

@ -1,17 +1,20 @@
# Mergo # Mergo
[![GoDoc][3]][4]
[![GitHub release][5]][6] [![GitHub release][5]][6]
[![GoCard][7]][8] [![GoCard][7]][8]
[![Build Status][1]][2] [![Test status][1]][2]
[![Coverage Status][9]][10] [![OpenSSF Scorecard][21]][22]
[![OpenSSF Best Practices][19]][20]
[![Coverage status][9]][10]
[![Sourcegraph][11]][12] [![Sourcegraph][11]][12]
[![FOSSA Status][13]][14] [![FOSSA status][13]][14]
[![GoDoc][3]][4]
[![Become my sponsor][15]][16] [![Become my sponsor][15]][16]
[![Tidelift][17]][18] [![Tidelift][17]][18]
[1]: https://travis-ci.org/imdario/mergo.png [1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master
[2]: https://travis-ci.org/imdario/mergo [2]: https://github.com/imdario/mergo/actions/workflows/tests.yml
[3]: https://godoc.org/github.com/imdario/mergo?status.svg [3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo [4]: https://godoc.org/github.com/imdario/mergo
[5]: https://img.shields.io/github/release/imdario/mergo.svg [5]: https://img.shields.io/github/release/imdario/mergo.svg
@ -28,6 +31,10 @@
[16]: https://github.com/sponsors/imdario [16]: https://github.com/sponsors/imdario
[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo [17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo [18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge
[20]: https://bestpractices.coreinfrastructure.org/projects/7177
[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge
[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
@ -232,5 +239,4 @@ Written by [Dario Castañé](http://dario.im).
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)

27 vendor/golang.org/x/exp/LICENSE generated vendored

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22 vendor/golang.org/x/exp/PATENTS generated vendored

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

@ -0,0 +1,50 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package constraints defines a set of useful constraints to be used
// with type parameters.
package constraints
// Signed is a constraint that permits any signed integer type.
// If future releases of Go add new predeclared signed integer types,
// this constraint will be modified to include them.
type Signed interface {
~int | ~int8 | ~int16 | ~int32 | ~int64
}
// Unsigned is a constraint that permits any unsigned integer type.
// If future releases of Go add new predeclared unsigned integer types,
// this constraint will be modified to include them.
type Unsigned interface {
~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}
// Integer is a constraint that permits any integer type.
// If future releases of Go add new predeclared integer types,
// this constraint will be modified to include them.
type Integer interface {
Signed | Unsigned
}
// Float is a constraint that permits any floating-point type.
// If future releases of Go add new predeclared floating-point types,
// this constraint will be modified to include them.
type Float interface {
~float32 | ~float64
}
// Complex is a constraint that permits any complex numeric type.
// If future releases of Go add new predeclared complex numeric types,
// this constraint will be modified to include them.
type Complex interface {
~complex64 | ~complex128
}
// Ordered is a constraint that permits any ordered type: any type
// that supports the operators < <= >= >.
// If future releases of Go add new ordered types,
// this constraint will be modified to include them.
type Ordered interface {
Integer | Float | ~string
}
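These constraints back the generic code in the vendored slices package below. As a quick illustration (the helper name is mine, not from this diff), any type satisfying Ordered can be compared with <:

package main

import (
	"fmt"

	"golang.org/x/exp/constraints"
)

// minOf is a hypothetical helper: it compiles for any type satisfying
// constraints.Ordered (integers, floats, strings).
func minOf[T constraints.Ordered](a, b T) T {
	if a < b {
		return a
	}
	return b
}

func main() {
	fmt.Println(minOf(3, 5))      // 3
	fmt.Println(minOf("b", "a"))  // a
	fmt.Println(minOf(2.5, 1.25)) // 1.25
}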

@ -0,0 +1,282 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package slices defines various functions useful with slices of any type.
// Unless otherwise specified, these functions all apply to the elements
// of a slice at index 0 <= i < len(s).
//
// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a
// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
// or the sorting may fail to sort correctly. A common case is when sorting slices of
// floating-point numbers containing NaN values.
package slices
import "golang.org/x/exp/constraints"
// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths are different, Equal returns false.
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
func Equal[E comparable](s1, s2 []E) bool {
if len(s1) != len(s2) {
return false
}
for i := range s1 {
if s1[i] != s2[i] {
return false
}
}
return true
}
// EqualFunc reports whether two slices are equal using a comparison
// function on each pair of elements. If the lengths are different,
// EqualFunc returns false. Otherwise, the elements are compared in
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
if len(s1) != len(s2) {
return false
}
for i, v1 := range s1 {
v2 := s2[i]
if !eq(v1, v2) {
return false
}
}
return true
}
// Compare compares the elements of s1 and s2.
// The elements are compared sequentially, starting at index 0,
// until one element is not equal to the other.
// The result of comparing the first non-matching elements is returned.
// If both slices are equal until one of them ends, the shorter slice is
// considered less than the longer one.
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
// Comparisons involving floating point NaNs are ignored.
func Compare[E constraints.Ordered](s1, s2 []E) int {
s2len := len(s2)
for i, v1 := range s1 {
if i >= s2len {
return +1
}
v2 := s2[i]
switch {
case v1 < v2:
return -1
case v1 > v2:
return +1
}
}
if len(s1) < s2len {
return -1
}
return 0
}
// CompareFunc is like Compare but uses a comparison function
// on each pair of elements. The elements are compared in increasing
// index order, and the comparisons stop after the first time cmp
// returns non-zero.
// The result is the first non-zero result of cmp; if cmp always
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
s2len := len(s2)
for i, v1 := range s1 {
if i >= s2len {
return +1
}
v2 := s2[i]
if c := cmp(v1, v2); c != 0 {
return c
}
}
if len(s1) < s2len {
return -1
}
return 0
}
// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
func Index[E comparable](s []E, v E) int {
for i := range s {
if v == s[i] {
return i
}
}
return -1
}
// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
func IndexFunc[E any](s []E, f func(E) bool) int {
for i := range s {
if f(s[i]) {
return i
}
}
return -1
}
// Contains reports whether v is present in s.
func Contains[E comparable](s []E, v E) bool {
return Index(s, v) >= 0
}
// ContainsFunc reports whether at least one
// element e of s satisfies f(e).
func ContainsFunc[E any](s []E, f func(E) bool) bool {
return IndexFunc(s, f) >= 0
}
// Insert inserts the values v... into s at index i,
// returning the modified slice.
// In the returned slice r, r[i] == v[0].
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
tot := len(s) + len(v)
if tot <= cap(s) {
s2 := s[:tot]
copy(s2[i+len(v):], s[i:])
copy(s2[i:], v)
return s2
}
s2 := make(S, tot)
copy(s2, s[:i])
copy(s2[i:], v)
copy(s2[i+len(v):], s[i:])
return s2
}
// Delete removes the elements s[i:j] from s, returning the modified slice.
// Delete panics if s[i:j] is not a valid slice of s.
// Delete modifies the contents of the slice s; it does not create a new slice.
// Delete is O(len(s)-j), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
// elements contain pointers you might consider zeroing those elements so that
// objects they reference can be garbage collected.
func Delete[S ~[]E, E any](s S, i, j int) S {
_ = s[i:j] // bounds check
return append(s[:i], s[j:]...)
}
// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
// When DeleteFunc removes m elements, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage
// collected.
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
// Don't start copying elements until we find one to delete.
for i, v := range s {
if del(v) {
j := i
for i++; i < len(s); i++ {
v = s[i]
if !del(v) {
s[j] = v
j++
}
}
return s[:j]
}
}
return s
}
// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
_ = s[i:j] // verify that i:j is a valid subslice
tot := len(s[:i]) + len(v) + len(s[j:])
if tot <= cap(s) {
s2 := s[:tot]
copy(s2[i+len(v):], s[j:])
copy(s2[i:], v)
return s2
}
s2 := make(S, tot)
copy(s2, s[:i])
copy(s2[i:], v)
copy(s2[i+len(v):], s[j:])
return s2
}
// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
func Clone[S ~[]E, E any](s S) S {
// Preserve nil in case it matters.
if s == nil {
return nil
}
return append(S([]E{}), s...)
}
// Compact replaces consecutive runs of equal elements with a single copy.
// This is like the uniq command found on Unix.
// Compact modifies the contents of the slice s; it does not create a new slice.
// When Compact discards m elements in total, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage collected.
func Compact[S ~[]E, E comparable](s S) S {
if len(s) < 2 {
return s
}
i := 1
for k := 1; k < len(s); k++ {
if s[k] != s[k-1] {
if i != k {
s[i] = s[k]
}
i++
}
}
return s[:i]
}
// CompactFunc is like Compact but uses a comparison function.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
if len(s) < 2 {
return s
}
i := 1
for k := 1; k < len(s); k++ {
if !eq(s[k], s[k-1]) {
if i != k {
s[i] = s[k]
}
i++
}
}
return s[:i]
}
// Grow increases the slice's capacity, if necessary, to guarantee space for
// another n elements. After Grow(n), at least n elements can be appended
// to the slice without another allocation. If n is negative or too large to
// allocate the memory, Grow panics.
func Grow[S ~[]E, E any](s S, n int) S {
if n < 0 {
panic("cannot be negative")
}
if n -= cap(s) - len(s); n > 0 {
		// TODO(https://go.dev/issue/53888): Use []E instead of S as a workaround
		// for a compiler bug where the runtime.growslice optimization does not
		// take effect. Revert when the compiler is fixed.
s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
}
return s
}
// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
func Clip[S ~[]E, E any](s S) S {
return s[:len(s):len(s)]
}
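A brief usage sketch of the vendored helpers above (inputs are arbitrary):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{3, 1, 2}

	fmt.Println(slices.Contains(s, 2)) // true
	fmt.Println(slices.Index(s, 1))    // 1

	s = slices.Insert(s, 1, 9, 9) // [3 9 9 1 2]
	s = slices.Delete(s, 0, 1)    // [9 9 1 2]
	s = slices.Compact(s)         // [9 1 2] (collapses equal neighbours)

	fmt.Println(slices.Clone(s), slices.Equal(s, []int{9, 1, 2})) // [9 1 2] true
}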

@ -0,0 +1,128 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package slices
import (
"math/bits"
"golang.org/x/exp/constraints"
)
// Sort sorts a slice of any ordered type in ascending order.
// Sort may fail to sort correctly when sorting slices of floating-point
// numbers containing Not-a-number (NaN) values.
// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
// instead if the input may contain NaNs.
func Sort[E constraints.Ordered](x []E) {
n := len(x)
pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
}
// SortFunc sorts the slice x in ascending order as determined by the less function.
// This sort is not guaranteed to be stable.
//
// SortFunc requires that less is a strict weak ordering.
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
func SortFunc[E any](x []E, less func(a, b E) bool) {
n := len(x)
pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
}
// SortStableFunc sorts the slice x while keeping the original order of equal
// elements, using less to compare elements.
func SortStableFunc[E any](x []E, less func(a, b E) bool) {
stableLessFunc(x, len(x), less)
}
// IsSorted reports whether x is sorted in ascending order.
func IsSorted[E constraints.Ordered](x []E) bool {
for i := len(x) - 1; i > 0; i-- {
if x[i] < x[i-1] {
return false
}
}
return true
}
// IsSortedFunc reports whether x is sorted in ascending order, with less as the
// comparison function.
func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
for i := len(x) - 1; i > 0; i-- {
if less(x[i], x[i-1]) {
return false
}
}
return true
}
// BinarySearch searches for target in a sorted slice and returns the position
// where target is found, or the position where target would appear in the
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
// Inlining is faster than calling BinarySearchFunc with a lambda.
n := len(x)
// Define x[-1] < target and x[n] >= target.
// Invariant: x[i-1] < target, x[j] >= target.
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if x[h] < target {
i = h + 1 // preserves x[i-1] < target
} else {
j = h // preserves x[j] >= target
}
}
// i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
return i, i < n && x[i] == target
}
// BinarySearchFunc works like BinarySearch, but uses a custom comparison
// function. The slice must be sorted in increasing order, where "increasing"
// is defined by cmp. cmp should return 0 if the slice element matches
// the target, a negative number if the slice element precedes the target,
// or a positive number if the slice element follows the target.
// cmp must implement the same ordering as the slice, such that if
// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) {
n := len(x)
// Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
// Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if cmp(x[h], target) < 0 {
i = h + 1 // preserves cmp(x[i - 1], target) < 0
} else {
j = h // preserves cmp(x[j], target) >= 0
}
}
// i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
return i, i < n && cmp(x[i], target) == 0
}
type sortedHint int // hint for pdqsort when choosing the pivot
const (
unknownHint sortedHint = iota
increasingHint
decreasingHint
)
// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
type xorshift uint64
func (r *xorshift) Next() uint64 {
*r ^= *r << 13
*r ^= *r >> 17
*r ^= *r << 5
return uint64(*r)
}
func nextPowerOfTwo(length int) uint {
return 1 << bits.Len(uint(length))
}
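And a corresponding sketch for the sorting and searching entry points in this file (data is arbitrary; note this vendored revision's SortFunc still takes a less func(a, b E) bool rather than a three-way comparator):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	name string
	age  int
}

func main() {
	nums := []int{5, 2, 4, 1}
	slices.Sort(nums)
	fmt.Println(nums, slices.IsSorted(nums)) // [1 2 4 5] true

	i, found := slices.BinarySearch(nums, 4)
	fmt.Println(i, found) // 2 true

	users := []user{{"bob", 40}, {"alice", 30}}
	slices.SortFunc(users, func(a, b user) bool { return a.age < b.age })
	fmt.Println(users[0].name) // alice
}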

@ -0,0 +1,479 @@
// Code generated by gen_sort_variants.go; DO NOT EDIT.
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package slices
// insertionSortLessFunc sorts data[a:b] using insertion sort.
func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
for i := a + 1; i < b; i++ {
for j := i; j > a && less(data[j], data[j-1]); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}
// siftDownLessFunc implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && less(data[first+child], data[first+child+1]) {
child++
}
if !less(data[first+root], data[first+child]) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
root = child
}
}
func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
first := a
lo := 0
hi := b - a
// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
siftDownLessFunc(data, i, hi, first, less)
}
// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
siftDownLessFunc(data, lo, i, first, less)
}
}
// pdqsortLessFunc sorts data[a:b].
// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
const maxInsertion = 12
var (
wasBalanced = true // whether the last partitioning was reasonably balanced
wasPartitioned = true // whether the slice was already partitioned
)
for {
length := b - a
if length <= maxInsertion {
insertionSortLessFunc(data, a, b, less)
return
}
// Fall back to heapsort if too many bad choices were made.
if limit == 0 {
heapSortLessFunc(data, a, b, less)
return
}
// If the last partitioning was imbalanced, we need to break patterns.
if !wasBalanced {
breakPatternsLessFunc(data, a, b, less)
limit--
}
pivot, hint := choosePivotLessFunc(data, a, b, less)
if hint == decreasingHint {
reverseRangeLessFunc(data, a, b, less)
// The chosen pivot was pivot-a elements after the start of the array.
// After reversing it is pivot-a elements before the end of the array.
// The idea came from Rust's implementation.
pivot = (b - 1) - (pivot - a)
hint = increasingHint
}
// The slice is likely already sorted.
if wasBalanced && wasPartitioned && hint == increasingHint {
if partialInsertionSortLessFunc(data, a, b, less) {
return
}
}
// Probably the slice contains many duplicate elements, partition the slice into
// elements equal to and elements greater than the pivot.
if a > 0 && !less(data[a-1], data[pivot]) {
mid := partitionEqualLessFunc(data, a, b, pivot, less)
a = mid
continue
}
mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
wasPartitioned = alreadyPartitioned
leftLen, rightLen := mid-a, b-mid
balanceThreshold := length / 8
if leftLen < rightLen {
wasBalanced = leftLen >= balanceThreshold
pdqsortLessFunc(data, a, mid, limit, less)
a = mid + 1
} else {
wasBalanced = rightLen >= balanceThreshold
pdqsortLessFunc(data, mid+1, b, limit, less)
b = mid
}
}
}
// partitionLessFunc does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for i <= j && less(data[i], data[a]) {
i++
}
for i <= j && !less(data[j], data[a]) {
j--
}
if i > j {
data[j], data[a] = data[a], data[j]
return j, true
}
data[i], data[j] = data[j], data[i]
i++
j--
for {
for i <= j && less(data[i], data[a]) {
i++
}
for i <= j && !less(data[j], data[a]) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
data[j], data[a] = data[a], data[j]
return j, false
}
// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for {
for i <= j && !less(data[a], data[i]) {
i++
}
for i <= j && less(data[a], data[j]) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
return i
}
// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
const (
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
shortestShifting = 50 // don't shift any elements on short arrays
)
i := a + 1
for j := 0; j < maxSteps; j++ {
for i < b && !less(data[i], data[i-1]) {
i++
}
if i == b {
return true
}
if b-a < shortestShifting {
return false
}
data[i], data[i-1] = data[i-1], data[i]
// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
if !less(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
if !less(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
}
return false
}
// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
length := b - a
if length >= 8 {
random := xorshift(length)
modulus := nextPowerOfTwo(length)
for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
other := int(uint(random.Next()) & (modulus - 1))
if other >= length {
other -= length
}
data[idx], data[a+other] = data[a+other], data[idx]
}
}
}
// choosePivotLessFunc chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
const (
shortestNinther = 50
maxSwaps = 4 * 3
)
l := b - a
var (
swaps int
i = a + l/4*1
j = a + l/4*2
k = a + l/4*3
)
if l >= 8 {
if l >= shortestNinther {
// Tukey ninther method, the idea came from Rust's implementation.
i = medianAdjacentLessFunc(data, i, &swaps, less)
j = medianAdjacentLessFunc(data, j, &swaps, less)
k = medianAdjacentLessFunc(data, k, &swaps, less)
}
// Find the median among i, j, k and stores it into j.
j = medianLessFunc(data, i, j, k, &swaps, less)
}
switch swaps {
case 0:
return j, increasingHint
case maxSwaps:
return j, decreasingHint
default:
return j, unknownHint
}
}
// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
if less(data[b], data[a]) {
*swaps++
return b, a
}
return a, b
}
// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
a, b = order2LessFunc(data, a, b, swaps, less)
b, c = order2LessFunc(data, b, c, swaps, less)
a, b = order2LessFunc(data, a, b, swaps, less)
return b
}
// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
return medianLessFunc(data, a-1, a, a+1, swaps, less)
}
func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
i := a
j := b - 1
for i < j {
data[i], data[j] = data[j], data[i]
i++
j--
}
}
func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
for i := 0; i < n; i++ {
data[a+i], data[b+i] = data[b+i], data[a+i]
}
}
func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
blockSize := 20 // must be > 0
a, b := 0, blockSize
for b <= n {
insertionSortLessFunc(data, a, b, less)
a = b
b += blockSize
}
insertionSortLessFunc(data, a, n, less)
for blockSize < n {
a, b = 0, 2*blockSize
for b <= n {
symMergeLessFunc(data, a, a+blockSize, b, less)
a = b
b += 2 * blockSize
}
if m := a + blockSize; m < n {
symMergeLessFunc(data, a, m, n, less)
}
blockSize *= 2
}
}
// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-m. Wolog M < N.
// The recursion depth is bound by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[a] into data[m:b]
// if data[a:m] only contains one element.
if m-a == 1 {
// Use binary search to find the lowest index i
// such that data[i] >= data[a] for m <= i < b.
// Exit the search loop with i == b in case no such index exists.
i := m
j := b
for i < j {
h := int(uint(i+j) >> 1)
if less(data[h], data[a]) {
i = h + 1
} else {
j = h
}
}
// Swap values until data[a] reaches the position before i.
for k := a; k < i-1; k++ {
data[k], data[k+1] = data[k+1], data[k]
}
return
}
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[m] into data[a:m]
// if data[m:b] only contains one element.
if b-m == 1 {
// Use binary search to find the lowest index i
// such that data[i] > data[m] for a <= i < m.
// Exit the search loop with i == m in case no such index exists.
i := a
j := m
for i < j {
h := int(uint(i+j) >> 1)
if !less(data[m], data[h]) {
i = h + 1
} else {
j = h
}
}
// Swap values until data[m] reaches the position i.
for k := m; k > i; k-- {
data[k], data[k-1] = data[k-1], data[k]
}
return
}
mid := int(uint(a+b) >> 1)
n := mid + m
var start, r int
if m > mid {
start = n - b
r = mid
} else {
start = a
r = m
}
p := n - 1
for start < r {
c := int(uint(start+r) >> 1)
if !less(data[p-c], data[c]) {
start = c + 1
} else {
r = c
}
}
end := n - start
if start < m && m < end {
rotateLessFunc(data, start, m, end, less)
}
if a < start && start < mid {
symMergeLessFunc(data, a, start, mid, less)
}
if mid < end && end < b {
symMergeLessFunc(data, mid, end, b, less)
}
}
// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a many calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
i := m - a
j := b - m
for i != j {
if i > j {
swapRangeLessFunc(data, m-i, m, j, less)
i -= j
} else {
swapRangeLessFunc(data, m-i, m+j-i, i, less)
j -= i
}
}
// i == j
swapRangeLessFunc(data, m-i, m, i, less)
}

@ -0,0 +1,481 @@
// Code generated by gen_sort_variants.go; DO NOT EDIT.
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package slices
import "golang.org/x/exp/constraints"
// insertionSortOrdered sorts data[a:b] using insertion sort.
func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && (data[j] < data[j-1]); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}
// siftDownOrdered implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && (data[first+child] < data[first+child+1]) {
child++
}
if !(data[first+root] < data[first+child]) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
root = child
}
}
func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
first := a
lo := 0
hi := b - a
// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
siftDownOrdered(data, i, hi, first)
}
// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
siftDownOrdered(data, lo, i, first)
}
}
// pdqsortOrdered sorts data[a:b].
// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
const maxInsertion = 12
var (
wasBalanced = true // whether the last partitioning was reasonably balanced
wasPartitioned = true // whether the slice was already partitioned
)
for {
length := b - a
if length <= maxInsertion {
insertionSortOrdered(data, a, b)
return
}
// Fall back to heapsort if too many bad choices were made.
if limit == 0 {
heapSortOrdered(data, a, b)
return
}
// If the last partitioning was imbalanced, we need to break patterns.
if !wasBalanced {
breakPatternsOrdered(data, a, b)
limit--
}
pivot, hint := choosePivotOrdered(data, a, b)
if hint == decreasingHint {
reverseRangeOrdered(data, a, b)
// The chosen pivot was pivot-a elements after the start of the array.
// After reversing it is pivot-a elements before the end of the array.
// The idea came from Rust's implementation.
pivot = (b - 1) - (pivot - a)
hint = increasingHint
}
// The slice is likely already sorted.
if wasBalanced && wasPartitioned && hint == increasingHint {
if partialInsertionSortOrdered(data, a, b) {
return
}
}
// Probably the slice contains many duplicate elements, partition the slice into
// elements equal to and elements greater than the pivot.
if a > 0 && !(data[a-1] < data[pivot]) {
mid := partitionEqualOrdered(data, a, b, pivot)
a = mid
continue
}
mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
wasPartitioned = alreadyPartitioned
leftLen, rightLen := mid-a, b-mid
balanceThreshold := length / 8
if leftLen < rightLen {
wasBalanced = leftLen >= balanceThreshold
pdqsortOrdered(data, a, mid, limit)
a = mid + 1
} else {
wasBalanced = rightLen >= balanceThreshold
pdqsortOrdered(data, mid+1, b, limit)
b = mid
}
}
}
// partitionOrdered does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for i <= j && (data[i] < data[a]) {
i++
}
for i <= j && !(data[j] < data[a]) {
j--
}
if i > j {
data[j], data[a] = data[a], data[j]
return j, true
}
data[i], data[j] = data[j], data[i]
i++
j--
for {
for i <= j && (data[i] < data[a]) {
i++
}
for i <= j && !(data[j] < data[a]) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
data[j], data[a] = data[a], data[j]
return j, false
}
// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
for {
for i <= j && !(data[a] < data[i]) {
i++
}
for i <= j && (data[a] < data[j]) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
return i
}
// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
const (
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
shortestShifting = 50 // don't shift any elements on short arrays
)
i := a + 1
for j := 0; j < maxSteps; j++ {
for i < b && !(data[i] < data[i-1]) {
i++
}
if i == b {
return true
}
if b-a < shortestShifting {
return false
}
data[i], data[i-1] = data[i-1], data[i]
// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
if !(data[j] < data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
if !(data[j] < data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
}
return false
}
// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
length := b - a
if length >= 8 {
random := xorshift(length)
modulus := nextPowerOfTwo(length)
for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
other := int(uint(random.Next()) & (modulus - 1))
if other >= length {
other -= length
}
data[idx], data[a+other] = data[a+other], data[idx]
}
}
}
// choosePivotOrdered chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
const (
shortestNinther = 50
maxSwaps = 4 * 3
)
l := b - a
var (
swaps int
i = a + l/4*1
j = a + l/4*2
k = a + l/4*3
)
if l >= 8 {
if l >= shortestNinther {
// Tukey ninther method, the idea came from Rust's implementation.
i = medianAdjacentOrdered(data, i, &swaps)
j = medianAdjacentOrdered(data, j, &swaps)
k = medianAdjacentOrdered(data, k, &swaps)
}
// Find the median among i, j, k and stores it into j.
j = medianOrdered(data, i, j, k, &swaps)
}
switch swaps {
case 0:
return j, increasingHint
case maxSwaps:
return j, decreasingHint
default:
return j, unknownHint
}
}
// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
if data[b] < data[a] {
*swaps++
return b, a
}
return a, b
}
// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
a, b = order2Ordered(data, a, b, swaps)
b, c = order2Ordered(data, b, c, swaps)
a, b = order2Ordered(data, a, b, swaps)
return b
}
// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
return medianOrdered(data, a-1, a, a+1, swaps)
}
func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
i := a
j := b - 1
for i < j {
data[i], data[j] = data[j], data[i]
i++
j--
}
}
func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
for i := 0; i < n; i++ {
data[a+i], data[b+i] = data[b+i], data[a+i]
}
}
func stableOrdered[E constraints.Ordered](data []E, n int) {
blockSize := 20 // must be > 0
a, b := 0, blockSize
for b <= n {
insertionSortOrdered(data, a, b)
a = b
b += blockSize
}
insertionSortOrdered(data, a, n)
for blockSize < n {
a, b = 0, 2*blockSize
for b <= n {
symMergeOrdered(data, a, a+blockSize, b)
a = b
b += 2 * blockSize
}
if m := a + blockSize; m < n {
symMergeOrdered(data, a, m, n)
}
blockSize *= 2
}
}
// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-m. Wolog M < N.
// The recursion depth is bounded by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[a] into data[m:b]
// if data[a:m] only contains one element.
if m-a == 1 {
// Use binary search to find the lowest index i
// such that data[i] >= data[a] for m <= i < b.
// Exit the search loop with i == b in case no such index exists.
i := m
j := b
for i < j {
h := int(uint(i+j) >> 1)
if data[h] < data[a] {
i = h + 1
} else {
j = h
}
}
// Swap values until data[a] reaches the position before i.
for k := a; k < i-1; k++ {
data[k], data[k+1] = data[k+1], data[k]
}
return
}
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[m] into data[a:m]
// if data[m:b] only contains one element.
if b-m == 1 {
// Use binary search to find the lowest index i
// such that data[i] > data[m] for a <= i < m.
// Exit the search loop with i == m in case no such index exists.
i := a
j := m
for i < j {
h := int(uint(i+j) >> 1)
if !(data[m] < data[h]) {
i = h + 1
} else {
j = h
}
}
// Swap values until data[m] reaches the position i.
for k := m; k > i; k-- {
data[k], data[k-1] = data[k-1], data[k]
}
return
}
mid := int(uint(a+b) >> 1)
n := mid + m
var start, r int
if m > mid {
start = n - b
r = mid
} else {
start = a
r = m
}
p := n - 1
for start < r {
c := int(uint(start+r) >> 1)
if !(data[p-c] < data[c]) {
start = c + 1
} else {
r = c
}
}
end := n - start
if start < m && m < end {
rotateOrdered(data, start, m, end)
}
if a < start && start < mid {
symMergeOrdered(data, a, start, mid)
}
if mid < end && end < b {
symMergeOrdered(data, mid, end, b)
}
}
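// Worked example of the single-element shortcut above (assumed input): for
// data = []int{7, 1, 3, 9}, symMergeOrdered(data, 0, 1, 4) binary-searches the
// insertion point of 7 within data[1:4], then shifts it into place, leaving
// [1 3 7 9].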
// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a many calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
i := m - a
j := b - m
for i != j {
if i > j {
swapRangeOrdered(data, m-i, m, j)
i -= j
} else {
swapRangeOrdered(data, m-i, m+j-i, i)
j -= i
}
}
// i == j
swapRangeOrdered(data, m-i, m, i)
}
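// Worked example (assumed input): with data = []int{1, 2, 5, 6, 3, 4},
// rotateOrdered(data, 2, 4, 6) exchanges the blocks data[2:4] = [5 6] and
// data[4:6] = [3 4] using block swaps, leaving [1 2 3 4 5 6].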

@ -20,7 +20,7 @@ type token struct{}
// A zero Group is valid, has no limit on the number of active goroutines,
// and does not cancel on error.
type Group struct {
- cancel func()
+ cancel func(error)
wg sync.WaitGroup
@ -43,7 +43,7 @@ func (g *Group) done() {
// returns a non-nil error or the first time Wait returns, whichever occurs
// first.
func WithContext(ctx context.Context) (*Group, context.Context) {
- ctx, cancel := context.WithCancel(ctx)
+ ctx, cancel := withCancelCause(ctx)
return &Group{cancel: cancel}, ctx
}
@ -52,7 +52,7 @@ func WithContext(ctx context.Context) (*Group, context.Context) {
func (g *Group) Wait() error {
g.wg.Wait()
if g.cancel != nil {
- g.cancel()
+ g.cancel(g.err)
}
return g.err
}
@ -76,7 +76,7 @@ func (g *Group) Go(f func() error) {
g.errOnce.Do(func() {
g.err = err
if g.cancel != nil {
- g.cancel()
+ g.cancel(g.err)
}
})
}
@ -105,7 +105,7 @@ func (g *Group) TryGo(f func() error) bool {
g.errOnce.Do(func() {
g.err = err
if g.cancel != nil {
- g.cancel()
+ g.cancel(g.err)
}
})
}
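The net effect of the errgroup change: the first non-nil error returned by a goroutine is now recorded as the context's cancellation cause. A minimal sketch of the observable behavior, assuming a Go 1.20+ build (where withCancelCause maps to context.WithCancelCause) and the usual imports (context, errors, fmt, golang.org/x/sync/errgroup):

g, ctx := errgroup.WithContext(context.Background())
g.Go(func() error { return errors.New("boom") })
<-ctx.Done()
fmt.Println(context.Cause(ctx)) // "boom"; before this change the cause was just context.Canceled
_ = g.Wait()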

@ -0,0 +1,14 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.20
// +build go1.20
package errgroup
import "context"
func withCancelCause(parent context.Context) (context.Context, func(error)) {
return context.WithCancelCause(parent)
}

@ -0,0 +1,15 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.20
// +build !go1.20
package errgroup
import "context"
func withCancelCause(parent context.Context) (context.Context, func(error)) {
ctx, cancel := context.WithCancel(parent)
return ctx, func(error) { cancel() }
}

vendor/modules.txt (vendored, 14 changed lines)

@ -131,7 +131,7 @@ github.com/cenkalti/backoff/v4
github.com/cespare/xxhash/v2
# github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e
## explicit
-# github.com/compose-spec/compose-go v1.14.0
+# github.com/compose-spec/compose-go v1.17.0
## explicit; go 1.19
github.com/compose-spec/compose-go/cli
github.com/compose-spec/compose-go/consts
@ -446,7 +446,7 @@ github.com/hashicorp/hcl/v2/hclparse
github.com/hashicorp/hcl/v2/hclsyntax
github.com/hashicorp/hcl/v2/hclwrite
github.com/hashicorp/hcl/v2/json
-# github.com/imdario/mergo v0.3.15
+# github.com/imdario/mergo v0.3.16
## explicit; go 1.13
github.com/imdario/mergo
# github.com/in-toto/in-toto-golang v0.5.0
@ -800,7 +800,11 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/terminal
-# golang.org/x/mod v0.9.0
+# golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1
+## explicit; go 1.20
+golang.org/x/exp/constraints
+golang.org/x/exp/slices
+# golang.org/x/mod v0.11.0
## explicit; go 1.17
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
@ -820,8 +824,8 @@ golang.org/x/net/trace
## explicit; go 1.17
golang.org/x/oauth2
golang.org/x/oauth2/internal
-# golang.org/x/sync v0.2.0
+# golang.org/x/sync v0.3.0
-## explicit
+## explicit; go 1.17
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
# golang.org/x/sys v0.8.0
