Merge pull request #1100 from tonistiigi/print-outline

Build: Support for printing outline/targets of the current build

Refs: pull/1266/head, v0.9.0-rc2
Commit: da1f4b8496
@@ -0,0 +1,48 @@ (new file: package commands)
package commands

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/docker/buildx/build"
	"github.com/docker/docker/api/types/versions"
	"github.com/moby/buildkit/frontend/subrequests"
	"github.com/moby/buildkit/frontend/subrequests/outline"
	"github.com/moby/buildkit/frontend/subrequests/targets"
)

func printResult(f *build.PrintFunc, res map[string]string) error {
	switch f.Name {
	case "outline":
		return printValue(outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
	case "targets":
		return printValue(targets.PrintTargets, targets.SubrequestsTargetsDefinition.Version, f.Format, res)
	case "subrequests.describe":
		return printValue(subrequests.PrintDescribe, subrequests.SubrequestsDescribeDefinition.Version, f.Format, res)
	default:
		if dt, ok := res["result.txt"]; ok {
			fmt.Print(dt)
		} else {
			log.Printf("%s %+v", f, res)
		}
	}
	return nil
}

type printFunc func([]byte, io.Writer) error

func printValue(printer printFunc, version string, format string, res map[string]string) error {
	if format == "json" {
		fmt.Fprintln(os.Stdout, res["result.json"])
		return nil
	}

	if res["version"] != "" && versions.LessThan(version, res["version"]) && res["result.txt"] != "" {
		// structure is too new and we don't know how to print it
		fmt.Fprint(os.Stdout, res["result.txt"])
		return nil
	}
	return printer([]byte(res["result.json"]), os.Stdout)
}
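For orientation, here is a minimal, self-contained sketch (not part of the PR) of how printValue drives a typed printer over a subrequest result map. The res keys ("result.json", "result.txt", "version") follow the convention used above; the payload shape and the toy printer are assumptions standing in for targets.PrintTargets.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
)

// printFunc mirrors the signature used in the new file above.
type printFunc func([]byte, io.Writer) error

// printValueSketch mimics printValue: pass JSON through untouched, otherwise
// hand the JSON payload to a typed printer.
func printValueSketch(printer printFunc, format string, res map[string]string) error {
	if format == "json" {
		fmt.Fprintln(os.Stdout, res["result.json"])
		return nil
	}
	return printer([]byte(res["result.json"]), os.Stdout)
}

func main() {
	// Hypothetical payload in the shape a "targets" subrequest might return.
	res := map[string]string{
		"result.json": `{"targets":[{"name":"default","description":"build the app"}]}`,
		"version":     "1.0.0",
	}

	// A toy printer standing in for targets.PrintTargets.
	printer := func(dt []byte, w io.Writer) error {
		var v struct {
			Targets []struct {
				Name        string `json:"name"`
				Description string `json:"description"`
			} `json:"targets"`
		}
		if err := json.Unmarshal(dt, &v); err != nil {
			return err
		}
		for _, t := range v.Targets {
			fmt.Fprintf(w, "%s\t%s\n", t.Name, t.Description)
		}
		return nil
	}

	_ = printValueSketch(printer, "", res)
}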
@@ -1,5 +0,0 @@ (deleted file: package huff0)
package huff0

//go:generate go run generate.go
//go:generate asmfmt -w decompress_amd64.s
//go:generate asmfmt -w decompress_8b_amd64.s
| @ -1,488 +0,0 @@ | |||||||
| // +build !appengine |  | ||||||
| // +build gc |  | ||||||
| // +build !noasm |  | ||||||
| 
 |  | ||||||
| #include "textflag.h" |  | ||||||
| #include "funcdata.h" |  | ||||||
| #include "go_asm.h" |  | ||||||
| 
 |  | ||||||
| #define bufoff      256 // see decompress.go, we're using [4][256]byte table |  | ||||||
| 
 |  | ||||||
| // func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, |  | ||||||
| //	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) |  | ||||||
| TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8 |  | ||||||
| #define off             R8 |  | ||||||
| #define buffer          DI |  | ||||||
| #define table           SI |  | ||||||
| 
 |  | ||||||
| #define br_bits_read    R9 |  | ||||||
| #define br_value        R10 |  | ||||||
| #define br_offset       R11 |  | ||||||
| #define peek_bits       R12 |  | ||||||
| #define exhausted       DX |  | ||||||
| 
 |  | ||||||
| #define br0             R13 |  | ||||||
| #define br1             R14 |  | ||||||
| #define br2             R15 |  | ||||||
| #define br3             BP |  | ||||||
| 
 |  | ||||||
| 	MOVQ BP, 0(SP) |  | ||||||
| 
 |  | ||||||
| 	XORQ exhausted, exhausted // exhausted = false |  | ||||||
| 	XORQ off, off             // off = 0 |  | ||||||
| 
 |  | ||||||
| 	MOVBQZX peekBits+32(FP), peek_bits |  | ||||||
| 	MOVQ    buf+40(FP), buffer |  | ||||||
| 	MOVQ    tbl+48(FP), table |  | ||||||
| 
 |  | ||||||
| 	MOVQ pbr0+0(FP), br0 |  | ||||||
| 	MOVQ pbr1+8(FP), br1 |  | ||||||
| 	MOVQ pbr2+16(FP), br2 |  | ||||||
| 	MOVQ pbr3+24(FP), br3 |  | ||||||
| 
 |  | ||||||
| main_loop: |  | ||||||
| 
 |  | ||||||
| 	// const stream = 0 |  | ||||||
| 	// br0.fillFast() |  | ||||||
| 	MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read |  | ||||||
| 	MOVQ    bitReaderShifted_value(br0), br_value |  | ||||||
| 	MOVQ    bitReaderShifted_off(br0), br_offset |  | ||||||
| 
 |  | ||||||
| 	// if b.bitsRead >= 32 { |  | ||||||
| 	CMPQ br_bits_read, $32 |  | ||||||
| 	JB   skip_fill0 |  | ||||||
| 
 |  | ||||||
| 	SUBQ $32, br_bits_read // b.bitsRead -= 32 |  | ||||||
| 	SUBQ $4, br_offset     // b.off -= 4 |  | ||||||
| 
 |  | ||||||
| 	// v := b.in[b.off-4 : b.off] |  | ||||||
| 	// v = v[:4] |  | ||||||
| 	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) |  | ||||||
| 	MOVQ bitReaderShifted_in(br0), AX |  | ||||||
| 	MOVL 0(br_offset)(AX*1), AX       // AX = uint32(b.in[b.off:b.off+4]) |  | ||||||
| 
 |  | ||||||
| 	// b.value |= uint64(low) << (b.bitsRead & 63) |  | ||||||
| 	MOVQ br_bits_read, CX |  | ||||||
| 	SHLQ CL, AX |  | ||||||
| 	ORQ  AX, br_value |  | ||||||
| 
 |  | ||||||
| 	// exhausted = exhausted || (br0.off < 4) |  | ||||||
| 	CMPQ  br_offset, $4 |  | ||||||
| 	SETLT DL |  | ||||||
| 	ORB   DL, DH |  | ||||||
| 
 |  | ||||||
| 	// } |  | ||||||
| skip_fill0: |  | ||||||
| 
 |  | ||||||
| 	// val0 := br0.peekTopBits(peekBits) |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v0 := table[val0&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v0 |  | ||||||
| 
 |  | ||||||
| 	// br0.advance(uint8(v0.entry)) |  | ||||||
| 	MOVB    AH, BL           // BL = uint8(v0.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CL, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// val1 := br0.peekTopBits(peekBits) |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v1 := table[val1&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v1 |  | ||||||
| 
 |  | ||||||
| 	// br0.advance(uint8(v1.entry)) |  | ||||||
| 	MOVB    AH, BH           // BH = uint8(v1.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CX, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// these two writes get coalesced |  | ||||||
| 	// buf[stream][off] = uint8(v0.entry >> 8) |  | ||||||
| 	// buf[stream][off+1] = uint8(v1.entry >> 8) |  | ||||||
| 	MOVW BX, 0(buffer)(off*1) |  | ||||||
| 
 |  | ||||||
| 	// SECOND PART: |  | ||||||
| 	// val2 := br0.peekTopBits(peekBits) |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v2 := table[val0&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v0 |  | ||||||
| 
 |  | ||||||
| 	// br0.advance(uint8(v0.entry)) |  | ||||||
| 	MOVB    AH, BL           // BL = uint8(v0.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CL, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// val3 := br0.peekTopBits(peekBits) |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v3 := table[val1&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v1 |  | ||||||
| 
 |  | ||||||
| 	// br0.advance(uint8(v1.entry)) |  | ||||||
| 	MOVB    AH, BH           // BH = uint8(v1.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CX, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// these two writes get coalesced |  | ||||||
| 	// buf[stream][off+2] = uint8(v2.entry >> 8) |  | ||||||
| 	// buf[stream][off+3] = uint8(v3.entry >> 8) |  | ||||||
| 	MOVW BX, 0+2(buffer)(off*1) |  | ||||||
| 
 |  | ||||||
| 	// update the bit reader structure |  | ||||||
| 	MOVB br_bits_read, bitReaderShifted_bitsRead(br0) |  | ||||||
| 	MOVQ br_value, bitReaderShifted_value(br0) |  | ||||||
| 	MOVQ br_offset, bitReaderShifted_off(br0) |  | ||||||
| 
 |  | ||||||
| 	// const stream = 1 |  | ||||||
| 	// br1.fillFast() |  | ||||||
| 	MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read |  | ||||||
| 	MOVQ    bitReaderShifted_value(br1), br_value |  | ||||||
| 	MOVQ    bitReaderShifted_off(br1), br_offset |  | ||||||
| 
 |  | ||||||
| 	// if b.bitsRead >= 32 { |  | ||||||
| 	CMPQ br_bits_read, $32 |  | ||||||
| 	JB   skip_fill1 |  | ||||||
| 
 |  | ||||||
| 	SUBQ $32, br_bits_read // b.bitsRead -= 32 |  | ||||||
| 	SUBQ $4, br_offset     // b.off -= 4 |  | ||||||
| 
 |  | ||||||
| 	// v := b.in[b.off-4 : b.off] |  | ||||||
| 	// v = v[:4] |  | ||||||
| 	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) |  | ||||||
| 	MOVQ bitReaderShifted_in(br1), AX |  | ||||||
| 	MOVL 0(br_offset)(AX*1), AX       // AX = uint32(b.in[b.off:b.off+4]) |  | ||||||
| 
 |  | ||||||
| 	// b.value |= uint64(low) << (b.bitsRead & 63) |  | ||||||
| 	MOVQ br_bits_read, CX |  | ||||||
| 	SHLQ CL, AX |  | ||||||
| 	ORQ  AX, br_value |  | ||||||
| 
 |  | ||||||
| 	// exhausted = exhausted || (br1.off < 4) |  | ||||||
| 	CMPQ  br_offset, $4 |  | ||||||
| 	SETLT DL |  | ||||||
| 	ORB   DL, DH |  | ||||||
| 
 |  | ||||||
| 	// } |  | ||||||
| skip_fill1: |  | ||||||
| 
 |  | ||||||
| 	// val0 := br1.peekTopBits(peekBits) |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v0 := table[val0&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v0 |  | ||||||
| 
 |  | ||||||
| 	// br1.advance(uint8(v0.entry)) |  | ||||||
| 	MOVB    AH, BL           // BL = uint8(v0.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CL, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// val1 := br1.peekTopBits(peekBits) |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v1 := table[val1&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v1 |  | ||||||
| 
 |  | ||||||
| 	// br1.advance(uint8(v1.entry)) |  | ||||||
| 	MOVB    AH, BH           // BH = uint8(v1.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CX, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// these two writes get coalesced |  | ||||||
| 	// buf[stream][off] = uint8(v0.entry >> 8) |  | ||||||
| 	// buf[stream][off+1] = uint8(v1.entry >> 8) |  | ||||||
| 	MOVW BX, 256(buffer)(off*1) |  | ||||||
| 
 |  | ||||||
| 	// SECOND PART: |  | ||||||
| 	// val2 := br1.peekTopBits(peekBits) |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v2 := table[val0&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v0 |  | ||||||
| 
 |  | ||||||
| 	// br1.advance(uint8(v0.entry)) |  | ||||||
| 	MOVB    AH, BL           // BL = uint8(v0.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CL, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// val3 := br1.peekTopBits(peekBits) |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v3 := table[val1&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v1 |  | ||||||
| 
 |  | ||||||
| 	// br1.advance(uint8(v1.entry)) |  | ||||||
| 	MOVB    AH, BH           // BH = uint8(v1.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CX, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// these two writes get coalesced |  | ||||||
| 	// buf[stream][off+2] = uint8(v2.entry >> 8) |  | ||||||
| 	// buf[stream][off+3] = uint8(v3.entry >> 8) |  | ||||||
| 	MOVW BX, 256+2(buffer)(off*1) |  | ||||||
| 
 |  | ||||||
| 	// update the bit reader structure |  | ||||||
| 	MOVB br_bits_read, bitReaderShifted_bitsRead(br1) |  | ||||||
| 	MOVQ br_value, bitReaderShifted_value(br1) |  | ||||||
| 	MOVQ br_offset, bitReaderShifted_off(br1) |  | ||||||
| 
 |  | ||||||
| 	// const stream = 2 |  | ||||||
| 	// br2.fillFast() |  | ||||||
| 	MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read |  | ||||||
| 	MOVQ    bitReaderShifted_value(br2), br_value |  | ||||||
| 	MOVQ    bitReaderShifted_off(br2), br_offset |  | ||||||
| 
 |  | ||||||
| 	// if b.bitsRead >= 32 { |  | ||||||
| 	CMPQ br_bits_read, $32 |  | ||||||
| 	JB   skip_fill2 |  | ||||||
| 
 |  | ||||||
| 	SUBQ $32, br_bits_read // b.bitsRead -= 32 |  | ||||||
| 	SUBQ $4, br_offset     // b.off -= 4 |  | ||||||
| 
 |  | ||||||
| 	// v := b.in[b.off-4 : b.off] |  | ||||||
| 	// v = v[:4] |  | ||||||
| 	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) |  | ||||||
| 	MOVQ bitReaderShifted_in(br2), AX |  | ||||||
| 	MOVL 0(br_offset)(AX*1), AX       // AX = uint32(b.in[b.off:b.off+4]) |  | ||||||
| 
 |  | ||||||
| 	// b.value |= uint64(low) << (b.bitsRead & 63) |  | ||||||
| 	MOVQ br_bits_read, CX |  | ||||||
| 	SHLQ CL, AX |  | ||||||
| 	ORQ  AX, br_value |  | ||||||
| 
 |  | ||||||
| 	// exhausted = exhausted || (br2.off < 4) |  | ||||||
| 	CMPQ  br_offset, $4 |  | ||||||
| 	SETLT DL |  | ||||||
| 	ORB   DL, DH |  | ||||||
| 
 |  | ||||||
| 	// } |  | ||||||
| skip_fill2: |  | ||||||
| 
 |  | ||||||
| 	// val0 := br2.peekTopBits(peekBits) |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v0 := table[val0&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v0 |  | ||||||
| 
 |  | ||||||
| 	// br2.advance(uint8(v0.entry)) |  | ||||||
| 	MOVB    AH, BL           // BL = uint8(v0.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CL, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// val1 := br2.peekTopBits(peekBits) |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v1 := table[val1&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v1 |  | ||||||
| 
 |  | ||||||
| 	// br2.advance(uint8(v1.entry)) |  | ||||||
| 	MOVB    AH, BH           // BH = uint8(v1.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CX, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// these two writes get coalesced |  | ||||||
| 	// buf[stream][off] = uint8(v0.entry >> 8) |  | ||||||
| 	// buf[stream][off+1] = uint8(v1.entry >> 8) |  | ||||||
| 	MOVW BX, 512(buffer)(off*1) |  | ||||||
| 
 |  | ||||||
| 	// SECOND PART: |  | ||||||
| 	// val2 := br2.peekTopBits(peekBits) |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v2 := table[val0&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v0 |  | ||||||
| 
 |  | ||||||
| 	// br2.advance(uint8(v0.entry)) |  | ||||||
| 	MOVB    AH, BL           // BL = uint8(v0.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CL, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// val3 := br2.peekTopBits(peekBits) |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v3 := table[val1&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v1 |  | ||||||
| 
 |  | ||||||
| 	// br2.advance(uint8(v1.entry)) |  | ||||||
| 	MOVB    AH, BH           // BH = uint8(v1.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CX, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// these two writes get coalesced |  | ||||||
| 	// buf[stream][off+2] = uint8(v2.entry >> 8) |  | ||||||
| 	// buf[stream][off+3] = uint8(v3.entry >> 8) |  | ||||||
| 	MOVW BX, 512+2(buffer)(off*1) |  | ||||||
| 
 |  | ||||||
| 	// update the bit reader structure |  | ||||||
| 	MOVB br_bits_read, bitReaderShifted_bitsRead(br2) |  | ||||||
| 	MOVQ br_value, bitReaderShifted_value(br2) |  | ||||||
| 	MOVQ br_offset, bitReaderShifted_off(br2) |  | ||||||
| 
 |  | ||||||
| 	// const stream = 3 |  | ||||||
| 	// br3.fillFast() |  | ||||||
| 	MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read |  | ||||||
| 	MOVQ    bitReaderShifted_value(br3), br_value |  | ||||||
| 	MOVQ    bitReaderShifted_off(br3), br_offset |  | ||||||
| 
 |  | ||||||
| 	// if b.bitsRead >= 32 { |  | ||||||
| 	CMPQ br_bits_read, $32 |  | ||||||
| 	JB   skip_fill3 |  | ||||||
| 
 |  | ||||||
| 	SUBQ $32, br_bits_read // b.bitsRead -= 32 |  | ||||||
| 	SUBQ $4, br_offset     // b.off -= 4 |  | ||||||
| 
 |  | ||||||
| 	// v := b.in[b.off-4 : b.off] |  | ||||||
| 	// v = v[:4] |  | ||||||
| 	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) |  | ||||||
| 	MOVQ bitReaderShifted_in(br3), AX |  | ||||||
| 	MOVL 0(br_offset)(AX*1), AX       // AX = uint32(b.in[b.off:b.off+4]) |  | ||||||
| 
 |  | ||||||
| 	// b.value |= uint64(low) << (b.bitsRead & 63) |  | ||||||
| 	MOVQ br_bits_read, CX |  | ||||||
| 	SHLQ CL, AX |  | ||||||
| 	ORQ  AX, br_value |  | ||||||
| 
 |  | ||||||
| 	// exhausted = exhausted || (br3.off < 4) |  | ||||||
| 	CMPQ  br_offset, $4 |  | ||||||
| 	SETLT DL |  | ||||||
| 	ORB   DL, DH |  | ||||||
| 
 |  | ||||||
| 	// } |  | ||||||
| skip_fill3: |  | ||||||
| 
 |  | ||||||
| 	// val0 := br3.peekTopBits(peekBits) |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v0 := table[val0&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v0 |  | ||||||
| 
 |  | ||||||
| 	// br3.advance(uint8(v0.entry)) |  | ||||||
| 	MOVB    AH, BL           // BL = uint8(v0.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CL, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// val1 := br3.peekTopBits(peekBits) |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v1 := table[val1&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v1 |  | ||||||
| 
 |  | ||||||
| 	// br3.advance(uint8(v1.entry)) |  | ||||||
| 	MOVB    AH, BH           // BH = uint8(v1.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CX, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// these two writes get coalesced |  | ||||||
| 	// buf[stream][off] = uint8(v0.entry >> 8) |  | ||||||
| 	// buf[stream][off+1] = uint8(v1.entry >> 8) |  | ||||||
| 	MOVW BX, 768(buffer)(off*1) |  | ||||||
| 
 |  | ||||||
| 	// SECOND PART: |  | ||||||
| 	// val2 := br3.peekTopBits(peekBits) |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v2 := table[val0&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v0 |  | ||||||
| 
 |  | ||||||
| 	// br3.advance(uint8(v0.entry)) |  | ||||||
| 	MOVB    AH, BL           // BL = uint8(v0.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CL, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// val3 := br3.peekTopBits(peekBits) |  | ||||||
| 	MOVQ peek_bits, CX |  | ||||||
| 	MOVQ br_value, AX |  | ||||||
| 	SHRQ CL, AX        // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
| 	// v3 := table[val1&mask] |  | ||||||
| 	MOVW 0(table)(AX*2), AX // AX - v1 |  | ||||||
| 
 |  | ||||||
| 	// br3.advance(uint8(v1.entry)) |  | ||||||
| 	MOVB    AH, BH           // BH = uint8(v1.entry >> 8) |  | ||||||
| 	MOVBQZX AL, CX |  | ||||||
| 	SHLQ    CX, br_value     // value <<= n |  | ||||||
| 	ADDQ    CX, br_bits_read // bits_read += n |  | ||||||
| 
 |  | ||||||
| 	// these two writes get coalesced |  | ||||||
| 	// buf[stream][off+2] = uint8(v2.entry >> 8) |  | ||||||
| 	// buf[stream][off+3] = uint8(v3.entry >> 8) |  | ||||||
| 	MOVW BX, 768+2(buffer)(off*1) |  | ||||||
| 
 |  | ||||||
| 	// update the bit reader structure |  | ||||||
| 	MOVB br_bits_read, bitReaderShifted_bitsRead(br3) |  | ||||||
| 	MOVQ br_value, bitReaderShifted_value(br3) |  | ||||||
| 	MOVQ br_offset, bitReaderShifted_off(br3) |  | ||||||
| 
 |  | ||||||
| 	ADDQ $4, off // off += 4 |  | ||||||
| 
 |  | ||||||
| 	TESTB DH, DH // any br[i].ofs < 4? |  | ||||||
| 	JNZ   end |  | ||||||
| 
 |  | ||||||
| 	CMPQ off, $bufoff |  | ||||||
| 	JL   main_loop |  | ||||||
| 
 |  | ||||||
| end: |  | ||||||
| 	MOVQ 0(SP), BP |  | ||||||
| 
 |  | ||||||
| 	MOVB off, ret+56(FP) |  | ||||||
| 	RET |  | ||||||
| 
 |  | ||||||
| #undef off |  | ||||||
| #undef buffer |  | ||||||
| #undef table |  | ||||||
| 
 |  | ||||||
| #undef br_bits_read |  | ||||||
| #undef br_value |  | ||||||
| #undef br_offset |  | ||||||
| #undef peek_bits |  | ||||||
| #undef exhausted |  | ||||||
| 
 |  | ||||||
| #undef br0 |  | ||||||
| #undef br1 |  | ||||||
| #undef br2 |  | ||||||
| #undef br3 |  | ||||||
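The hand-unrolled loop above mirrors the Go bit reader that its inline comments quote. A rough Go rendering of one decode step, reconstructed from those comments (the real bitReaderShifted and dEntrySingle in the library may differ in detail), is:

package huff0sketch

// Simplified stand-ins reconstructed from the assembly comments above.
type bitReaderShifted struct {
	in       []byte
	off      uint   // read position, moving from the end of in towards 0
	value    uint64 // bits are consumed from the top of this word
	bitsRead uint8
}

type dEntrySingle struct{ entry uint16 } // low byte: bit count, high byte: symbol

// fillFast refills 32 bits once at least 32 have been consumed.
func (b *bitReaderShifted) fillFast() {
	if b.bitsRead >= 32 {
		b.bitsRead -= 32
		b.off -= 4
		v := b.in[b.off : b.off+4]
		low := uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
		b.value |= uint64(low) << (b.bitsRead & 63)
	}
}

// peekTopBits returns the table index for the next symbol. As in the assembly,
// peekBits is the precomputed shift amount passed in by the caller.
func (b *bitReaderShifted) peekTopBits(peekBits uint8) uint16 {
	return uint16(b.value >> (peekBits & 63))
}

// advance consumes n bits.
func (b *bitReaderShifted) advance(n uint8) {
	b.value <<= n & 63
	b.bitsRead += n
}

// decodeTwo emits one pair of symbols from a stream, as each unrolled block does.
func decodeTwo(br *bitReaderShifted, peekBits uint8, tbl []dEntrySingle, dst []byte) {
	br.fillFast()
	v0 := tbl[br.peekTopBits(peekBits)]
	br.advance(uint8(v0.entry))
	v1 := tbl[br.peekTopBits(peekBits)]
	br.advance(uint8(v1.entry))
	dst[0] = uint8(v0.entry >> 8) // the two stores are coalesced into one MOVW in the asm
	dst[1] = uint8(v1.entry >> 8)
}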
| @ -1,197 +0,0 @@ | |||||||
| // +build !appengine |  | ||||||
| // +build gc |  | ||||||
| // +build !noasm |  | ||||||
| 
 |  | ||||||
| #include "textflag.h" |  | ||||||
| #include "funcdata.h" |  | ||||||
| #include "go_asm.h" |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| #define bufoff      256     // see decompress.go, we're using [4][256]byte table |  | ||||||
| 
 |  | ||||||
| //func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, |  | ||||||
| //	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) |  | ||||||
| TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8 |  | ||||||
| #define off             R8 |  | ||||||
| #define buffer          DI |  | ||||||
| #define table           SI |  | ||||||
| 
 |  | ||||||
| #define br_bits_read    R9 |  | ||||||
| #define br_value        R10 |  | ||||||
| #define br_offset       R11 |  | ||||||
| #define peek_bits       R12 |  | ||||||
| #define exhausted       DX |  | ||||||
| 
 |  | ||||||
| #define br0             R13 |  | ||||||
| #define br1             R14 |  | ||||||
| #define br2             R15 |  | ||||||
| #define br3             BP |  | ||||||
| 
 |  | ||||||
|     MOVQ    BP, 0(SP) |  | ||||||
| 
 |  | ||||||
|     XORQ    exhausted, exhausted    // exhausted = false |  | ||||||
|     XORQ    off, off                // off = 0 |  | ||||||
| 
 |  | ||||||
|     MOVBQZX peekBits+32(FP), peek_bits |  | ||||||
|     MOVQ    buf+40(FP), buffer |  | ||||||
|     MOVQ    tbl+48(FP), table |  | ||||||
| 
 |  | ||||||
|     MOVQ    pbr0+0(FP), br0 |  | ||||||
|     MOVQ    pbr1+8(FP), br1 |  | ||||||
|     MOVQ    pbr2+16(FP), br2 |  | ||||||
|     MOVQ    pbr3+24(FP), br3 |  | ||||||
| 
 |  | ||||||
| main_loop: |  | ||||||
| {{ define "decode_2_values_x86" }} |  | ||||||
|     // const stream = {{ var "id" }} |  | ||||||
|     // br{{ var "id"}}.fillFast() |  | ||||||
|     MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read |  | ||||||
|     MOVQ    bitReaderShifted_value(br{{ var "id" }}), br_value |  | ||||||
|     MOVQ    bitReaderShifted_off(br{{ var "id" }}), br_offset |  | ||||||
| 
 |  | ||||||
| 	// if b.bitsRead >= 32 { |  | ||||||
|     CMPQ    br_bits_read, $32 |  | ||||||
|     JB      skip_fill{{ var "id" }} |  | ||||||
| 
 |  | ||||||
|     SUBQ    $32, br_bits_read       // b.bitsRead -= 32 |  | ||||||
|     SUBQ    $4, br_offset           // b.off -= 4 |  | ||||||
| 
 |  | ||||||
| 	// v := b.in[b.off-4 : b.off] |  | ||||||
| 	// v = v[:4] |  | ||||||
| 	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) |  | ||||||
|     MOVQ    bitReaderShifted_in(br{{ var "id" }}), AX |  | ||||||
|     MOVL    0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4]) |  | ||||||
| 
 |  | ||||||
| 	// b.value |= uint64(low) << (b.bitsRead & 63) |  | ||||||
|     MOVQ    br_bits_read, CX |  | ||||||
|     SHLQ    CL, AX |  | ||||||
|     ORQ     AX, br_value |  | ||||||
| 
 |  | ||||||
|     // exhausted = exhausted || (br{{ var "id"}}.off < 4) |  | ||||||
|     CMPQ    br_offset, $4 |  | ||||||
|     SETLT   DL |  | ||||||
|     ORB     DL, DH |  | ||||||
|     // } |  | ||||||
| skip_fill{{ var "id" }}: |  | ||||||
| 
 |  | ||||||
|     // val0 := br{{ var "id"}}.peekTopBits(peekBits) |  | ||||||
|     MOVQ    br_value, AX |  | ||||||
|     MOVQ    peek_bits, CX |  | ||||||
|     SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
|     // v0 := table[val0&mask] |  | ||||||
|     MOVW    0(table)(AX*2), AX      // AX - v0 |  | ||||||
| 
 |  | ||||||
|     // br{{ var "id"}}.advance(uint8(v0.entry)) |  | ||||||
|     MOVB    AH, BL                  // BL = uint8(v0.entry >> 8) |  | ||||||
|     MOVBQZX AL, CX |  | ||||||
|     SHLQ    CL, br_value            // value <<= n |  | ||||||
|     ADDQ    CX, br_bits_read        // bits_read += n |  | ||||||
| 
 |  | ||||||
|     // val1 := br{{ var "id"}}.peekTopBits(peekBits) |  | ||||||
|     MOVQ    peek_bits, CX |  | ||||||
|     MOVQ    br_value, AX |  | ||||||
|     SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
|     // v1 := table[val1&mask] |  | ||||||
|     MOVW    0(table)(AX*2), AX      // AX - v1 |  | ||||||
| 
 |  | ||||||
|     // br{{ var "id"}}.advance(uint8(v1.entry)) |  | ||||||
|     MOVB    AH, BH                  // BH = uint8(v1.entry >> 8) |  | ||||||
|     MOVBQZX AL, CX |  | ||||||
|     SHLQ    CX, br_value            // value <<= n |  | ||||||
|     ADDQ    CX, br_bits_read        // bits_read += n |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     // these two writes get coalesced |  | ||||||
|     // buf[stream][off] = uint8(v0.entry >> 8) |  | ||||||
|     // buf[stream][off+1] = uint8(v1.entry >> 8) |  | ||||||
|     MOVW    BX, {{ var "bufofs" }}(buffer)(off*1) |  | ||||||
| 
 |  | ||||||
|     // SECOND PART: |  | ||||||
|     // val2 := br{{ var "id"}}.peekTopBits(peekBits) |  | ||||||
|     MOVQ    br_value, AX |  | ||||||
|     MOVQ    peek_bits, CX |  | ||||||
|     SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
|     // v2 := table[val0&mask] |  | ||||||
|     MOVW    0(table)(AX*2), AX      // AX - v0 |  | ||||||
| 
 |  | ||||||
|     // br{{ var "id"}}.advance(uint8(v0.entry)) |  | ||||||
|     MOVB    AH, BL                  // BL = uint8(v0.entry >> 8) |  | ||||||
|     MOVBQZX AL, CX |  | ||||||
|     SHLQ    CL, br_value            // value <<= n |  | ||||||
|     ADDQ    CX, br_bits_read        // bits_read += n |  | ||||||
| 
 |  | ||||||
|     // val3 := br{{ var "id"}}.peekTopBits(peekBits) |  | ||||||
|     MOVQ    peek_bits, CX |  | ||||||
|     MOVQ    br_value, AX |  | ||||||
|     SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask |  | ||||||
| 
 |  | ||||||
|     // v3 := table[val1&mask] |  | ||||||
|     MOVW    0(table)(AX*2), AX      // AX - v1 |  | ||||||
| 
 |  | ||||||
|     // br{{ var "id"}}.advance(uint8(v1.entry)) |  | ||||||
|     MOVB    AH, BH                  // BH = uint8(v1.entry >> 8) |  | ||||||
|     MOVBQZX AL, CX |  | ||||||
|     SHLQ    CX, br_value            // value <<= n |  | ||||||
|     ADDQ    CX, br_bits_read        // bits_read += n |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     // these two writes get coalesced |  | ||||||
|     // buf[stream][off+2] = uint8(v2.entry >> 8) |  | ||||||
|     // buf[stream][off+3] = uint8(v3.entry >> 8) |  | ||||||
|     MOVW    BX, {{ var "bufofs" }}+2(buffer)(off*1) |  | ||||||
| 
 |  | ||||||
|     // update the bit reader structure |  | ||||||
|     MOVB    br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }}) |  | ||||||
|     MOVQ    br_value, bitReaderShifted_value(br{{ var "id" }}) |  | ||||||
|     MOVQ    br_offset, bitReaderShifted_off(br{{ var "id" }}) |  | ||||||
| {{ end }} |  | ||||||
| 
 |  | ||||||
|     {{ set "id" "0" }} |  | ||||||
|     {{ set "ofs" "0" }} |  | ||||||
|     {{ set "bufofs" "0" }} {{/* id * bufoff */}} |  | ||||||
|     {{ template "decode_2_values_x86" . }} |  | ||||||
| 
 |  | ||||||
|     {{ set "id" "1" }} |  | ||||||
|     {{ set "ofs" "8" }} |  | ||||||
|     {{ set "bufofs" "256" }} |  | ||||||
|     {{ template "decode_2_values_x86" . }} |  | ||||||
| 
 |  | ||||||
|     {{ set "id" "2" }} |  | ||||||
|     {{ set "ofs" "16" }} |  | ||||||
|     {{ set "bufofs" "512" }} |  | ||||||
|     {{ template "decode_2_values_x86" . }} |  | ||||||
| 
 |  | ||||||
|     {{ set "id" "3" }} |  | ||||||
|     {{ set "ofs" "24" }} |  | ||||||
|     {{ set "bufofs" "768" }} |  | ||||||
|     {{ template "decode_2_values_x86" . }} |  | ||||||
| 
 |  | ||||||
|     ADDQ    $4, off     // off += 4 |  | ||||||
| 
 |  | ||||||
|     TESTB   DH, DH      // any br[i].ofs < 4? |  | ||||||
|     JNZ     end |  | ||||||
| 
 |  | ||||||
|     CMPQ    off, $bufoff |  | ||||||
|     JL      main_loop |  | ||||||
| end: |  | ||||||
|     MOVQ    0(SP), BP |  | ||||||
| 
 |  | ||||||
|     MOVB    off, ret+56(FP) |  | ||||||
|     RET |  | ||||||
| #undef  off |  | ||||||
| #undef  buffer |  | ||||||
| #undef  table |  | ||||||
| 
 |  | ||||||
| #undef  br_bits_read |  | ||||||
| #undef  br_value |  | ||||||
| #undef  br_offset |  | ||||||
| #undef  peek_bits |  | ||||||
| #undef  exhausted |  | ||||||
| 
 |  | ||||||
| #undef  br0 |  | ||||||
| #undef  br1 |  | ||||||
| #undef  br2 |  | ||||||
| #undef  br3 |  | ||||||
											
												
(One file's diff was suppressed because it is too large.)
								| @ -1,195 +0,0 @@ | |||||||
| // +build !appengine |  | ||||||
| // +build gc |  | ||||||
| // +build !noasm |  | ||||||
| 
 |  | ||||||
| #include "textflag.h" |  | ||||||
| #include "funcdata.h" |  | ||||||
| #include "go_asm.h" |  | ||||||
| 
 |  | ||||||
| #ifdef GOAMD64_v4 |  | ||||||
| #ifndef GOAMD64_v3 |  | ||||||
| #define GOAMD64_v3 |  | ||||||
| #endif |  | ||||||
| #endif |  | ||||||
| 
 |  | ||||||
| #define bufoff      256     // see decompress.go, we're using [4][256]byte table |  | ||||||
| 
 |  | ||||||
| //func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, |  | ||||||
| //	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) |  | ||||||
| TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8 |  | ||||||
| #define off             R8 |  | ||||||
| #define buffer          DI |  | ||||||
| #define table           SI |  | ||||||
| 
 |  | ||||||
| #define br_bits_read    R9 |  | ||||||
| #define br_value        R10 |  | ||||||
| #define br_offset       R11 |  | ||||||
| #define peek_bits       R12 |  | ||||||
| #define exhausted       DX |  | ||||||
| 
 |  | ||||||
| #define br0             R13 |  | ||||||
| #define br1             R14 |  | ||||||
| #define br2             R15 |  | ||||||
| #define br3             BP |  | ||||||
| 
 |  | ||||||
|     MOVQ    BP, 0(SP) |  | ||||||
| 
 |  | ||||||
|     XORQ    exhausted, exhausted    // exhausted = false |  | ||||||
|     XORQ    off, off                // off = 0 |  | ||||||
| 
 |  | ||||||
|     MOVBQZX peekBits+32(FP), peek_bits |  | ||||||
|     MOVQ    buf+40(FP), buffer |  | ||||||
|     MOVQ    tbl+48(FP), table |  | ||||||
| 
 |  | ||||||
|     MOVQ    pbr0+0(FP), br0 |  | ||||||
|     MOVQ    pbr1+8(FP), br1 |  | ||||||
|     MOVQ    pbr2+16(FP), br2 |  | ||||||
|     MOVQ    pbr3+24(FP), br3 |  | ||||||
| 
 |  | ||||||
| main_loop: |  | ||||||
| {{ define "decode_2_values_x86" }} |  | ||||||
|     // const stream = {{ var "id" }} |  | ||||||
|     // br{{ var "id"}}.fillFast() |  | ||||||
|     MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read |  | ||||||
|     MOVQ    bitReaderShifted_value(br{{ var "id" }}), br_value |  | ||||||
|     MOVQ    bitReaderShifted_off(br{{ var "id" }}), br_offset |  | ||||||
| 
 |  | ||||||
|     // We must have at least 2 * max tablelog left |  | ||||||
|     CMPQ    br_bits_read, $64-22 |  | ||||||
|     JBE     skip_fill{{ var "id" }} |  | ||||||
| 
 |  | ||||||
|     SUBQ    $32, br_bits_read       // b.bitsRead -= 32 |  | ||||||
|     SUBQ    $4, br_offset           // b.off -= 4 |  | ||||||
| 
 |  | ||||||
| 	// v := b.in[b.off-4 : b.off] |  | ||||||
| 	// v = v[:4] |  | ||||||
| 	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) |  | ||||||
|     MOVQ    bitReaderShifted_in(br{{ var "id" }}), AX |  | ||||||
| 
 |  | ||||||
| 	// b.value |= uint64(low) << (b.bitsRead & 63) |  | ||||||
| #ifdef GOAMD64_v3 |  | ||||||
|     SHLXQ   br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) |  | ||||||
| #else |  | ||||||
|     MOVL    0(br_offset)(AX*1), AX  // AX = uint32(b.in[b.off:b.off+4]) |  | ||||||
|     MOVQ    br_bits_read, CX |  | ||||||
|     SHLQ    CL, AX |  | ||||||
| #endif |  | ||||||
| 
 |  | ||||||
|     ORQ     AX, br_value |  | ||||||
| 
 |  | ||||||
|     // exhausted = exhausted || (br{{ var "id"}}.off < 4) |  | ||||||
|     CMPQ    br_offset, $4 |  | ||||||
|     SETLT   DL |  | ||||||
|     ORB     DL, DH |  | ||||||
|     // } |  | ||||||
| skip_fill{{ var "id" }}: |  | ||||||
| 
 |  | ||||||
|     // val0 := br{{ var "id"}}.peekTopBits(peekBits) |  | ||||||
| #ifdef GOAMD64_v3 |  | ||||||
|     SHRXQ   peek_bits, br_value, AX // AX = (value >> peek_bits) & mask |  | ||||||
| #else |  | ||||||
|     MOVQ    br_value, AX |  | ||||||
|     MOVQ    peek_bits, CX |  | ||||||
|     SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask |  | ||||||
| #endif |  | ||||||
| 
 |  | ||||||
|     // v0 := table[val0&mask] |  | ||||||
|     MOVW    0(table)(AX*2), AX      // AX - v0 |  | ||||||
| 
 |  | ||||||
|     // br{{ var "id"}}.advance(uint8(v0.entry)) |  | ||||||
|     MOVB    AH, BL                  // BL = uint8(v0.entry >> 8) |  | ||||||
| 
 |  | ||||||
| #ifdef GOAMD64_v3 |  | ||||||
|     MOVBQZX AL, CX |  | ||||||
|     SHLXQ   AX, br_value, br_value // value <<= n |  | ||||||
| #else |  | ||||||
|     MOVBQZX AL, CX |  | ||||||
|     SHLQ    CL, br_value            // value <<= n |  | ||||||
| #endif |  | ||||||
| 
 |  | ||||||
|     ADDQ    CX, br_bits_read        // bits_read += n |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| #ifdef GOAMD64_v3 |  | ||||||
|     SHRXQ    peek_bits, br_value, AX  // AX = (value >> peek_bits) & mask |  | ||||||
| #else |  | ||||||
|     // val1 := br{{ var "id"}}.peekTopBits(peekBits) |  | ||||||
|     MOVQ    peek_bits, CX |  | ||||||
|     MOVQ    br_value, AX |  | ||||||
|     SHRQ    CL, AX                  // AX = (value >> peek_bits) & mask |  | ||||||
| #endif |  | ||||||
| 
 |  | ||||||
|     // v1 := table[val1&mask] |  | ||||||
|     MOVW    0(table)(AX*2), AX      // AX - v1 |  | ||||||
| 
 |  | ||||||
|     // br{{ var "id"}}.advance(uint8(v1.entry)) |  | ||||||
|     MOVB    AH, BH                  // BH = uint8(v1.entry >> 8) |  | ||||||
| 
 |  | ||||||
| #ifdef GOAMD64_v3 |  | ||||||
|     MOVBQZX AL, CX |  | ||||||
|     SHLXQ   AX, br_value, br_value // value <<= n |  | ||||||
| #else |  | ||||||
|     MOVBQZX AL, CX |  | ||||||
|     SHLQ    CL, br_value            // value <<= n |  | ||||||
| #endif |  | ||||||
| 
 |  | ||||||
|     ADDQ    CX, br_bits_read        // bits_read += n |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     // these two writes get coalesced |  | ||||||
|     // buf[stream][off] = uint8(v0.entry >> 8) |  | ||||||
|     // buf[stream][off+1] = uint8(v1.entry >> 8) |  | ||||||
|     MOVW    BX, {{ var "bufofs" }}(buffer)(off*1) |  | ||||||
| 
 |  | ||||||
|     // update the bit reader structure |  | ||||||
|     MOVB    br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }}) |  | ||||||
|     MOVQ    br_value, bitReaderShifted_value(br{{ var "id" }}) |  | ||||||
|     MOVQ    br_offset, bitReaderShifted_off(br{{ var "id" }}) |  | ||||||
| {{ end }} |  | ||||||
| 
 |  | ||||||
|     {{ set "id" "0" }} |  | ||||||
|     {{ set "ofs" "0" }} |  | ||||||
|     {{ set "bufofs" "0" }} {{/* id * bufoff */}} |  | ||||||
|     {{ template "decode_2_values_x86" . }} |  | ||||||
| 
 |  | ||||||
|     {{ set "id" "1" }} |  | ||||||
|     {{ set "ofs" "8" }} |  | ||||||
|     {{ set "bufofs" "256" }} |  | ||||||
|     {{ template "decode_2_values_x86" . }} |  | ||||||
| 
 |  | ||||||
|     {{ set "id" "2" }} |  | ||||||
|     {{ set "ofs" "16" }} |  | ||||||
|     {{ set "bufofs" "512" }} |  | ||||||
|     {{ template "decode_2_values_x86" . }} |  | ||||||
| 
 |  | ||||||
|     {{ set "id" "3" }} |  | ||||||
|     {{ set "ofs" "24" }} |  | ||||||
|     {{ set "bufofs" "768" }} |  | ||||||
|     {{ template "decode_2_values_x86" . }} |  | ||||||
| 
 |  | ||||||
|     ADDQ    $2, off     // off += 2 |  | ||||||
| 
 |  | ||||||
|     TESTB   DH, DH      // any br[i].ofs < 4? |  | ||||||
|     JNZ     end |  | ||||||
| 
 |  | ||||||
|     CMPQ    off, $bufoff |  | ||||||
|     JL      main_loop |  | ||||||
| end: |  | ||||||
|     MOVQ    0(SP), BP |  | ||||||
| 
 |  | ||||||
|     MOVB    off, ret+56(FP) |  | ||||||
|     RET |  | ||||||
| #undef  off |  | ||||||
| #undef  buffer |  | ||||||
| #undef  table |  | ||||||
| 
 |  | ||||||
| #undef  br_bits_read |  | ||||||
| #undef  br_value |  | ||||||
| #undef  br_offset |  | ||||||
| #undef  peek_bits |  | ||||||
| #undef  exhausted |  | ||||||
| 
 |  | ||||||
| #undef  br0 |  | ||||||
| #undef  br1 |  | ||||||
| #undef  br2 |  | ||||||
| #undef  br3 |  | ||||||
@@ -0,0 +1,34 @@ (new file: package cpuinfo)
// Package cpuinfo gives runtime info about the current CPU.
//
// This is a very limited module meant for use internally
// in this project. For more versatile solution check
// https://github.com/klauspost/cpuid.
package cpuinfo

// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
func HasBMI1() bool {
	return hasBMI1
}

// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
func HasBMI2() bool {
	return hasBMI2
}

// DisableBMI2 will disable BMI2, for testing purposes.
// Call returned function to restore previous state.
func DisableBMI2() func() {
	old := hasBMI2
	hasBMI2 = false
	return func() {
		hasBMI2 = old
	}
}

// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
func HasBMI() bool {
	return HasBMI1() && HasBMI2()
}

var hasBMI1 bool
var hasBMI2 bool
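Further down, decodeSyncSimple uses this package to pick between the scalar and the BMI2 assembly entry points. A minimal sketch of that dispatch pattern; it assumes it lives inside the klauspost/compress module (cpuinfo is an internal package), and decodeScalar/decodeBMI2 are hypothetical stand-ins:

package zstd

import "github.com/klauspost/compress/internal/cpuinfo"

// Hypothetical stand-ins for two implementations of the same routine,
// e.g. generic Go versus BMI2-enabled assembly.
func decodeScalar(data []byte) int { return len(data) }
func decodeBMI2(data []byte) int   { return len(data) }

// decodeDispatch picks the best available implementation per call.
func decodeDispatch(data []byte) int {
	if cpuinfo.HasBMI2() {
		return decodeBMI2(data)
	}
	return decodeScalar(data)
}

In tests, cpuinfo.DisableBMI2() forces the scalar path; deferring the function it returns restores the detected state afterwards.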
@@ -0,0 +1,11 @@ (new file: package cpuinfo, amd64)
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc

package cpuinfo

// go:noescape
func x86extensions() (bmi1, bmi2 bool)

func init() {
	hasBMI1, hasBMI2 = x86extensions()
}
@@ -0,0 +1,36 @@ (new file: amd64 assembly)
// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"

TEXT ·x86extensions(SB), NOSPLIT, $0
	// 1. determine max EAX value
	XORQ AX, AX
	CPUID

	CMPQ AX, $7
	JB   unsupported

	// 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
	MOVQ $7, AX
	MOVQ $0, CX
	CPUID

	BTQ   $3, BX // bit 3 = BMI1
	SETCS AL

	BTQ   $8, BX // bit 8 = BMI2
	SETCS AH

	MOVB AL, bmi1+0(FP)
	MOVB AH, bmi2+1(FP)
	RET

unsupported:
	XORQ AX, AX
	MOVB AL, bmi1+0(FP)
	MOVB AL, bmi2+1(FP)
	RET
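As a side note, outside this vendored module the same CPUID leaf (EAX=7, ECX=0) is already surfaced by standard tooling; for instance golang.org/x/sys/cpu (an alternative this PR does not use, mentioned only for context) exposes the bits as plain booleans:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// x/sys/cpu queries the feature bits once at init time.
	fmt.Println("BMI1:", cpu.X86.HasBMI1)
	fmt.Println("BMI2:", cpu.X86.HasBMI2)
}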
| @ -0,0 +1,64 @@ | |||||||
|  | //go:build amd64 && !appengine && !noasm && gc
 | ||||||
|  | // +build amd64,!appengine,!noasm,gc
 | ||||||
|  | 
 | ||||||
|  | package zstd | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"fmt" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | type buildDtableAsmContext struct { | ||||||
|  | 	// inputs
 | ||||||
|  | 	stateTable *uint16 | ||||||
|  | 	norm       *int16 | ||||||
|  | 	dt         *uint64 | ||||||
|  | 
 | ||||||
|  | 	// outputs --- set by the procedure in the case of error;
 | ||||||
|  | 	// for interpretation please see the error handling part below
 | ||||||
|  | 	errParam1 uint64 | ||||||
|  | 	errParam2 uint64 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
 | ||||||
|  | // Function returns non-zero exit code on error.
 | ||||||
|  | // go:noescape
 | ||||||
|  | func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int | ||||||
|  | 
 | ||||||
|  | // please keep in sync with _generate/gen_fse.go
 | ||||||
|  | const ( | ||||||
|  | 	errorCorruptedNormalizedCounter = 1 | ||||||
|  | 	errorNewStateTooBig             = 2 | ||||||
|  | 	errorNewStateNoBits             = 3 | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // buildDtable will build the decoding table.
 | ||||||
|  | func (s *fseDecoder) buildDtable() error { | ||||||
|  | 	ctx := buildDtableAsmContext{ | ||||||
|  | 		stateTable: (*uint16)(&s.stateTable[0]), | ||||||
|  | 		norm:       (*int16)(&s.norm[0]), | ||||||
|  | 		dt:         (*uint64)(&s.dt[0]), | ||||||
|  | 	} | ||||||
|  | 	code := buildDtable_asm(s, &ctx) | ||||||
|  | 
 | ||||||
|  | 	if code != 0 { | ||||||
|  | 		switch code { | ||||||
|  | 		case errorCorruptedNormalizedCounter: | ||||||
|  | 			position := ctx.errParam1 | ||||||
|  | 			return fmt.Errorf("corrupted input (position=%d, expected 0)", position) | ||||||
|  | 
 | ||||||
|  | 		case errorNewStateTooBig: | ||||||
|  | 			newState := decSymbol(ctx.errParam1) | ||||||
|  | 			size := ctx.errParam2 | ||||||
|  | 			return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) | ||||||
|  | 
 | ||||||
|  | 		case errorNewStateNoBits: | ||||||
|  | 			newState := decSymbol(ctx.errParam1) | ||||||
|  | 			oldState := decSymbol(ctx.errParam2) | ||||||
|  | 			return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) | ||||||
|  | 
 | ||||||
|  | 		default: | ||||||
|  | 			return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
| @ -0,0 +1,127 @@ | |||||||
|  | // Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. | ||||||
|  | 
 | ||||||
|  | //go:build !appengine && !noasm && gc && !noasm | ||||||
|  | // +build !appengine,!noasm,gc,!noasm | ||||||
|  | 
 | ||||||
|  | // func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int | ||||||
|  | TEXT ·buildDtable_asm(SB), $0-24 | ||||||
|  | 	MOVQ ctx+8(FP), CX | ||||||
|  | 	MOVQ s+0(FP), DI | ||||||
|  | 
 | ||||||
|  | 	// Load values | ||||||
|  | 	MOVBQZX 4098(DI), DX | ||||||
|  | 	XORQ    AX, AX | ||||||
|  | 	BTSQ    DX, AX | ||||||
|  | 	MOVQ    (CX), BX | ||||||
|  | 	MOVQ    16(CX), SI | ||||||
|  | 	LEAQ    -1(AX), R8 | ||||||
|  | 	MOVQ    8(CX), CX | ||||||
|  | 	MOVWQZX 4096(DI), DI | ||||||
|  | 
 | ||||||
|  | 	// End load values | ||||||
|  | 	// Init, lay down lowprob symbols | ||||||
|  | 	XORQ R9, R9 | ||||||
|  | 	JMP  init_main_loop_condition | ||||||
|  | 
 | ||||||
|  | init_main_loop: | ||||||
|  | 	MOVWQSX (CX)(R9*2), R10 | ||||||
|  | 	CMPW    R10, $-1 | ||||||
|  | 	JNE     do_not_update_high_threshold | ||||||
|  | 	MOVB    R9, 1(SI)(R8*8) | ||||||
|  | 	DECQ    R8 | ||||||
|  | 	MOVQ    $0x0000000000000001, R10 | ||||||
|  | 
 | ||||||
|  | do_not_update_high_threshold: | ||||||
|  | 	MOVW R10, (BX)(R9*2) | ||||||
|  | 	INCQ R9 | ||||||
|  | 
 | ||||||
|  | init_main_loop_condition: | ||||||
|  | 	CMPQ R9, DI | ||||||
|  | 	JL   init_main_loop | ||||||
|  | 
 | ||||||
|  | 	// Spread symbols | ||||||
|  | 	// Calculate table step | ||||||
|  | 	MOVQ AX, R9 | ||||||
|  | 	SHRQ $0x01, R9 | ||||||
|  | 	MOVQ AX, R10 | ||||||
|  | 	SHRQ $0x03, R10 | ||||||
|  | 	LEAQ 3(R9)(R10*1), R9 | ||||||
|  | 
 | ||||||
|  | 	// Fill add bits values | ||||||
|  | 	LEAQ -1(AX), R10 | ||||||
|  | 	XORQ R11, R11 | ||||||
|  | 	XORQ R12, R12 | ||||||
|  | 	JMP  spread_main_loop_condition | ||||||
|  | 
 | ||||||
|  | spread_main_loop: | ||||||
|  | 	XORQ    R13, R13 | ||||||
|  | 	MOVWQSX (CX)(R12*2), R14 | ||||||
|  | 	JMP     spread_inner_loop_condition | ||||||
|  | 
 | ||||||
|  | spread_inner_loop: | ||||||
|  | 	MOVB R12, 1(SI)(R11*8) | ||||||
|  | 
 | ||||||
|  | adjust_position: | ||||||
|  | 	ADDQ R9, R11 | ||||||
|  | 	ANDQ R10, R11 | ||||||
|  | 	CMPQ R11, R8 | ||||||
|  | 	JG   adjust_position | ||||||
|  | 	INCQ R13 | ||||||
|  | 
 | ||||||
|  | spread_inner_loop_condition: | ||||||
|  | 	CMPQ R13, R14 | ||||||
|  | 	JL   spread_inner_loop | ||||||
|  | 	INCQ R12 | ||||||
|  | 
 | ||||||
|  | spread_main_loop_condition: | ||||||
|  | 	CMPQ  R12, DI | ||||||
|  | 	JL    spread_main_loop | ||||||
|  | 	TESTQ R11, R11 | ||||||
|  | 	JZ    spread_check_ok | ||||||
|  | 	MOVQ  ctx+8(FP), AX | ||||||
|  | 	MOVQ  R11, 24(AX) | ||||||
|  | 	MOVQ  $+1, ret+16(FP) | ||||||
|  | 	RET | ||||||
|  | 
 | ||||||
|  | spread_check_ok: | ||||||
|  | 	// Build Decoding table | ||||||
|  | 	XORQ DI, DI | ||||||
|  | 
 | ||||||
|  | build_table_main_table: | ||||||
|  | 	MOVBQZX 1(SI)(DI*8), CX | ||||||
|  | 	MOVWQZX (BX)(CX*2), R8 | ||||||
|  | 	LEAQ    1(R8), R9 | ||||||
|  | 	MOVW    R9, (BX)(CX*2) | ||||||
|  | 	MOVQ    R8, R9 | ||||||
|  | 	BSRQ    R9, R9 | ||||||
|  | 	MOVQ    DX, CX | ||||||
|  | 	SUBQ    R9, CX | ||||||
|  | 	SHLQ    CL, R8 | ||||||
|  | 	SUBQ    AX, R8 | ||||||
|  | 	MOVB    CL, (SI)(DI*8) | ||||||
|  | 	MOVW    R8, 2(SI)(DI*8) | ||||||
|  | 	CMPQ    R8, AX | ||||||
|  | 	JLE     build_table_check1_ok | ||||||
|  | 	MOVQ    ctx+8(FP), CX | ||||||
|  | 	MOVQ    R8, 24(CX) | ||||||
|  | 	MOVQ    AX, 32(CX) | ||||||
|  | 	MOVQ    $+2, ret+16(FP) | ||||||
|  | 	RET | ||||||
|  | 
 | ||||||
|  | build_table_check1_ok: | ||||||
|  | 	TESTB CL, CL | ||||||
|  | 	JNZ   build_table_check2_ok | ||||||
|  | 	CMPW  R8, DI | ||||||
|  | 	JNE   build_table_check2_ok | ||||||
|  | 	MOVQ  ctx+8(FP), AX | ||||||
|  | 	MOVQ  R8, 24(AX) | ||||||
|  | 	MOVQ  DI, 32(AX) | ||||||
|  | 	MOVQ  $+3, ret+16(FP) | ||||||
|  | 	RET | ||||||
|  | 
 | ||||||
|  | build_table_check2_ok: | ||||||
|  | 	INCQ DI | ||||||
|  | 	CMPQ DI, AX | ||||||
|  | 	JL   build_table_main_table | ||||||
|  | 	MOVQ $+0, ret+16(FP) | ||||||
|  | 	RET | ||||||
| @ -0,0 +1,72 @@ | |||||||
|  | //go:build !amd64 || appengine || !gc || noasm
 | ||||||
|  | // +build !amd64 appengine !gc noasm
 | ||||||
|  | 
 | ||||||
|  | package zstd | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"errors" | ||||||
|  | 	"fmt" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // buildDtable will build the decoding table.
 | ||||||
|  | func (s *fseDecoder) buildDtable() error { | ||||||
|  | 	tableSize := uint32(1 << s.actualTableLog) | ||||||
|  | 	highThreshold := tableSize - 1 | ||||||
|  | 	symbolNext := s.stateTable[:256] | ||||||
|  | 
 | ||||||
|  | 	// Init, lay down lowprob symbols
 | ||||||
|  | 	{ | ||||||
|  | 		for i, v := range s.norm[:s.symbolLen] { | ||||||
|  | 			if v == -1 { | ||||||
|  | 				s.dt[highThreshold].setAddBits(uint8(i)) | ||||||
|  | 				highThreshold-- | ||||||
|  | 				symbolNext[i] = 1 | ||||||
|  | 			} else { | ||||||
|  | 				symbolNext[i] = uint16(v) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Spread symbols
 | ||||||
|  | 	{ | ||||||
|  | 		tableMask := tableSize - 1 | ||||||
|  | 		step := tableStep(tableSize) | ||||||
|  | 		position := uint32(0) | ||||||
|  | 		for ss, v := range s.norm[:s.symbolLen] { | ||||||
|  | 			for i := 0; i < int(v); i++ { | ||||||
|  | 				s.dt[position].setAddBits(uint8(ss)) | ||||||
|  | 				position = (position + step) & tableMask | ||||||
|  | 				for position > highThreshold { | ||||||
|  | 					// lowprob area
 | ||||||
|  | 					position = (position + step) & tableMask | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		if position != 0 { | ||||||
|  | 			// position must reach all cells once, otherwise normalizedCounter is incorrect
 | ||||||
|  | 			return errors.New("corrupted input (position != 0)") | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Build Decoding table
 | ||||||
|  | 	{ | ||||||
|  | 		tableSize := uint16(1 << s.actualTableLog) | ||||||
|  | 		for u, v := range s.dt[:tableSize] { | ||||||
|  | 			symbol := v.addBits() | ||||||
|  | 			nextState := symbolNext[symbol] | ||||||
|  | 			symbolNext[symbol] = nextState + 1 | ||||||
|  | 			nBits := s.actualTableLog - byte(highBits(uint32(nextState))) | ||||||
|  | 			s.dt[u&maxTableMask].setNBits(nBits) | ||||||
|  | 			newState := (nextState << nBits) - tableSize | ||||||
|  | 			if newState > tableSize { | ||||||
|  | 				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) | ||||||
|  | 			} | ||||||
|  | 			if newState == uint16(u) && nBits == 0 { | ||||||
|  | 				// Seems weird that this is possible with nbits > 0.
 | ||||||
|  | 				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) | ||||||
|  | 			} | ||||||
|  | 			s.dt[u&maxTableMask].setNewState(newState) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
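The spreading loop above relies on tableStep, defined elsewhere in this package; the assembly shown earlier computes the same stride inline (tableSize/2 + tableSize/8 + 3). A sketch of that formula, for reference:

package zstd

// tableStep returns the FSE symbol-spreading stride; the real definition lives
// elsewhere in this package, but it matches what the assembly computes inline.
// For a power-of-two tableSize the result is odd, so adding it modulo the table
// size visits every cell exactly once before returning to zero -- which is why
// the "position != 0" check above detects a corrupted normalized counter.
func tableStep(tableSize uint32) uint32 {
	return (tableSize >> 1) + (tableSize >> 3) + 3
}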
@@ -1,11 +0,0 @@ (deleted file: package zstd)
//go:build ignorecrc
// +build ignorecrc

// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = true
@@ -1,11 +0,0 @@ (deleted file: package zstd)
//go:build !ignorecrc
// +build !ignorecrc

// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = false
| @ -0,0 +1,362 @@ | |||||||
|  | //go:build amd64 && !appengine && !noasm && gc
 | ||||||
|  | // +build amd64,!appengine,!noasm,gc
 | ||||||
|  | 
 | ||||||
|  | package zstd | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"fmt" | ||||||
|  | 
 | ||||||
|  | 	"github.com/klauspost/compress/internal/cpuinfo" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | type decodeSyncAsmContext struct { | ||||||
|  | 	llTable     []decSymbol | ||||||
|  | 	mlTable     []decSymbol | ||||||
|  | 	ofTable     []decSymbol | ||||||
|  | 	llState     uint64 | ||||||
|  | 	mlState     uint64 | ||||||
|  | 	ofState     uint64 | ||||||
|  | 	iteration   int | ||||||
|  | 	litRemain   int | ||||||
|  | 	out         []byte | ||||||
|  | 	outPosition int | ||||||
|  | 	literals    []byte | ||||||
|  | 	litPosition int | ||||||
|  | 	history     []byte | ||||||
|  | 	windowSize  int | ||||||
|  | 	ll          int // set on error (not for all errors, please refer to _generate/gen.go)
 | ||||||
|  | 	ml          int // set on error (not for all errors, please refer to _generate/gen.go)
 | ||||||
|  | 	mo          int // set on error (not for all errors, please refer to _generate/gen.go)
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
 | ||||||
|  | //
 | ||||||
|  | // Please refer to seqdec_generic.go for the reference implementation.
 | ||||||
|  | //go:noescape
 | ||||||
|  | func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int | ||||||
|  | 
 | ||||||
|  | // sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
 | ||||||
|  | //go:noescape
 | ||||||
|  | func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int | ||||||
|  | 
 | ||||||
|  | // sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
 | ||||||
|  | //go:noescape
 | ||||||
|  | func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int | ||||||
|  | 
 | ||||||
|  | // sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
 | ||||||
|  | //go:noescape
 | ||||||
|  | func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int | ||||||
|  | 
 | ||||||
|  | // decode sequences from the stream with the provided history but without a dictionary.
 | ||||||
|  | func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { | ||||||
|  | 	if len(s.dict) > 0 { | ||||||
|  | 		return false, nil | ||||||
|  | 	} | ||||||
|  | 	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { | ||||||
|  | 		return false, nil | ||||||
|  | 	} | ||||||
|  | 	useSafe := false | ||||||
|  | 	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { | ||||||
|  | 		useSafe = true | ||||||
|  | 	} | ||||||
|  | 	if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { | ||||||
|  | 		useSafe = true | ||||||
|  | 	} | ||||||
|  | 	if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { | ||||||
|  | 		useSafe = true | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	br := s.br | ||||||
|  | 
 | ||||||
|  | 	maxBlockSize := maxCompressedBlockSize | ||||||
|  | 	if s.windowSize < maxBlockSize { | ||||||
|  | 		maxBlockSize = s.windowSize | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	ctx := decodeSyncAsmContext{ | ||||||
|  | 		llTable:     s.litLengths.fse.dt[:maxTablesize], | ||||||
|  | 		mlTable:     s.matchLengths.fse.dt[:maxTablesize], | ||||||
|  | 		ofTable:     s.offsets.fse.dt[:maxTablesize], | ||||||
|  | 		llState:     uint64(s.litLengths.state.state), | ||||||
|  | 		mlState:     uint64(s.matchLengths.state.state), | ||||||
|  | 		ofState:     uint64(s.offsets.state.state), | ||||||
|  | 		iteration:   s.nSeqs - 1, | ||||||
|  | 		litRemain:   len(s.literals), | ||||||
|  | 		out:         s.out, | ||||||
|  | 		outPosition: len(s.out), | ||||||
|  | 		literals:    s.literals, | ||||||
|  | 		windowSize:  s.windowSize, | ||||||
|  | 		history:     hist, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	s.seqSize = 0 | ||||||
|  | 	startSize := len(s.out) | ||||||
|  | 
 | ||||||
|  | 	var errCode int | ||||||
|  | 	if cpuinfo.HasBMI2() { | ||||||
|  | 		if useSafe { | ||||||
|  | 			errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) | ||||||
|  | 		} else { | ||||||
|  | 			errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) | ||||||
|  | 		} | ||||||
|  | 	} else { | ||||||
|  | 		if useSafe { | ||||||
|  | 			errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) | ||||||
|  | 		} else { | ||||||
|  | 			errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	switch errCode { | ||||||
|  | 	case noError: | ||||||
|  | 		break | ||||||
|  | 
 | ||||||
|  | 	case errorMatchLenOfsMismatch: | ||||||
|  | 		return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) | ||||||
|  | 
 | ||||||
|  | 	case errorMatchLenTooBig: | ||||||
|  | 		return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) | ||||||
|  | 
 | ||||||
|  | 	case errorMatchOffTooBig: | ||||||
|  | 		return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", | ||||||
|  | 			ctx.mo, ctx.outPosition+len(hist)-startSize) | ||||||
|  | 
 | ||||||
|  | 	case errorNotEnoughLiterals: | ||||||
|  | 		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", | ||||||
|  | 			ctx.ll, ctx.litRemain+ctx.ll) | ||||||
|  | 
 | ||||||
|  | 	case errorNotEnoughSpace: | ||||||
|  | 		size := ctx.outPosition + ctx.ll + ctx.ml | ||||||
|  | 		if debugDecoder { | ||||||
|  | 			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) | ||||||
|  | 		} | ||||||
|  | 		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize) | ||||||
|  | 
 | ||||||
|  | 	default: | ||||||
|  | 		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	s.seqSize += ctx.litRemain | ||||||
|  | 	if s.seqSize > maxBlockSize { | ||||||
|  | 		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) | ||||||
|  | 	} | ||||||
|  | 	err := br.close() | ||||||
|  | 	if err != nil { | ||||||
|  | 		printf("Closing sequences: %v, %+v\n", err, *br) | ||||||
|  | 		return true, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	s.literals = s.literals[ctx.litPosition:] | ||||||
|  | 	t := ctx.outPosition | ||||||
|  | 	s.out = s.out[:t] | ||||||
|  | 
 | ||||||
|  | 	// Add final literals
 | ||||||
|  | 	s.out = append(s.out, s.literals...) | ||||||
|  | 	if debugDecoder { | ||||||
|  | 		t += len(s.literals) | ||||||
|  | 		if t != len(s.out) { | ||||||
|  | 			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return true, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // --------------------------------------------------------------------------------
 | ||||||
|  | 
 | ||||||
|  | type decodeAsmContext struct { | ||||||
|  | 	llTable   []decSymbol | ||||||
|  | 	mlTable   []decSymbol | ||||||
|  | 	ofTable   []decSymbol | ||||||
|  | 	llState   uint64 | ||||||
|  | 	mlState   uint64 | ||||||
|  | 	ofState   uint64 | ||||||
|  | 	iteration int | ||||||
|  | 	seqs      []seqVals | ||||||
|  | 	litRemain int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | const noError = 0 | ||||||
|  | 
 | ||||||
|  | // error reported when mo == 0 && ml > 0
 | ||||||
|  | const errorMatchLenOfsMismatch = 1 | ||||||
|  | 
 | ||||||
|  | // error reported when ml > maxMatchLen
 | ||||||
|  | const errorMatchLenTooBig = 2 | ||||||
|  | 
 | ||||||
|  | // error reported when mo > available history or mo > s.windowSize
 | ||||||
|  | const errorMatchOffTooBig = 3 | ||||||
|  | 
 | ||||||
|  | // error reported when the sum of literal lengths exceeds the literal buffer size
 | ||||||
|  | const errorNotEnoughLiterals = 4 | ||||||
|  | 
 | ||||||
|  | // error reported when capacity of `out` is too small
 | ||||||
|  | const errorNotEnoughSpace = 5 | ||||||
|  | 
 | ||||||
|  | // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 | ||||||
|  | //
 | ||||||
|  | // Please refer to seqdec_generic.go for the reference implementation.
 | ||||||
|  | //go:noescape
 | ||||||
|  | func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int | ||||||
|  | 
 | ||||||
|  | // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 | ||||||
|  | //
 | ||||||
|  | // Please refer to seqdec_generic.go for the reference implementation.
 | ||||||
|  | //go:noescape
 | ||||||
|  | func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int | ||||||
|  | 
 | ||||||
|  | // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
 | ||||||
|  | //go:noescape
 | ||||||
|  | func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int | ||||||
|  | 
 | ||||||
|  | // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
 | ||||||
|  | //go:noescape
 | ||||||
|  | func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int | ||||||
|  | 
 | ||||||
|  | // decode sequences from the stream without the provided history.
 | ||||||
|  | func (s *sequenceDecs) decode(seqs []seqVals) error { | ||||||
|  | 	br := s.br | ||||||
|  | 
 | ||||||
|  | 	maxBlockSize := maxCompressedBlockSize | ||||||
|  | 	if s.windowSize < maxBlockSize { | ||||||
|  | 		maxBlockSize = s.windowSize | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	ctx := decodeAsmContext{ | ||||||
|  | 		llTable:   s.litLengths.fse.dt[:maxTablesize], | ||||||
|  | 		mlTable:   s.matchLengths.fse.dt[:maxTablesize], | ||||||
|  | 		ofTable:   s.offsets.fse.dt[:maxTablesize], | ||||||
|  | 		llState:   uint64(s.litLengths.state.state), | ||||||
|  | 		mlState:   uint64(s.matchLengths.state.state), | ||||||
|  | 		ofState:   uint64(s.offsets.state.state), | ||||||
|  | 		seqs:      seqs, | ||||||
|  | 		iteration: len(seqs) - 1, | ||||||
|  | 		litRemain: len(s.literals), | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	s.seqSize = 0 | ||||||
|  | 	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 | ||||||
|  | 	var errCode int | ||||||
|  | 	if cpuinfo.HasBMI2() { | ||||||
|  | 		if lte56bits { | ||||||
|  | 			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) | ||||||
|  | 		} else { | ||||||
|  | 			errCode = sequenceDecs_decode_bmi2(s, br, &ctx) | ||||||
|  | 		} | ||||||
|  | 	} else { | ||||||
|  | 		if lte56bits { | ||||||
|  | 			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) | ||||||
|  | 		} else { | ||||||
|  | 			errCode = sequenceDecs_decode_amd64(s, br, &ctx) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if errCode != 0 { | ||||||
|  | 		i := len(seqs) - ctx.iteration - 1 | ||||||
|  | 		switch errCode { | ||||||
|  | 		case errorMatchLenOfsMismatch: | ||||||
|  | 			ml := ctx.seqs[i].ml | ||||||
|  | 			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) | ||||||
|  | 
 | ||||||
|  | 		case errorMatchLenTooBig: | ||||||
|  | 			ml := ctx.seqs[i].ml | ||||||
|  | 			return fmt.Errorf("match len (%d) bigger than max allowed length", ml) | ||||||
|  | 
 | ||||||
|  | 		case errorNotEnoughLiterals: | ||||||
|  | 			ll := ctx.seqs[i].ll | ||||||
|  | 			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if ctx.litRemain < 0 { | ||||||
|  | 		return fmt.Errorf("literal count is too big: total available %d, total requested %d", | ||||||
|  | 			len(s.literals), len(s.literals)-ctx.litRemain) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	s.seqSize += ctx.litRemain | ||||||
|  | 	if s.seqSize > maxBlockSize { | ||||||
|  | 		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) | ||||||
|  | 	} | ||||||
|  | 	err := br.close() | ||||||
|  | 	if err != nil { | ||||||
|  | 		printf("Closing sequences: %v, %+v\n", err, *br) | ||||||
|  | 	} | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // --------------------------------------------------------------------------------
 | ||||||
|  | 
 | ||||||
|  | type executeAsmContext struct { | ||||||
|  | 	seqs        []seqVals | ||||||
|  | 	seqIndex    int | ||||||
|  | 	out         []byte | ||||||
|  | 	history     []byte | ||||||
|  | 	literals    []byte | ||||||
|  | 	outPosition int | ||||||
|  | 	litPosition int | ||||||
|  | 	windowSize  int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
 | ||||||
|  | //
 | ||||||
|  | // Returns false if a match offset is too big.
 | ||||||
|  | //
 | ||||||
|  | // Please refer to seqdec_generic.go for the reference implementation.
 | ||||||
|  | //go:noescape
 | ||||||
|  | func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool | ||||||
|  | 
 | ||||||
|  | // Same as above, but with safe memcopies
 | ||||||
|  | //go:noescape
 | ||||||
|  | func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool | ||||||
|  | 
 | ||||||
|  | // executeSimple handles cases when dictionary is not used.
 | ||||||
|  | func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { | ||||||
|  | 	// Ensure we have enough output size...
 | ||||||
|  | 	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { | ||||||
|  | 		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc | ||||||
|  | 		s.out = append(s.out, make([]byte, addBytes)...) | ||||||
|  | 		s.out = s.out[:len(s.out)-addBytes] | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if debugDecoder { | ||||||
|  | 		printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	var t = len(s.out) | ||||||
|  | 	out := s.out[:t+s.seqSize] | ||||||
|  | 
 | ||||||
|  | 	ctx := executeAsmContext{ | ||||||
|  | 		seqs:        seqs, | ||||||
|  | 		seqIndex:    0, | ||||||
|  | 		out:         out, | ||||||
|  | 		history:     hist, | ||||||
|  | 		outPosition: t, | ||||||
|  | 		litPosition: 0, | ||||||
|  | 		literals:    s.literals, | ||||||
|  | 		windowSize:  s.windowSize, | ||||||
|  | 	} | ||||||
|  | 	var ok bool | ||||||
|  | 	if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { | ||||||
|  | 		ok = sequenceDecs_executeSimple_safe_amd64(&ctx) | ||||||
|  | 	} else { | ||||||
|  | 		ok = sequenceDecs_executeSimple_amd64(&ctx) | ||||||
|  | 	} | ||||||
|  | 	if !ok { | ||||||
|  | 		return fmt.Errorf("match offset (%d) bigger than current history (%d)", | ||||||
|  | 			seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) | ||||||
|  | 	} | ||||||
|  | 	s.literals = s.literals[ctx.litPosition:] | ||||||
|  | 	t = ctx.outPosition | ||||||
|  | 
 | ||||||
|  | 	// Add final literals
 | ||||||
|  | 	copy(out[t:], s.literals) | ||||||
|  | 	if debugDecoder { | ||||||
|  | 		t += len(s.literals) | ||||||
|  | 		if t != len(out) { | ||||||
|  | 			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	s.out = out | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
											
												
File diff suppressed because it is too large
								| @ -0,0 +1,237 @@ | |||||||
|  | //go:build !amd64 || appengine || !gc || noasm
 | ||||||
|  | // +build !amd64 appengine !gc noasm
 | ||||||
|  | 
 | ||||||
|  | package zstd | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"fmt" | ||||||
|  | 	"io" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // decode sequences from the stream with the provided history but without dictionary.
 | ||||||
|  | func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { | ||||||
|  | 	return false, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // decode sequences from the stream without the provided history.
 | ||||||
|  | func (s *sequenceDecs) decode(seqs []seqVals) error { | ||||||
|  | 	br := s.br | ||||||
|  | 
 | ||||||
|  | 	// Grab full sizes tables, to avoid bounds checks.
 | ||||||
|  | 	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] | ||||||
|  | 	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state | ||||||
|  | 	s.seqSize = 0 | ||||||
|  | 	litRemain := len(s.literals) | ||||||
|  | 
 | ||||||
|  | 	maxBlockSize := maxCompressedBlockSize | ||||||
|  | 	if s.windowSize < maxBlockSize { | ||||||
|  | 		maxBlockSize = s.windowSize | ||||||
|  | 	} | ||||||
|  | 	for i := range seqs { | ||||||
|  | 		var ll, mo, ml int | ||||||
|  | 		if br.off > 4+((maxOffsetBits+16+16)>>3) { | ||||||
|  | 			// inlined function:
 | ||||||
|  | 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
 | ||||||
|  | 
 | ||||||
|  | 			// Final will not read from stream.
 | ||||||
|  | 			var llB, mlB, moB uint8 | ||||||
|  | 			ll, llB = llState.final() | ||||||
|  | 			ml, mlB = mlState.final() | ||||||
|  | 			mo, moB = ofState.final() | ||||||
|  | 
 | ||||||
|  | 			// extra bits are stored in reverse order.
 | ||||||
|  | 			br.fillFast() | ||||||
|  | 			mo += br.getBits(moB) | ||||||
|  | 			if s.maxBits > 32 { | ||||||
|  | 				br.fillFast() | ||||||
|  | 			} | ||||||
|  | 			ml += br.getBits(mlB) | ||||||
|  | 			ll += br.getBits(llB) | ||||||
|  | 
 | ||||||
|  | 			if moB > 1 { | ||||||
|  | 				s.prevOffset[2] = s.prevOffset[1] | ||||||
|  | 				s.prevOffset[1] = s.prevOffset[0] | ||||||
|  | 				s.prevOffset[0] = mo | ||||||
|  | 			} else { | ||||||
|  | 				// mo = s.adjustOffset(mo, ll, moB)
 | ||||||
|  | 				// Inlined for rather big speedup
 | ||||||
|  | 				if ll == 0 { | ||||||
|  | 					// There is an exception though, when current sequence's literals_length = 0.
 | ||||||
|  | 					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
 | ||||||
|  | 					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
 | ||||||
|  | 					mo++ | ||||||
|  | 				} | ||||||
|  | 
 | ||||||
|  | 				if mo == 0 { | ||||||
|  | 					mo = s.prevOffset[0] | ||||||
|  | 				} else { | ||||||
|  | 					var temp int | ||||||
|  | 					if mo == 3 { | ||||||
|  | 						temp = s.prevOffset[0] - 1 | ||||||
|  | 					} else { | ||||||
|  | 						temp = s.prevOffset[mo] | ||||||
|  | 					} | ||||||
|  | 
 | ||||||
|  | 					if temp == 0 { | ||||||
|  | 						// 0 is not valid; input is corrupted; force offset to 1
 | ||||||
|  | 						println("WARNING: temp was 0") | ||||||
|  | 						temp = 1 | ||||||
|  | 					} | ||||||
|  | 
 | ||||||
|  | 					if mo != 1 { | ||||||
|  | 						s.prevOffset[2] = s.prevOffset[1] | ||||||
|  | 					} | ||||||
|  | 					s.prevOffset[1] = s.prevOffset[0] | ||||||
|  | 					s.prevOffset[0] = temp | ||||||
|  | 					mo = temp | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			br.fillFast() | ||||||
|  | 		} else { | ||||||
|  | 			if br.overread() { | ||||||
|  | 				if debugDecoder { | ||||||
|  | 					printf("reading sequence %d, exceeded available data\n", i) | ||||||
|  | 				} | ||||||
|  | 				return io.ErrUnexpectedEOF | ||||||
|  | 			} | ||||||
|  | 			ll, mo, ml = s.next(br, llState, mlState, ofState) | ||||||
|  | 			br.fill() | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if debugSequences { | ||||||
|  | 			println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) | ||||||
|  | 		} | ||||||
|  | 		// Evaluate.
 | ||||||
|  | 		// We might be doing this async, so do it early.
 | ||||||
|  | 		if mo == 0 && ml > 0 { | ||||||
|  | 			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) | ||||||
|  | 		} | ||||||
|  | 		if ml > maxMatchLen { | ||||||
|  | 			return fmt.Errorf("match len (%d) bigger than max allowed length", ml) | ||||||
|  | 		} | ||||||
|  | 		s.seqSize += ll + ml | ||||||
|  | 		if s.seqSize > maxBlockSize { | ||||||
|  | 			return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) | ||||||
|  | 		} | ||||||
|  | 		litRemain -= ll | ||||||
|  | 		if litRemain < 0 { | ||||||
|  | 			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) | ||||||
|  | 		} | ||||||
|  | 		seqs[i] = seqVals{ | ||||||
|  | 			ll: ll, | ||||||
|  | 			ml: ml, | ||||||
|  | 			mo: mo, | ||||||
|  | 		} | ||||||
|  | 		if i == len(seqs)-1 { | ||||||
|  | 			// This is the last sequence, so we shouldn't update state.
 | ||||||
|  | 			break | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Manually inlined, ~ 5-20% faster
 | ||||||
|  | 		// Update all 3 states at once. Approx 20% faster.
 | ||||||
|  | 		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() | ||||||
|  | 		if nBits == 0 { | ||||||
|  | 			llState = llTable[llState.newState()&maxTableMask] | ||||||
|  | 			mlState = mlTable[mlState.newState()&maxTableMask] | ||||||
|  | 			ofState = ofTable[ofState.newState()&maxTableMask] | ||||||
|  | 		} else { | ||||||
|  | 			bits := br.get32BitsFast(nBits) | ||||||
|  | 			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) | ||||||
|  | 			llState = llTable[(llState.newState()+lowBits)&maxTableMask] | ||||||
|  | 
 | ||||||
|  | 			lowBits = uint16(bits >> (ofState.nbBits() & 31)) | ||||||
|  | 			lowBits &= bitMask[mlState.nbBits()&15] | ||||||
|  | 			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] | ||||||
|  | 
 | ||||||
|  | 			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] | ||||||
|  | 			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	s.seqSize += litRemain | ||||||
|  | 	if s.seqSize > maxBlockSize { | ||||||
|  | 		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) | ||||||
|  | 	} | ||||||
|  | 	err := br.close() | ||||||
|  | 	if err != nil { | ||||||
|  | 		printf("Closing sequences: %v, %+v\n", err, *br) | ||||||
|  | 	} | ||||||
|  | 	return err | ||||||
|  | } | ||||||
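The inlined branch above applies the repeat-offset rules. As an illustrative aside (not part of the file shown), the same logic written as a hypothetical standalone helper may be easier to follow; prev holds the three most recent offsets, offVal is the small value mo carries in that branch (0-2 before the literals_length adjustment), and ll is the literal length of the current sequence.

func adjustRepeatOffset(prev *[3]int, offVal, ll int) int {
	if ll == 0 {
		// Repeated offsets are shifted by one when literals_length == 0.
		offVal++
	}
	if offVal == 0 {
		// Reuse the most recent offset; the history stays unchanged.
		return prev[0]
	}
	var off int
	if offVal == 3 {
		off = prev[0] - 1
	} else {
		off = prev[offVal]
	}
	if off == 0 {
		// Invalid (corrupt input); the decoder above forces it to 1.
		off = 1
	}
	if offVal != 1 {
		prev[2] = prev[1]
	}
	prev[1] = prev[0]
	prev[0] = off
	return off
}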
|  | 
 | ||||||
|  | // executeSimple handles cases when a dictionary is not used.
 | ||||||
|  | func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { | ||||||
|  | 	// Ensure we have enough output size...
 | ||||||
|  | 	if len(s.out)+s.seqSize > cap(s.out) { | ||||||
|  | 		addBytes := s.seqSize + len(s.out) | ||||||
|  | 		s.out = append(s.out, make([]byte, addBytes)...) | ||||||
|  | 		s.out = s.out[:len(s.out)-addBytes] | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if debugDecoder { | ||||||
|  | 		printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	var t = len(s.out) | ||||||
|  | 	out := s.out[:t+s.seqSize] | ||||||
|  | 
 | ||||||
|  | 	for _, seq := range seqs { | ||||||
|  | 		// Add literals
 | ||||||
|  | 		copy(out[t:], s.literals[:seq.ll]) | ||||||
|  | 		t += seq.ll | ||||||
|  | 		s.literals = s.literals[seq.ll:] | ||||||
|  | 
 | ||||||
|  | 		// Malformed input
 | ||||||
|  | 		if seq.mo > t+len(hist) || seq.mo > s.windowSize { | ||||||
|  | 			return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Copy from history.
 | ||||||
|  | 		if v := seq.mo - t; v > 0 { | ||||||
|  | 			// v is the start position in history from end.
 | ||||||
|  | 			start := len(hist) - v | ||||||
|  | 			if seq.ml > v { | ||||||
|  | 				// Some goes into the current block.
 | ||||||
|  | 				// Copy remainder of history
 | ||||||
|  | 				copy(out[t:], hist[start:]) | ||||||
|  | 				t += v | ||||||
|  | 				seq.ml -= v | ||||||
|  | 			} else { | ||||||
|  | 				copy(out[t:], hist[start:start+seq.ml]) | ||||||
|  | 				t += seq.ml | ||||||
|  | 				continue | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// We must be in the current buffer now
 | ||||||
|  | 		if seq.ml > 0 { | ||||||
|  | 			start := t - seq.mo | ||||||
|  | 			if seq.ml <= t-start { | ||||||
|  | 				// No overlap
 | ||||||
|  | 				copy(out[t:], out[start:start+seq.ml]) | ||||||
|  | 				t += seq.ml | ||||||
|  | 			} else { | ||||||
|  | 				// Overlapping copy
 | ||||||
|  | 				// Extend destination slice and copy one byte at a time.
 | ||||||
|  | 				src := out[start : start+seq.ml] | ||||||
|  | 				dst := out[t:] | ||||||
|  | 				dst = dst[:len(src)] | ||||||
|  | 				t += len(src) | ||||||
|  | 				// Destination is the space we just added.
 | ||||||
|  | 				for i := range src { | ||||||
|  | 					dst[i] = src[i] | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Add final literals
 | ||||||
|  | 	copy(out[t:], s.literals) | ||||||
|  | 	if debugDecoder { | ||||||
|  | 		t += len(s.literals) | ||||||
|  | 		if t != len(out) { | ||||||
|  | 			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	s.out = out | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
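The byte-at-a-time loop in the overlapping case above is deliberate: an LZ-style match may read bytes it has just produced, and Go's built-in copy has memmove semantics (it preserves the original source bytes), so it would not replicate a short pattern. A small self-contained sketch of the difference, with made-up data:

package main

import "fmt"

func main() {
	out := []byte("ab\x00\x00\x00\x00")
	t, start, ml := 2, 1, 4 // match offset 1, length 4, written at position 2

	// The byte-at-a-time copy re-reads bytes written in this same loop,
	// replicating the last byte.
	for i := 0; i < ml; i++ {
		out[t+i] = out[start+i]
	}
	fmt.Println(string(out)) // abbbbb

	// copy(out[t:t+ml], out[start:start+ml]) would instead behave like
	// memmove and produce "abb" followed by the original zero bytes.
}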
| @ -0,0 +1,81 @@ | |||||||
|  | package subrequests | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"encoding/json" | ||||||
|  | 	"fmt" | ||||||
|  | 	"io" | ||||||
|  | 	"strings" | ||||||
|  | 	"text/tabwriter" | ||||||
|  | 
 | ||||||
|  | 	"github.com/moby/buildkit/frontend/gateway/client" | ||||||
|  | 	gwpb "github.com/moby/buildkit/frontend/gateway/pb" | ||||||
|  | 	"github.com/moby/buildkit/solver/errdefs" | ||||||
|  | 	"github.com/pkg/errors" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | const RequestSubrequestsDescribe = "frontend.subrequests.describe" | ||||||
|  | 
 | ||||||
|  | var SubrequestsDescribeDefinition = Request{ | ||||||
|  | 	Name:        RequestSubrequestsDescribe, | ||||||
|  | 	Version:     "1.0.0", | ||||||
|  | 	Type:        TypeRPC, | ||||||
|  | 	Description: "List available subrequest types", | ||||||
|  | 	Metadata: []Named{ | ||||||
|  | 		{Name: "result.json"}, | ||||||
|  | 		{Name: "result.txt"}, | ||||||
|  | 	}, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func Describe(ctx context.Context, c client.Client) ([]Request, error) { | ||||||
|  | 	gwcaps := c.BuildOpts().Caps | ||||||
|  | 
 | ||||||
|  | 	if err := (&gwcaps).Supports(gwpb.CapFrontendCaps); err != nil { | ||||||
|  | 		return nil, errdefs.NewUnsupportedSubrequestError(RequestSubrequestsDescribe) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	res, err := c.Solve(ctx, client.SolveRequest{ | ||||||
|  | 		FrontendOpt: map[string]string{ | ||||||
|  | 			"requestid":     RequestSubrequestsDescribe, | ||||||
|  | 			"frontend.caps": "moby.buildkit.frontend.subrequests", | ||||||
|  | 		}, | ||||||
|  | 		Frontend: "dockerfile.v0", | ||||||
|  | 	}) | ||||||
|  | 	if err != nil { | ||||||
|  | 		var reqErr *errdefs.UnsupportedSubrequestError | ||||||
|  | 		if errors.As(err, &reqErr) { | ||||||
|  | 			return nil, err | ||||||
|  | 		} | ||||||
|  | 		var capErr *errdefs.UnsupportedFrontendCapError | ||||||
|  | 		if errors.As(err, &capErr) { | ||||||
|  | 			return nil, errdefs.NewUnsupportedSubrequestError(RequestSubrequestsDescribe) | ||||||
|  | 		} | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	dt, ok := res.Metadata["result.json"] | ||||||
|  | 	if !ok { | ||||||
|  | 		return nil, errors.Errorf("no result.json metadata in response") | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	var reqs []Request | ||||||
|  | 	if err := json.Unmarshal(dt, &reqs); err != nil { | ||||||
|  | 		return nil, errors.Wrap(err, "failed to parse describe result") | ||||||
|  | 	} | ||||||
|  | 	return reqs, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func PrintDescribe(dt []byte, w io.Writer) error { | ||||||
|  | 	var d []Request | ||||||
|  | 	if err := json.Unmarshal(dt, &d); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0) | ||||||
|  | 	fmt.Fprintf(tw, "NAME\tVERSION\tDESCRIPTION\n") | ||||||
|  | 
 | ||||||
|  | 	for _, r := range d { | ||||||
|  | 		fmt.Fprintf(tw, "%s\t%s\t%s\n", strings.TrimPrefix(r.Name, "frontend."), r.Version, r.Description) | ||||||
|  | 	} | ||||||
|  | 	return tw.Flush() | ||||||
|  | } | ||||||
| @ -0,0 +1,146 @@ | |||||||
|  | package outline | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"encoding/json" | ||||||
|  | 	"fmt" | ||||||
|  | 	"io" | ||||||
|  | 	"text/tabwriter" | ||||||
|  | 
 | ||||||
|  | 	"github.com/moby/buildkit/frontend/gateway/client" | ||||||
|  | 	"github.com/moby/buildkit/frontend/subrequests" | ||||||
|  | 	"github.com/moby/buildkit/solver/pb" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | const RequestSubrequestsOutline = "frontend.outline" | ||||||
|  | 
 | ||||||
|  | var SubrequestsOutlineDefinition = subrequests.Request{ | ||||||
|  | 	Name:        RequestSubrequestsOutline, | ||||||
|  | 	Version:     "1.0.0", | ||||||
|  | 	Type:        subrequests.TypeRPC, | ||||||
|  | 	Description: "List all parameters current build target supports", | ||||||
|  | 	Opts: []subrequests.Named{ | ||||||
|  | 		{ | ||||||
|  | 			Name:        "target", | ||||||
|  | 			Description: "Target build stage", | ||||||
|  | 		}, | ||||||
|  | 	}, | ||||||
|  | 	Metadata: []subrequests.Named{ | ||||||
|  | 		{Name: "result.json"}, | ||||||
|  | 		{Name: "result.txt"}, | ||||||
|  | 	}, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type Outline struct { | ||||||
|  | 	Name        string       `json:"name,omitempty"` | ||||||
|  | 	Description string       `json:"description,omitempty"` | ||||||
|  | 	Args        []Arg        `json:"args,omitempty"` | ||||||
|  | 	Secrets     []Secret     `json:"secrets,omitempty"` | ||||||
|  | 	SSH         []SSH        `json:"ssh,omitempty"` | ||||||
|  | 	Cache       []CacheMount `json:"cache,omitempty"` | ||||||
|  | 	Sources     [][]byte     `json:"sources,omitempty"` | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (o Outline) ToResult() (*client.Result, error) { | ||||||
|  | 	res := client.NewResult() | ||||||
|  | 	dt, err := json.MarshalIndent(o, "", "  ") | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	res.AddMeta("result.json", dt) | ||||||
|  | 
 | ||||||
|  | 	b := bytes.NewBuffer(nil) | ||||||
|  | 	if err := PrintOutline(dt, b); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	res.AddMeta("result.txt", b.Bytes()) | ||||||
|  | 
 | ||||||
|  | 	res.AddMeta("version", []byte(SubrequestsOutlineDefinition.Version)) | ||||||
|  | 	return res, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type Arg struct { | ||||||
|  | 	Name        string       `json:"name"` | ||||||
|  | 	Description string       `json:"description,omitempty"` | ||||||
|  | 	Value       string       `json:"value,omitempty"` | ||||||
|  | 	Location    *pb.Location `json:"location,omitempty"` | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type Secret struct { | ||||||
|  | 	Name     string       `json:"name"` | ||||||
|  | 	Required bool         `json:"required,omitempty"` | ||||||
|  | 	Location *pb.Location `json:"location,omitempty"` | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type SSH struct { | ||||||
|  | 	Name     string       `json:"name"` | ||||||
|  | 	Required bool         `json:"required,omitempty"` | ||||||
|  | 	Location *pb.Location `json:"location,omitempty"` | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type CacheMount struct { | ||||||
|  | 	ID       string       `json:"ID"` | ||||||
|  | 	Location *pb.Location `json:"location,omitempty"` | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func PrintOutline(dt []byte, w io.Writer) error { | ||||||
|  | 	var o Outline | ||||||
|  | 
 | ||||||
|  | 	if err := json.Unmarshal(dt, &o); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if o.Name != "" || o.Description != "" { | ||||||
|  | 		tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0) | ||||||
|  | 		name := o.Name | ||||||
|  | 		if o.Name == "" { | ||||||
|  | 			name = "(default)" | ||||||
|  | 		} | ||||||
|  | 		fmt.Fprintf(tw, "TARGET:\t%s\n", name) | ||||||
|  | 		if o.Description != "" { | ||||||
|  | 			fmt.Fprintf(tw, "DESCRIPTION:\t%s\n", o.Description) | ||||||
|  | 		} | ||||||
|  | 		tw.Flush() | ||||||
|  | 		fmt.Println() | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(o.Args) > 0 { | ||||||
|  | 		tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) | ||||||
|  | 		fmt.Fprintf(tw, "BUILD ARG\tVALUE\tDESCRIPTION\n") | ||||||
|  | 		for _, a := range o.Args { | ||||||
|  | 			fmt.Fprintf(tw, "%s\t%s\t%s\n", a.Name, a.Value, a.Description) | ||||||
|  | 		} | ||||||
|  | 		tw.Flush() | ||||||
|  | 		fmt.Println() | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(o.Secrets) > 0 { | ||||||
|  | 		tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) | ||||||
|  | 		fmt.Fprintf(tw, "SECRET\tREQUIRED\n") | ||||||
|  | 		for _, s := range o.Secrets { | ||||||
|  | 			b := "" | ||||||
|  | 			if s.Required { | ||||||
|  | 				b = "true" | ||||||
|  | 			} | ||||||
|  | 			fmt.Fprintf(tw, "%s\t%s\n", s.Name, b) | ||||||
|  | 		} | ||||||
|  | 		tw.Flush() | ||||||
|  | 		fmt.Println() | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(o.SSH) > 0 { | ||||||
|  | 		tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) | ||||||
|  | 		fmt.Fprintf(tw, "SSH\tREQUIRED\n") | ||||||
|  | 		for _, s := range o.SSH { | ||||||
|  | 			b := "" | ||||||
|  | 			if s.Required { | ||||||
|  | 				b = "true" | ||||||
|  | 			} | ||||||
|  | 			fmt.Fprintf(tw, "%s\t%s\n", s.Name, b) | ||||||
|  | 		} | ||||||
|  | 		tw.Flush() | ||||||
|  | 		fmt.Println() | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
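As a rough illustration (all values made up), a frontend could fill in an Outline and either return it via ToResult or render the text form directly with PrintOutline; json, log, and os are from the standard library:

o := outline.Outline{
	Name:        "release",
	Description: "Build the release binary",
	Args: []outline.Arg{
		{Name: "GO_VERSION", Value: "1.19", Description: "Go toolchain version"},
	},
}

// Render the text form that would normally be stored as result.txt.
dt, err := json.Marshal(o)
if err != nil {
	log.Fatal(err)
}
if err := outline.PrintOutline(dt, os.Stdout); err != nil {
	log.Fatal(err)
}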
| @ -0,0 +1,84 @@ | |||||||
|  | package targets | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"encoding/json" | ||||||
|  | 	"fmt" | ||||||
|  | 	"io" | ||||||
|  | 	"text/tabwriter" | ||||||
|  | 
 | ||||||
|  | 	"github.com/moby/buildkit/frontend/gateway/client" | ||||||
|  | 	"github.com/moby/buildkit/frontend/subrequests" | ||||||
|  | 	"github.com/moby/buildkit/solver/pb" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | const RequestTargets = "frontend.targets" | ||||||
|  | 
 | ||||||
|  | var SubrequestsTargetsDefinition = subrequests.Request{ | ||||||
|  | 	Name:        RequestTargets, | ||||||
|  | 	Version:     "1.0.0", | ||||||
|  | 	Type:        subrequests.TypeRPC, | ||||||
|  | 	Description: "List all targets current build supports", | ||||||
|  | 	Opts:        []subrequests.Named{}, | ||||||
|  | 	Metadata: []subrequests.Named{ | ||||||
|  | 		{Name: "result.json"}, | ||||||
|  | 		{Name: "result.txt"}, | ||||||
|  | 	}, | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type List struct { | ||||||
|  | 	Targets []Target `json:"targets"` | ||||||
|  | 	Sources [][]byte `json:"sources"` | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (l List) ToResult() (*client.Result, error) { | ||||||
|  | 	res := client.NewResult() | ||||||
|  | 	dt, err := json.MarshalIndent(l, "", "  ") | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	res.AddMeta("result.json", dt) | ||||||
|  | 
 | ||||||
|  | 	b := bytes.NewBuffer(nil) | ||||||
|  | 	if err := PrintTargets(dt, b); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	res.AddMeta("result.txt", b.Bytes()) | ||||||
|  | 
 | ||||||
|  | 	res.AddMeta("version", []byte(SubrequestsTargetsDefinition.Version)) | ||||||
|  | 	return res, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type Target struct { | ||||||
|  | 	Name        string       `json:"name,omitempty"` | ||||||
|  | 	Default     bool         `json:"default,omitempty"` | ||||||
|  | 	Description string       `json:"description,omitempty"` | ||||||
|  | 	Base        string       `json:"base,omitempty"` | ||||||
|  | 	Platform    string       `json:"platform,omitempty"` | ||||||
|  | 	Location    *pb.Location `json:"location,omitempty"` | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func PrintTargets(dt []byte, w io.Writer) error { | ||||||
|  | 	var l List | ||||||
|  | 
 | ||||||
|  | 	if err := json.Unmarshal(dt, &l); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0) | ||||||
|  | 	fmt.Fprintf(tw, "TARGET\tDESCRIPTION\n") | ||||||
|  | 
 | ||||||
|  | 	for _, t := range l.Targets { | ||||||
|  | 		name := t.Name | ||||||
|  | 		if name == "" && t.Default { | ||||||
|  | 			name = "(default)" | ||||||
|  | 		} else { | ||||||
|  | 			if t.Default { | ||||||
|  | 				name = fmt.Sprintf("%s (default)", name) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		fmt.Fprintf(tw, "%s\t%s\n", name, t.Description) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return tw.Flush() | ||||||
|  | } | ||||||
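For illustration (target names made up), PrintTargets labels the default stage: an unnamed default target is shown as "(default)", and a named default gets a " (default)" suffix:

l := targets.List{
	Targets: []targets.Target{
		{Name: "", Default: true, Description: "final image"},
		{Name: "test", Description: "run the unit tests"},
	},
}
dt, _ := json.Marshal(l)
_ = targets.PrintTargets(dt, os.Stdout)
// Prints roughly:
// TARGET    DESCRIPTION
// (default) final image
// test      run the unit tests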
| @ -0,0 +1,21 @@ | |||||||
|  | package subrequests | ||||||
|  | 
 | ||||||
|  | type Request struct { | ||||||
|  | 	Name        string      `json:"name"` | ||||||
|  | 	Version     string      `json:"version"` | ||||||
|  | 	Type        RequestType `json:"type"` | ||||||
|  | 	Description string      `json:"description"` | ||||||
|  | 	Opts        []Named     `json:"opts"` | ||||||
|  | 	Inputs      []Named     `json:"inputs"` | ||||||
|  | 	Metadata    []Named     `json:"metadata"` | ||||||
|  | 	Refs        []Named     `json:"refs"` | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type Named struct { | ||||||
|  | 	Name        string `json:"name"` | ||||||
|  | 	Description string `json:"description"` | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type RequestType string | ||||||
|  | 
 | ||||||
|  | const TypeRPC RequestType = "rpc" | ||||||
| @ -0,0 +1,95 @@ | |||||||
|  | package gitutil | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"regexp" | ||||||
|  | 	"strings" | ||||||
|  | 
 | ||||||
|  | 	"github.com/containerd/containerd/errdefs" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // GitRef represents a git ref.
 | ||||||
|  | //
 | ||||||
|  | // Examples:
 | ||||||
|  | // - "https://github.com/foo/bar.git#baz/qux:quux/quuz" is parsed into:
 | ||||||
|  | //   {Remote: "https://github.com/foo/bar.git", ShortName: "bar", Commit:"baz/qux", SubDir: "quux/quuz"}.
 | ||||||
|  | type GitRef struct { | ||||||
|  | 	// Remote is the remote repository path.
 | ||||||
|  | 	Remote string | ||||||
|  | 
 | ||||||
|  | 	// ShortName is the directory name of the repo.
 | ||||||
|  | 	// e.g., "bar" for "https://github.com/foo/bar.git"
 | ||||||
|  | 	ShortName string | ||||||
|  | 
 | ||||||
|  | 	// Commit is a commit hash, a tag, or branch name.
 | ||||||
|  | 	// Commit is optional.
 | ||||||
|  | 	Commit string | ||||||
|  | 
 | ||||||
|  | 	// SubDir is a directory path inside the repo.
 | ||||||
|  | 	// SubDir is optional.
 | ||||||
|  | 	SubDir string | ||||||
|  | 
 | ||||||
|  | 	// IndistinguishableFromLocal is true for a ref that is indistinguishable from a local file path,
 | ||||||
|  | 	// e.g., "github.com/foo/bar".
 | ||||||
|  | 	//
 | ||||||
|  | 	// Deprecated.
 | ||||||
|  | 	// Instead, use a distinguishable form such as "https://github.com/foo/bar.git".
 | ||||||
|  | 	//
 | ||||||
|  | 	// The dockerfile frontend still accepts this form only for build contexts.
 | ||||||
|  | 	IndistinguishableFromLocal bool | ||||||
|  | 
 | ||||||
|  | 	// UnencryptedTCP is true for a ref that needs an unencrypted TCP connection,
 | ||||||
|  | 	// e.g., "git://..." and "http://..." .
 | ||||||
|  | 	//
 | ||||||
|  | 	// Discouraged, although not deprecated.
 | ||||||
|  | 	// Instead, consider using an encrypted TCP connection such as "git@github.com/foo/bar.git" or "https://github.com/foo/bar.git".
 | ||||||
|  | 	UnencryptedTCP bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
 | ||||||
|  | 
 | ||||||
|  | // ParseGitRef parses a git ref.
 | ||||||
|  | func ParseGitRef(ref string) (*GitRef, error) { | ||||||
|  | 	res := &GitRef{} | ||||||
|  | 
 | ||||||
|  | 	if strings.HasPrefix(ref, "github.com/") { | ||||||
|  | 		res.IndistinguishableFromLocal = true // Deprecated
 | ||||||
|  | 	} else { | ||||||
|  | 		_, proto := ParseProtocol(ref) | ||||||
|  | 		switch proto { | ||||||
|  | 		case UnknownProtocol: | ||||||
|  | 			return nil, errdefs.ErrInvalidArgument | ||||||
|  | 		} | ||||||
|  | 		switch proto { | ||||||
|  | 		case HTTPProtocol, GitProtocol: | ||||||
|  | 			res.UnencryptedTCP = true // Discouraged, but not deprecated
 | ||||||
|  | 		} | ||||||
|  | 		switch proto { | ||||||
|  | 		// An HTTP(S) URL is considered to be a valid git ref only when it has the ".git[...]" suffix.
 | ||||||
|  | 		case HTTPProtocol, HTTPSProtocol: | ||||||
|  | 			var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`) | ||||||
|  | 			if !gitURLPathWithFragmentSuffix.MatchString(ref) { | ||||||
|  | 				return nil, errdefs.ErrInvalidArgument | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	refSplitBySharp := strings.SplitN(ref, "#", 2) | ||||||
|  | 	res.Remote = refSplitBySharp[0] | ||||||
|  | 	if len(res.Remote) == 0 { | ||||||
|  | 		return res, errdefs.ErrInvalidArgument | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(refSplitBySharp) > 1 { | ||||||
|  | 		refSplitBySharpSplitByColon := strings.SplitN(refSplitBySharp[1], ":", 2) | ||||||
|  | 		res.Commit = refSplitBySharpSplitByColon[0] | ||||||
|  | 		if len(res.Commit) == 0 { | ||||||
|  | 			return res, errdefs.ErrInvalidArgument | ||||||
|  | 		} | ||||||
|  | 		if len(refSplitBySharpSplitByColon) > 1 { | ||||||
|  | 			res.SubDir = refSplitBySharpSplitByColon[1] | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	repoSplitBySlash := strings.Split(res.Remote, "/") | ||||||
|  | 	res.ShortName = strings.TrimSuffix(repoSplitBySlash[len(repoSplitBySlash)-1], ".git") | ||||||
|  | 	return res, nil | ||||||
|  | } | ||||||
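A quick sketch (hypothetical calling code) exercising ParseGitRef with the example from the GitRef doc comment:

ref, err := gitutil.ParseGitRef("https://github.com/foo/bar.git#baz/qux:quux/quuz")
if err != nil {
	log.Fatal(err)
}
fmt.Println(ref.Remote)    // https://github.com/foo/bar.git
fmt.Println(ref.ShortName) // bar
fmt.Println(ref.Commit)    // baz/qux
fmt.Println(ref.SubDir)    // quux/quuz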
								
									
										
											
										
									
									
										
vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go | 14 (generated, vendored)
								| @ -0,0 +1,36 @@ | |||||||
|  | /* | ||||||
|  |  * | ||||||
|  |  * Copyright 2020 gRPC authors. | ||||||
|  |  * | ||||||
|  |  * Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  |  * you may not use this file except in compliance with the License. | ||||||
|  |  * You may obtain a copy of the License at | ||||||
|  |  * | ||||||
|  |  *     http://www.apache.org/licenses/LICENSE-2.0
 | ||||||
|  |  * | ||||||
|  |  * Unless required by applicable law or agreed to in writing, software | ||||||
|  |  * distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  |  * See the License for the specific language governing permissions and | ||||||
|  |  * limitations under the License. | ||||||
|  |  * | ||||||
|  |  */ | ||||||
|  | 
 | ||||||
|  | // Package channelz exports internals of the channelz implementation as required
 | ||||||
|  | // by other gRPC packages.
 | ||||||
|  | //
 | ||||||
|  | // The implementation of the channelz spec as defined in
 | ||||||
|  | // https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
 | ||||||
|  | // the `internal/channelz` package.
 | ||||||
|  | //
 | ||||||
|  | // Experimental
 | ||||||
|  | //
 | ||||||
|  | // Notice: All APIs in this package are experimental and may be removed in a
 | ||||||
|  | // later release.
 | ||||||
|  | package channelz | ||||||
|  | 
 | ||||||
|  | import "google.golang.org/grpc/internal/channelz" | ||||||
|  | 
 | ||||||
|  | // Identifier is an opaque identifier which uniquely identifies an entity in the
 | ||||||
|  | // channelz database.
 | ||||||
|  | type Identifier = channelz.Identifier | ||||||
								
									
										
											
										
									
									
										
vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go | 382 (generated, vendored)
								| @ -0,0 +1,382 @@ | |||||||
|  | /* | ||||||
|  |  * | ||||||
|  |  * Copyright 2022 gRPC authors. | ||||||
|  |  * | ||||||
|  |  * Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  |  * you may not use this file except in compliance with the License. | ||||||
|  |  * You may obtain a copy of the License at | ||||||
|  |  * | ||||||
|  |  *     http://www.apache.org/licenses/LICENSE-2.0
 | ||||||
|  |  * | ||||||
|  |  * Unless required by applicable law or agreed to in writing, software | ||||||
|  |  * distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  |  * See the License for the specific language governing permissions and | ||||||
|  |  * limitations under the License. | ||||||
|  |  * | ||||||
|  |  */ | ||||||
|  | 
 | ||||||
|  | // Package gracefulswitch implements a graceful switch load balancer.
 | ||||||
|  | package gracefulswitch | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"errors" | ||||||
|  | 	"fmt" | ||||||
|  | 	"sync" | ||||||
|  | 
 | ||||||
|  | 	"google.golang.org/grpc/balancer" | ||||||
|  | 	"google.golang.org/grpc/balancer/base" | ||||||
|  | 	"google.golang.org/grpc/connectivity" | ||||||
|  | 	"google.golang.org/grpc/resolver" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") | ||||||
|  | var _ balancer.Balancer = (*Balancer)(nil) | ||||||
|  | 
 | ||||||
|  | // NewBalancer returns a graceful switch Balancer.
 | ||||||
|  | func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { | ||||||
|  | 	return &Balancer{ | ||||||
|  | 		cc:    cc, | ||||||
|  | 		bOpts: opts, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Balancer is a utility to gracefully switch from one balancer to
 | ||||||
|  | // a new balancer. It implements the balancer.Balancer interface.
 | ||||||
|  | type Balancer struct { | ||||||
|  | 	bOpts balancer.BuildOptions | ||||||
|  | 	cc    balancer.ClientConn | ||||||
|  | 
 | ||||||
|  | 	// mu protects the following fields and all fields within balancerCurrent
 | ||||||
|  | 	// and balancerPending. mu does not need to be held when calling into the
 | ||||||
|  | 	// child balancers, as all calls into these children happen only as a direct
 | ||||||
|  | 	// result of a call into the gracefulSwitchBalancer, which are also
 | ||||||
|  | 	// guaranteed to be synchronous. There is one exception: an UpdateState call
 | ||||||
|  | 	// from a child balancer when current and pending are populated can lead to
 | ||||||
|  | 	// calling Close() on the current. To prevent that racing with an
 | ||||||
|  | 	// UpdateSubConnState from the channel, we hold currentMu during Close and
 | ||||||
|  | 	// UpdateSubConnState calls.
 | ||||||
|  | 	mu              sync.Mutex | ||||||
|  | 	balancerCurrent *balancerWrapper | ||||||
|  | 	balancerPending *balancerWrapper | ||||||
|  | 	closed          bool // set to true when this balancer is closed
 | ||||||
|  | 
 | ||||||
|  | 	// currentMu must be locked before mu. This mutex guards against this
 | ||||||
|  | 	// sequence of events: UpdateSubConnState() called, finds the
 | ||||||
|  | 	// balancerCurrent, gives up lock, updateState comes in, causes Close() on
 | ||||||
|  | 	// balancerCurrent before the UpdateSubConnState is called on the
 | ||||||
|  | 	// balancerCurrent.
 | ||||||
|  | 	currentMu sync.Mutex | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // swap swaps out the current lb with the pending lb and updates the ClientConn.
 | ||||||
|  | // The caller must hold gsb.mu.
 | ||||||
|  | func (gsb *Balancer) swap() { | ||||||
|  | 	gsb.cc.UpdateState(gsb.balancerPending.lastState) | ||||||
|  | 	cur := gsb.balancerCurrent | ||||||
|  | 	gsb.balancerCurrent = gsb.balancerPending | ||||||
|  | 	gsb.balancerPending = nil | ||||||
|  | 	go func() { | ||||||
|  | 		gsb.currentMu.Lock() | ||||||
|  | 		defer gsb.currentMu.Unlock() | ||||||
|  | 		cur.Close() | ||||||
|  | 	}() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Helper function that checks if the balancer passed in is current or pending.
 | ||||||
|  | // The caller must hold gsb.mu.
 | ||||||
|  | func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { | ||||||
|  | 	return bw == gsb.balancerCurrent || bw == gsb.balancerPending | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SwitchTo initializes the graceful switch process, which completes based on
 | ||||||
|  | // connectivity state changes on the current/pending balancer. Thus, the switch
 | ||||||
|  | // process is not complete when this method returns. This method must be called
 | ||||||
|  | // synchronously alongside the rest of the balancer.Balancer methods this
 | ||||||
|  | // Graceful Switch Balancer implements.
 | ||||||
|  | func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { | ||||||
|  | 	gsb.mu.Lock() | ||||||
|  | 	if gsb.closed { | ||||||
|  | 		gsb.mu.Unlock() | ||||||
|  | 		return errBalancerClosed | ||||||
|  | 	} | ||||||
|  | 	bw := &balancerWrapper{ | ||||||
|  | 		gsb: gsb, | ||||||
|  | 		lastState: balancer.State{ | ||||||
|  | 			ConnectivityState: connectivity.Connecting, | ||||||
|  | 			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable), | ||||||
|  | 		}, | ||||||
|  | 		subconns: make(map[balancer.SubConn]bool), | ||||||
|  | 	} | ||||||
|  | 	balToClose := gsb.balancerPending // nil if there is no pending balancer
 | ||||||
|  | 	if gsb.balancerCurrent == nil { | ||||||
|  | 		gsb.balancerCurrent = bw | ||||||
|  | 	} else { | ||||||
|  | 		gsb.balancerPending = bw | ||||||
|  | 	} | ||||||
|  | 	gsb.mu.Unlock() | ||||||
|  | 	balToClose.Close() | ||||||
|  | 	// This function takes a builder instead of a balancer because builder.Build
 | ||||||
|  | 	// can call back inline, and this utility needs to handle the callbacks.
 | ||||||
|  | 	newBalancer := builder.Build(bw, gsb.bOpts) | ||||||
|  | 	if newBalancer == nil { | ||||||
|  | 		// This is illegal and should never happen; we clear the balancerWrapper
 | ||||||
|  | 		// we were constructing if it happens to avoid a potential panic.
 | ||||||
|  | 		gsb.mu.Lock() | ||||||
|  | 		if gsb.balancerPending != nil { | ||||||
|  | 			gsb.balancerPending = nil | ||||||
|  | 		} else { | ||||||
|  | 			gsb.balancerCurrent = nil | ||||||
|  | 		} | ||||||
|  | 		gsb.mu.Unlock() | ||||||
|  | 		return balancer.ErrBadResolverState | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// This write doesn't need to take gsb.mu because this field never gets read
 | ||||||
|  | 	// or written to on any calls from the current or pending. Calls from grpc
 | ||||||
|  | 	// to this balancer are guaranteed to be called synchronously, so this
 | ||||||
|  | 	// bw.Balancer field will never be forwarded to until this SwitchTo()
 | ||||||
|  | 	// function returns.
 | ||||||
|  | 	bw.Balancer = newBalancer | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Returns nil if the graceful switch balancer is closed.
 | ||||||
|  | func (gsb *Balancer) latestBalancer() *balancerWrapper { | ||||||
|  | 	gsb.mu.Lock() | ||||||
|  | 	defer gsb.mu.Unlock() | ||||||
|  | 	if gsb.balancerPending != nil { | ||||||
|  | 		return gsb.balancerPending | ||||||
|  | 	} | ||||||
|  | 	return gsb.balancerCurrent | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // UpdateClientConnState forwards the update to the latest balancer created.
 | ||||||
|  | func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { | ||||||
|  | 	// The resolver data is only relevant to the most recent LB Policy.
 | ||||||
|  | 	balToUpdate := gsb.latestBalancer() | ||||||
|  | 	if balToUpdate == nil { | ||||||
|  | 		return errBalancerClosed | ||||||
|  | 	} | ||||||
|  | 	// Perform this call without gsb.mu to prevent deadlocks if the child calls
 | ||||||
|  | 	// back into the channel. The latest balancer can never be closed during a
 | ||||||
|  | 	// call from the channel, even without gsb.mu held.
 | ||||||
|  | 	return balToUpdate.UpdateClientConnState(state) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ResolverError forwards the error to the latest balancer created.
 | ||||||
|  | func (gsb *Balancer) ResolverError(err error) { | ||||||
|  | 	// The resolver data is only relevant to the most recent LB Policy.
 | ||||||
|  | 	balToUpdate := gsb.latestBalancer() | ||||||
|  | 	if balToUpdate == nil { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	// Perform this call without gsb.mu to prevent deadlocks if the child calls
 | ||||||
|  | 	// back into the channel. The latest balancer can never be closed during a
 | ||||||
|  | 	// call from the channel, even without gsb.mu held.
 | ||||||
|  | 	balToUpdate.ResolverError(err) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ExitIdle forwards the call to the latest balancer created.
 | ||||||
|  | //
 | ||||||
|  | // If the latest balancer does not support ExitIdle, the subConns are
 | ||||||
|  | // re-connected to manually.
 | ||||||
|  | func (gsb *Balancer) ExitIdle() { | ||||||
|  | 	balToUpdate := gsb.latestBalancer() | ||||||
|  | 	if balToUpdate == nil { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	// There is no need to protect this read with a mutex, as the write to the
 | ||||||
|  | 	// Balancer field happens in SwitchTo, which completes before this can be
 | ||||||
|  | 	// called.
 | ||||||
|  | 	if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { | ||||||
|  | 		ei.ExitIdle() | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	for sc := range balToUpdate.subconns { | ||||||
|  | 		sc.Connect() | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // UpdateSubConnState forwards the update to the appropriate child.
 | ||||||
|  | func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { | ||||||
|  | 	gsb.currentMu.Lock() | ||||||
|  | 	defer gsb.currentMu.Unlock() | ||||||
|  | 	gsb.mu.Lock() | ||||||
|  | 	// Forward update to the appropriate child.  Even if there is a pending
 | ||||||
|  | 	// balancer, the current balancer should continue to get SubConn updates to
 | ||||||
|  | 	// maintain the proper state while the pending is still connecting.
 | ||||||
|  | 	var balToUpdate *balancerWrapper | ||||||
|  | 	if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { | ||||||
|  | 		balToUpdate = gsb.balancerCurrent | ||||||
|  | 	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { | ||||||
|  | 		balToUpdate = gsb.balancerPending | ||||||
|  | 	} | ||||||
|  | 	gsb.mu.Unlock() | ||||||
|  | 	if balToUpdate == nil { | ||||||
|  | 		// SubConn belonged to a stale lb policy that has not yet fully closed,
 | ||||||
|  | 		// or the balancer was already closed.
 | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	balToUpdate.UpdateSubConnState(sc, state) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Close closes any active child balancers.
 | ||||||
|  | func (gsb *Balancer) Close() { | ||||||
|  | 	gsb.mu.Lock() | ||||||
|  | 	gsb.closed = true | ||||||
|  | 	currentBalancerToClose := gsb.balancerCurrent | ||||||
|  | 	gsb.balancerCurrent = nil | ||||||
|  | 	pendingBalancerToClose := gsb.balancerPending | ||||||
|  | 	gsb.balancerPending = nil | ||||||
|  | 	gsb.mu.Unlock() | ||||||
|  | 
 | ||||||
|  | 	currentBalancerToClose.Close() | ||||||
|  | 	pendingBalancerToClose.Close() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
 | ||||||
|  | // methods to help cleanup SubConns created by the wrapped balancer.
 | ||||||
|  | //
 | ||||||
|  | // It implements the balancer.ClientConn interface and is passed down in that
 | ||||||
|  | // capacity to the wrapped balancer. It maintains a set of subConns created by
 | ||||||
|  | // the wrapped balancer and calls from the latter to create/update/remove
 | ||||||
|  | // SubConns update this set before being forwarded to the parent ClientConn.
 | ||||||
|  | // State updates from the wrapped balancer can result in invocation of the
 | ||||||
|  | // graceful switch logic.
 | ||||||
|  | type balancerWrapper struct { | ||||||
|  | 	balancer.Balancer | ||||||
|  | 	gsb *Balancer | ||||||
|  | 
 | ||||||
|  | 	lastState balancer.State | ||||||
|  | 	subconns  map[balancer.SubConn]bool // subconns created by this balancer
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { | ||||||
|  | 	if state.ConnectivityState == connectivity.Shutdown { | ||||||
|  | 		bw.gsb.mu.Lock() | ||||||
|  | 		delete(bw.subconns, sc) | ||||||
|  | 		bw.gsb.mu.Unlock() | ||||||
|  | 	} | ||||||
|  | 	// There is no need to protect this read with a mutex, as the write to the
 | ||||||
|  | 	// Balancer field happens in SwitchTo, which completes before this can be
 | ||||||
|  | 	// called.
 | ||||||
|  | 	bw.Balancer.UpdateSubConnState(sc, state) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Close closes the underlying LB policy and removes the subconns it created. bw
 | ||||||
|  | // must not be referenced via balancerCurrent or balancerPending in gsb when
 | ||||||
|  | // called. gsb.mu must not be held.  Does not panic with a nil receiver.
 | ||||||
|  | func (bw *balancerWrapper) Close() { | ||||||
|  | 	// before Close is called.
 | ||||||
|  | 	if bw == nil { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	// There is no need to protect this read with a mutex, as Close() is
 | ||||||
|  | 	// impossible to be called concurrently with the write in SwitchTo(). The
 | ||||||
|  | 	// callsites of Close() for this balancer in Graceful Switch Balancer will
 | ||||||
|  | 	// never be called until SwitchTo() returns.
 | ||||||
|  | 	bw.Balancer.Close() | ||||||
|  | 	bw.gsb.mu.Lock() | ||||||
|  | 	for sc := range bw.subconns { | ||||||
|  | 		bw.gsb.cc.RemoveSubConn(sc) | ||||||
|  | 	} | ||||||
|  | 	bw.gsb.mu.Unlock() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (bw *balancerWrapper) UpdateState(state balancer.State) { | ||||||
|  | 	// Hold the mutex for this entire call to ensure it cannot occur
 | ||||||
|  | 	// concurrently with other updateState() calls. This causes updates to
 | ||||||
|  | 	// lastState and calls to cc.UpdateState to happen atomically.
 | ||||||
|  | 	bw.gsb.mu.Lock() | ||||||
|  | 	defer bw.gsb.mu.Unlock() | ||||||
|  | 	bw.lastState = state | ||||||
|  | 
 | ||||||
|  | 	if !bw.gsb.balancerCurrentOrPending(bw) { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if bw == bw.gsb.balancerCurrent { | ||||||
|  | 		// In the case that the current balancer exits READY, and there is a pending
 | ||||||
|  | 		// balancer, you can forward the pending balancer's cached State up to
 | ||||||
|  | 		// ClientConn and swap the pending into the current. This is because there
 | ||||||
|  | 		// is no reason to gracefully switch from and keep using the old policy as
 | ||||||
|  | 		// the ClientConn is not connected to any backends.
 | ||||||
|  | 		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { | ||||||
|  | 			bw.gsb.swap() | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		// Even if there is a pending balancer waiting to be gracefully switched
 | ||||||
|  | 		// to, continue to forward updates from the current balancer to the
 | ||||||
|  | 		// ClientConn. Ignoring the current balancer's state and picker would
 | ||||||
|  | 		// make the system behave incorrectly from that LB policy's perspective.
 | ||||||
|  | 		// gRPC is also still using the current balancer to choose SubConns for
 | ||||||
|  | 		// each RPC, so it should work with the most up-to-date state and picker
 | ||||||
|  | 		// the current balancer has produced.
 | ||||||
|  | 		bw.gsb.cc.UpdateState(state) | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	// This method is now handling a state update from the pending balancer.
 | ||||||
|  | 	// The pending balancer is swapped in once it reports a state other than
 | ||||||
|  | 	// CONNECTING, or immediately if the current balancer is not READY. In the
 | ||||||
|  | 	// latter case there is no reason to delay the switch and keep using the
 | ||||||
|  | 	// old policy, because the ClientConn is not connected to any backends.
 | ||||||
|  | 	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { | ||||||
|  | 		bw.gsb.swap() | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { | ||||||
|  | 	bw.gsb.mu.Lock() | ||||||
|  | 	if !bw.gsb.balancerCurrentOrPending(bw) { | ||||||
|  | 		bw.gsb.mu.Unlock() | ||||||
|  | 		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) | ||||||
|  | 	} | ||||||
|  | 	bw.gsb.mu.Unlock() | ||||||
|  | 
 | ||||||
|  | 	sc, err := bw.gsb.cc.NewSubConn(addrs, opts) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	bw.gsb.mu.Lock() | ||||||
|  | 	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
 | ||||||
|  | 		bw.gsb.cc.RemoveSubConn(sc) | ||||||
|  | 		bw.gsb.mu.Unlock() | ||||||
|  | 		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) | ||||||
|  | 	} | ||||||
|  | 	bw.subconns[sc] = true | ||||||
|  | 	bw.gsb.mu.Unlock() | ||||||
|  | 	return sc, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { | ||||||
|  | 	// Ignore ResolveNow requests from anything other than the most recent
 | ||||||
|  | 	// balancer, because older balancers were already removed from the config.
 | ||||||
|  | 	if bw != bw.gsb.latestBalancer() { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	bw.gsb.cc.ResolveNow(opts) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { | ||||||
|  | 	bw.gsb.mu.Lock() | ||||||
|  | 	if !bw.gsb.balancerCurrentOrPending(bw) { | ||||||
|  | 		bw.gsb.mu.Unlock() | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	bw.gsb.mu.Unlock() | ||||||
|  | 	bw.gsb.cc.RemoveSubConn(sc) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { | ||||||
|  | 	bw.gsb.mu.Lock() | ||||||
|  | 	if !bw.gsb.balancerCurrentOrPending(bw) { | ||||||
|  | 		bw.gsb.mu.Unlock() | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	bw.gsb.mu.Unlock() | ||||||
|  | 	bw.gsb.cc.UpdateAddresses(sc, addrs) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (bw *balancerWrapper) Target() string { | ||||||
|  | 	return bw.gsb.cc.Target() | ||||||
|  | } | ||||||
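The swap rules spelled out in the UpdateState comments above reduce to a small predicate. The following standalone sketch is not part of gRPC: shouldSwap and connState are hypothetical stand-ins for the balancer.State and connectivity values used by the real code, shown only to make the two swap conditions explicit.

package main

import "fmt"

type connState int

const (
	idle connState = iota
	connecting
	ready
	transientFailure
)

// shouldSwap mirrors the two conditions under which the pending balancer is
// promoted: a current-balancer update that leaves READY while a pending
// balancer exists, or a pending-balancer update reporting anything other
// than CONNECTING (or arriving while the current balancer is not READY).
func shouldSwap(fromCurrent bool, update, currentLast connState, hasPending bool) bool {
	if fromCurrent {
		return update != ready && hasPending
	}
	// The update came from the pending balancer.
	return update != connecting || currentLast != ready
}

func main() {
	fmt.Println(shouldSwap(true, transientFailure, ready, true)) // true: current left READY, pending exists
	fmt.Println(shouldSwap(false, connecting, ready, true))      // false: keep waiting on the pending balancer
	fmt.Println(shouldSwap(false, ready, ready, true))           // true: pending balancer is usable now
}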
| @ -0,0 +1,75 @@ | |||||||
|  | /* | ||||||
|  |  * | ||||||
|  |  * Copyright 2022 gRPC authors. | ||||||
|  |  * | ||||||
|  |  * Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  |  * you may not use this file except in compliance with the License. | ||||||
|  |  * You may obtain a copy of the License at | ||||||
|  |  * | ||||||
|  |  *     http://www.apache.org/licenses/LICENSE-2.0
 | ||||||
|  |  * | ||||||
|  |  * Unless required by applicable law or agreed to in writing, software | ||||||
|  |  * distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  |  * See the License for the specific language governing permissions and | ||||||
|  |  * limitations under the License. | ||||||
|  |  * | ||||||
|  |  */ | ||||||
|  | 
 | ||||||
|  | package channelz | ||||||
|  | 
 | ||||||
|  | import "fmt" | ||||||
|  | 
 | ||||||
|  | // Identifier is an opaque identifier which uniquely identifies an entity in the
 | ||||||
|  | // channelz database.
 | ||||||
|  | type Identifier struct { | ||||||
|  | 	typ RefChannelType | ||||||
|  | 	id  int64 | ||||||
|  | 	str string | ||||||
|  | 	pid *Identifier | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Type returns the entity type corresponding to id.
 | ||||||
|  | func (id *Identifier) Type() RefChannelType { | ||||||
|  | 	return id.typ | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Int returns the integer identifier corresponding to id.
 | ||||||
|  | func (id *Identifier) Int() int64 { | ||||||
|  | 	return id.id | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String returns a string representation of the entity corresponding to id.
 | ||||||
|  | //
 | ||||||
|  | // This includes some information about the parent as well. Examples:
 | ||||||
|  | // Top-level channel: [Channel #channel-number]
 | ||||||
|  | // Nested channel:    [Channel #parent-channel-number Channel #channel-number]
 | ||||||
|  | // Sub channel:       [Channel #parent-channel-number SubChannel #subchannel-number]
 | ||||||
|  | func (id *Identifier) String() string { | ||||||
|  | 	return id.str | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Equal returns true if other is the same as id.
 | ||||||
|  | func (id *Identifier) Equal(other *Identifier) bool { | ||||||
|  | 	if (id != nil) != (other != nil) { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	if id == nil && other == nil { | ||||||
|  | 		return true | ||||||
|  | 	} | ||||||
|  | 	return id.typ == other.typ && id.id == other.id && id.pid == other.pid | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewIdentifierForTesting returns a new opaque identifier to be used only for
 | ||||||
|  | // testing purposes.
 | ||||||
|  | func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { | ||||||
|  | 	return newIdentifer(typ, id, pid) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { | ||||||
|  | 	str := fmt.Sprintf("%s #%d", typ, id) | ||||||
|  | 	if pid != nil { | ||||||
|  | 		str = fmt.Sprintf("%s %s", pid, str) | ||||||
|  | 	} | ||||||
|  | 	return &Identifier{typ: typ, id: id, str: str, pid: pid} | ||||||
|  | } | ||||||
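To make the parent-prefix composition in newIdentifer above concrete, here is a standalone sketch that is not part of gRPC: ident, newIdent, and refType are hypothetical stand-ins for Identifier, newIdentifer, and RefChannelType.

package main

import "fmt"

type refType string

type ident struct {
	typ refType
	id  int64
	str string
	pid *ident
}

func (i *ident) String() string { return i.str }

// newIdent builds the display string the same way newIdentifer does: the
// entity's own "<type> #<id>" is prefixed by its parent's string, if any.
func newIdent(typ refType, id int64, pid *ident) *ident {
	str := fmt.Sprintf("%s #%d", typ, id)
	if pid != nil {
		str = fmt.Sprintf("%s %s", pid, str)
	}
	return &ident{typ: typ, id: id, str: str, pid: pid}
}

func main() {
	top := newIdent("Channel", 1, nil)
	sub := newIdent("SubChannel", 7, top)
	fmt.Println(top) // Channel #1
	fmt.Println(sub) // Channel #1 SubChannel #7
}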
| @ -0,0 +1,82 @@ | |||||||
|  | /* | ||||||
|  |  * | ||||||
|  |  * Copyright 2021 gRPC authors. | ||||||
|  |  * | ||||||
|  |  * Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  |  * you may not use this file except in compliance with the License. | ||||||
|  |  * You may obtain a copy of the License at | ||||||
|  |  * | ||||||
|  |  *     http://www.apache.org/licenses/LICENSE-2.0
 | ||||||
|  |  * | ||||||
|  |  * Unless required by applicable law or agreed to in writing, software | ||||||
|  |  * distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  |  * See the License for the specific language governing permissions and | ||||||
|  |  * limitations under the License. | ||||||
|  |  * | ||||||
|  |  */ | ||||||
|  | 
 | ||||||
|  | // Package pretty defines helper functions to pretty-print structs for logging.
 | ||||||
|  | package pretty | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"encoding/json" | ||||||
|  | 	"fmt" | ||||||
|  | 
 | ||||||
|  | 	"github.com/golang/protobuf/jsonpb" | ||||||
|  | 	protov1 "github.com/golang/protobuf/proto" | ||||||
|  | 	"google.golang.org/protobuf/encoding/protojson" | ||||||
|  | 	protov2 "google.golang.org/protobuf/proto" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | const jsonIndent = "  " | ||||||
|  | 
 | ||||||
|  | // ToJSON marshals the input into a JSON string.
 | ||||||
|  | //
 | ||||||
|  | // If marshaling fails, it falls back to fmt.Sprintf("%+v").
 | ||||||
|  | func ToJSON(e interface{}) string { | ||||||
|  | 	switch ee := e.(type) { | ||||||
|  | 	case protov1.Message: | ||||||
|  | 		mm := jsonpb.Marshaler{Indent: jsonIndent} | ||||||
|  | 		ret, err := mm.MarshalToString(ee) | ||||||
|  | 		if err != nil { | ||||||
|  | 			// This may fail for proto.Anys, e.g. for xDS v2 LDS: the v2 message
 | ||||||
|  | 			// types are not imported, so the message cannot be resolved and
 | ||||||
|  | 			// marshaling fails.
 | ||||||
|  | 			return fmt.Sprintf("%+v", ee) | ||||||
|  | 		} | ||||||
|  | 		return ret | ||||||
|  | 	case protov2.Message: | ||||||
|  | 		mm := protojson.MarshalOptions{ | ||||||
|  | 			Multiline: true, | ||||||
|  | 			Indent:    jsonIndent, | ||||||
|  | 		} | ||||||
|  | 		ret, err := mm.Marshal(ee) | ||||||
|  | 		if err != nil { | ||||||
|  | 			// This may fail for proto.Anys, e.g. for xDS v2 LDS: the v2 message
 | ||||||
|  | 			// types are not imported, so the message cannot be resolved and
 | ||||||
|  | 			// marshaling fails.
 | ||||||
|  | 			return fmt.Sprintf("%+v", ee) | ||||||
|  | 		} | ||||||
|  | 		return string(ret) | ||||||
|  | 	default: | ||||||
|  | 		ret, err := json.MarshalIndent(ee, "", jsonIndent) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return fmt.Sprintf("%+v", ee) | ||||||
|  | 		} | ||||||
|  | 		return string(ret) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // FormatJSON formats the input JSON bytes with indentation.
 | ||||||
|  | //
 | ||||||
|  | // If Indent fails, it returns the unchanged input as a string.
 | ||||||
|  | func FormatJSON(b []byte) string { | ||||||
|  | 	var out bytes.Buffer | ||||||
|  | 	err := json.Indent(&out, b, "", jsonIndent) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return string(b) | ||||||
|  | 	} | ||||||
|  | 	return out.String() | ||||||
|  | } | ||||||
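A short usage sketch for the two helpers above. It assumes the package path google.golang.org/grpc/internal/pretty, so it only compiles from within the grpc module itself (the package is internal); the ToJSON and FormatJSON signatures are taken directly from this file.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/pretty"
)

func main() {
	// A plain struct takes the json.MarshalIndent branch of ToJSON.
	cfg := struct {
		Target  string
		Retries int
	}{Target: "dns:///example.com:443", Retries: 3}
	fmt.Println(pretty.ToJSON(cfg))

	// FormatJSON re-indents JSON that is already serialized.
	fmt.Println(pretty.FormatJSON([]byte(`{"a":1,"b":[2,3]}`)))
}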