vendor: update buildkit to v0.8

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>

Branch: pull/469/head
Parent: 080e9981c7
Commit: 69a1419ab1
@@ -1,12 +0,0 @@
{
  "name": "metadata",
  "name_pretty": "Google Compute Engine Metadata API",
  "product_documentation": "https://cloud.google.com/compute/docs/storing-retrieving-metadata",
  "client_documentation": "https://godoc.org/cloud.google.com/go/compute/metadata",
  "release_level": "ga",
  "language": "go",
  "repo": "googleapis/google-cloud-go",
  "distribution_name": "cloud.google.com/go/compute/metadata",
  "api_id": "compute:metadata",
  "requires_billing": false
}
@@ -0,0 +1,49 @@
package hcs

import (
	"context"
	"encoding/json"

	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
	"github.com/Microsoft/hcsshim/internal/vmcompute"
)

// GetServiceProperties returns properties of the host compute service.
func GetServiceProperties(ctx context.Context, q hcsschema.PropertyQuery) (*hcsschema.ServiceProperties, error) {
	operation := "hcsshim::GetServiceProperties"

	queryb, err := json.Marshal(q)
	if err != nil {
		return nil, err
	}
	propertiesJSON, resultJSON, err := vmcompute.HcsGetServiceProperties(ctx, string(queryb))
	events := processHcsResult(ctx, resultJSON)
	if err != nil {
		return nil, &HcsError{Op: operation, Err: err, Events: events}
	}

	if propertiesJSON == "" {
		return nil, ErrUnexpectedValue
	}
	properties := &hcsschema.ServiceProperties{}
	if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
		return nil, err
	}
	return properties, nil
}

// ModifyServiceSettings modifies settings of the host compute service.
func ModifyServiceSettings(ctx context.Context, settings hcsschema.ModificationRequest) error {
	operation := "hcsshim::ModifyServiceSettings"

	settingsJSON, err := json.Marshal(settings)
	if err != nil {
		return err
	}
	resultJSON, err := vmcompute.HcsModifyServiceSettings(ctx, string(settingsJSON))
	events := processHcsResult(ctx, resultJSON)
	if err != nil {
		return &HcsError{Op: operation, Err: err, Events: events}
	}
	return nil
}
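A minimal caller sketch for the new service API (not part of this diff), written as if it lived alongside the code above in the internal hcs package. The empty hcsschema.PropertyQuery, and the assumption that HCS accepts it and returns a default property set, are illustrative only.

package hcs

import (
	"context"
	"errors"

	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
)

// fetchServiceProperties is an illustrative helper, not part of this diff.
func fetchServiceProperties(ctx context.Context) (*hcsschema.ServiceProperties, error) {
	// PropertyQuery's fields are not shown in this diff, so none are set here.
	props, err := GetServiceProperties(ctx, hcsschema.PropertyQuery{})
	if err != nil {
		return nil, err
	}
	if len(props.Properties) == 0 {
		return nil, errors.New("no service properties returned")
	}
	return props, nil
}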
@ -1,79 +0,0 @@
|
||||
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
|
||||
package safefile
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values see on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
modntdll = windows.NewLazySystemDLL("ntdll.dll")
|
||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||
|
||||
procNtCreateFile = modntdll.NewProc("NtCreateFile")
|
||||
procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile")
|
||||
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
|
||||
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
||||
procLocalFree = modkernel32.NewProc("LocalFree")
|
||||
)
|
||||
|
||||
func ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0)
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0)
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func rtlNtStatusToDosError(status uint32) (winerr error) {
|
||||
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
|
||||
if r0 != 0 {
|
||||
winerr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func localAlloc(flags uint32, size int) (ptr uintptr) {
|
||||
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
|
||||
ptr = uintptr(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func localFree(ptr uintptr) {
|
||||
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0)
|
||||
return
|
||||
}
|
@ -0,0 +1,16 @@
|
||||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ContainerCredentialGuardAddInstanceRequest struct {
|
||||
Id string `json:"Id,omitempty"`
|
||||
CredentialSpec string `json:"CredentialSpec,omitempty"`
|
||||
Transport string `json:"Transport,omitempty"`
|
||||
}
|
@ -0,0 +1,15 @@
|
||||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ContainerCredentialGuardHvSocketServiceConfig struct {
|
||||
ServiceId string `json:"ServiceId,omitempty"`
|
||||
ServiceConfig *HvSocketServiceConfig `json:"ServiceConfig,omitempty"`
|
||||
}
|
@ -0,0 +1,16 @@
|
||||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ContainerCredentialGuardInstance struct {
|
||||
Id string `json:"Id,omitempty"`
|
||||
CredentialGuard *ContainerCredentialGuardState `json:"CredentialGuard,omitempty"`
|
||||
HvSocketConfig *ContainerCredentialGuardHvSocketServiceConfig `json:"HvSocketConfig,omitempty"`
|
||||
}
|
@ -0,0 +1,17 @@
|
||||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ContainerCredentialGuardModifyOperation string
|
||||
|
||||
const (
|
||||
AddInstance ContainerCredentialGuardModifyOperation = "AddInstance"
|
||||
RemoveInstance ContainerCredentialGuardModifyOperation = "RemoveInstance"
|
||||
)
|
@ -0,0 +1,15 @@
|
||||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ContainerCredentialGuardOperationRequest struct {
|
||||
Operation ContainerCredentialGuardModifyOperation `json:"Operation,omitempty"`
|
||||
OperationDetails interface{} `json:"OperationDetails,omitempty"`
|
||||
}
|
@ -0,0 +1,14 @@
|
||||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ContainerCredentialGuardRemoveInstanceRequest struct {
|
||||
Id string `json:"Id,omitempty"`
|
||||
}
|
@ -0,0 +1,14 @@
|
||||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ContainerCredentialGuardSystemInfo struct {
|
||||
Instances []ContainerCredentialGuardInstance `json:"Instances,omitempty"`
|
||||
}
|
@ -0,0 +1,17 @@
|
||||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
// This class defines address settings applied to a VM
|
||||
// by the GCS every time a VM starts or restores.
|
||||
type HvSocketAddress struct {
|
||||
LocalAddress string `json:"LocalAddress,omitempty"`
|
||||
ParentAddress string `json:"ParentAddress,omitempty"`
|
||||
}
|
@ -0,0 +1,18 @@
|
||||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type LogicalProcessor struct {
|
||||
LpIndex uint32 `json:"LpIndex,omitempty"`
|
||||
NodeNumber uint8 `json:"NodeNumber, omitempty"`
|
||||
PackageId uint32 `json:"PackageId, omitempty"`
|
||||
CoreId uint32 `json:"CoreId, omitempty"`
|
||||
RootVpIndex int32 `json:"RootVpIndex, omitempty"`
|
||||
}
|
@ -0,0 +1,15 @@
|
||||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ModificationRequest struct {
|
||||
PropertyType PropertyType `json:"PropertyType,omitempty"`
|
||||
Settings interface{} `json:"Settings,omitempty"`
|
||||
}
|
@ -0,0 +1,15 @@
|
||||
/*
|
||||
* HCS API
|
||||
*
|
||||
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
|
||||
*
|
||||
* API version: 2.4
|
||||
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
|
||||
*/
|
||||
|
||||
package hcsschema
|
||||
|
||||
type ProcessorTopology struct {
|
||||
LogicalProcessorCount uint32 `json:"LogicalProcessorCount,omitempty"`
|
||||
LogicalProcessors []LogicalProcessor `json:"LogicalProcessors,omitempty"`
|
||||
}
|
@@ -0,0 +1,18 @@
/*
 * HCS API
 *
 * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
 *
 * API version: 2.4
 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 */

package hcsschema

import "encoding/json"

type ServiceProperties struct {
	// Changed Properties field to []json.RawMessage from []interface{} to avoid having to
	// remarshal sp.Properties[n] and unmarshal into the type(s) we want.
	Properties []json.RawMessage `json:"Properties,omitempty"`
}
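The comment on Properties explains the move to []json.RawMessage: each entry keeps its original bytes, so a caller can unmarshal straight into the schema type it expects instead of remarshalling an interface{} value first. A minimal sketch under that assumption follows (the property index and concrete type chosen here are illustrative, not part of this diff).

package hcsschema

import (
	"encoding/json"
	"errors"
)

// processorTopologyFromProperties is an illustrative helper, not part of this
// diff; it assumes the first returned property decodes as a ProcessorTopology.
func processorTopologyFromProperties(sp *ServiceProperties) (*ProcessorTopology, error) {
	if len(sp.Properties) == 0 {
		return nil, errors.New("no properties returned")
	}
	var pt ProcessorTopology
	// Unmarshal the raw bytes directly; with []interface{} this would need a
	// json.Marshal of sp.Properties[0] first.
	if err := json.Unmarshal(sp.Properties[0], &pt); err != nil {
		return nil, err
	}
	return &pt, nil
}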
@ -0,0 +1,13 @@
|
||||
package winapi
|
||||
|
||||
import "github.com/Microsoft/go-winio/pkg/guid"
|
||||
|
||||
//sys CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_ID_List_SizeA
|
||||
//sys CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error)= cfgmgr32.CM_Get_Device_ID_ListA
|
||||
//sys CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) = cfgmgr32.CM_Locate_DevNodeW
|
||||
//sys CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) = cfgmgr32.CM_Get_DevNode_PropertyW
|
||||
|
||||
type DevPropKey struct {
|
||||
Fmtid guid.GUID
|
||||
Pid uint32
|
||||
}
|
@@ -0,0 +1,15 @@
package winapi

import "syscall"

//sys RtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosError

const (
	STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B
	ERROR_NO_MORE_ITEMS              = 0x103
	ERROR_MORE_DATA syscall.Errno    = 234
)

func NTSuccess(status uint32) bool {
	return status == 0
}
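A small sketch (not part of this diff) of how these pieces are meant to combine: NT-style calls in this package return a raw NTSTATUS, NTSuccess classifies it, and RtlNtStatusToDosError maps a failure status to a Go error.

package winapi

// ntStatusToError is an illustrative helper, not part of this diff.
func ntStatusToError(status uint32) error {
	// Zero (STATUS_SUCCESS) means no error; anything else is mapped to the
	// corresponding Win32 error via ntdll.
	if NTSuccess(status) {
		return nil
	}
	return RtlNtStatusToDosError(status)
}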
@ -0,0 +1,61 @@
|
||||
package winapi
|
||||
|
||||
//sys NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile
|
||||
//sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile
|
||||
|
||||
//sys NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) = ntdll.NtOpenDirectoryObject
|
||||
//sys NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32)(status uint32) = ntdll.NtQueryDirectoryObject
|
||||
|
||||
const (
|
||||
FileLinkInformationClass = 11
|
||||
FileDispositionInformationExClass = 64
|
||||
|
||||
FILE_READ_ATTRIBUTES = 0x0080
|
||||
FILE_WRITE_ATTRIBUTES = 0x0100
|
||||
DELETE = 0x10000
|
||||
|
||||
FILE_OPEN = 1
|
||||
FILE_CREATE = 2
|
||||
|
||||
FILE_LIST_DIRECTORY = 0x00000001
|
||||
FILE_DIRECTORY_FILE = 0x00000001
|
||||
FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020
|
||||
FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000
|
||||
FILE_OPEN_REPARSE_POINT = 0x00200000
|
||||
|
||||
FILE_DISPOSITION_DELETE = 0x00000001
|
||||
|
||||
OBJ_DONT_REPARSE = 0x1000
|
||||
|
||||
STATUS_MORE_ENTRIES = 0x105
|
||||
STATUS_NO_MORE_ENTRIES = 0x8000001a
|
||||
)
|
||||
|
||||
type FileDispositionInformationEx struct {
|
||||
Flags uintptr
|
||||
}
|
||||
|
||||
type IOStatusBlock struct {
|
||||
Status, Information uintptr
|
||||
}
|
||||
|
||||
type ObjectAttributes struct {
|
||||
Length uintptr
|
||||
RootDirectory uintptr
|
||||
ObjectName uintptr
|
||||
Attributes uintptr
|
||||
SecurityDescriptor uintptr
|
||||
SecurityQoS uintptr
|
||||
}
|
||||
|
||||
type ObjectDirectoryInformation struct {
|
||||
Name UnicodeString
|
||||
TypeName UnicodeString
|
||||
}
|
||||
|
||||
type FileLinkInformation struct {
|
||||
ReplaceIfExists bool
|
||||
RootDirectory uintptr
|
||||
FileNameLength uint32
|
||||
FileName [1]uint16
|
||||
}
|
@ -0,0 +1,120 @@
|
||||
package winapi
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Messages that can be received from an assigned io completion port.
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port
|
||||
const (
|
||||
JOB_OBJECT_MSG_END_OF_JOB_TIME = 1
|
||||
JOB_OBJECT_MSG_END_OF_PROCESS_TIME = 2
|
||||
JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT = 3
|
||||
JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO = 4
|
||||
JOB_OBJECT_MSG_NEW_PROCESS = 6
|
||||
JOB_OBJECT_MSG_EXIT_PROCESS = 7
|
||||
JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS = 8
|
||||
JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT = 9
|
||||
JOB_OBJECT_MSG_JOB_MEMORY_LIMIT = 10
|
||||
JOB_OBJECT_MSG_NOTIFICATION_LIMIT = 11
|
||||
)
|
||||
|
||||
// IO limit flags
|
||||
//
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information
|
||||
const JOB_OBJECT_IO_RATE_CONTROL_ENABLE = 0x1
|
||||
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information
|
||||
const (
|
||||
JOB_OBJECT_CPU_RATE_CONTROL_ENABLE = 1 << iota
|
||||
JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED
|
||||
JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP
|
||||
JOB_OBJECT_CPU_RATE_CONTROL_NOTIFY
|
||||
JOB_OBJECT_CPU_RATE_CONTROL_MIN_MAX_RATE
|
||||
)
|
||||
|
||||
// JobObjectInformationClass values. Used for a call to QueryInformationJobObject
|
||||
//
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-queryinformationjobobject
|
||||
const (
|
||||
JobObjectBasicAccountingInformation uint32 = 1
|
||||
JobObjectBasicProcessIdList uint32 = 3
|
||||
JobObjectBasicAndIoAccountingInformation uint32 = 8
|
||||
JobObjectLimitViolationInformation uint32 = 13
|
||||
JobObjectNotificationLimitInformation2 uint32 = 33
|
||||
)
|
||||
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_limit_information
|
||||
type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
|
||||
PerProcessUserTimeLimit int64
|
||||
PerJobUserTimeLimit int64
|
||||
LimitFlags uint32
|
||||
MinimumWorkingSetSize uintptr
|
||||
MaximumWorkingSetSize uintptr
|
||||
ActiveProcessLimit uint32
|
||||
Affinity uintptr
|
||||
PriorityClass uint32
|
||||
SchedulingClass uint32
|
||||
}
|
||||
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information
|
||||
type JOBOBJECT_CPU_RATE_CONTROL_INFORMATION struct {
|
||||
ControlFlags uint32
|
||||
Rate uint32
|
||||
}
|
||||
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/ns-jobapi2-jobobject_io_rate_control_information
|
||||
type JOBOBJECT_IO_RATE_CONTROL_INFORMATION struct {
|
||||
MaxIops int64
|
||||
MaxBandwidth int64
|
||||
ReservationIops int64
|
||||
BaseIOSize uint32
|
||||
VolumeName string
|
||||
ControlFlags uint32
|
||||
}
|
||||
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_process_id_list
|
||||
type JOBOBJECT_BASIC_PROCESS_ID_LIST struct {
|
||||
NumberOfAssignedProcesses uint32
|
||||
NumberOfProcessIdsInList uint32
|
||||
ProcessIdList [1]uintptr
|
||||
}
|
||||
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_associate_completion_port
|
||||
type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
|
||||
CompletionKey uintptr
|
||||
CompletionPort windows.Handle
|
||||
}
|
||||
|
||||
// BOOL IsProcessInJob(
|
||||
// HANDLE ProcessHandle,
|
||||
// HANDLE JobHandle,
|
||||
// PBOOL Result
|
||||
// );
|
||||
//
|
||||
//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob
|
||||
|
||||
// BOOL QueryInformationJobObject(
|
||||
// HANDLE hJob,
|
||||
// JOBOBJECTINFOCLASS JobObjectInformationClass,
|
||||
// LPVOID lpJobObjectInformation,
|
||||
// DWORD cbJobObjectInformationLength,
|
||||
// LPDWORD lpReturnLength
|
||||
// );
|
||||
//
|
||||
//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
|
||||
|
||||
// HANDLE OpenJobObjectW(
|
||||
// DWORD dwDesiredAccess,
|
||||
// BOOL bInheritHandle,
|
||||
// LPCWSTR lpName
|
||||
// );
|
||||
//
|
||||
//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW
|
||||
|
||||
// DWORD SetIoRateControlInformationJobObject(
|
||||
// HANDLE hJob,
|
||||
// JOBOBJECT_IO_RATE_CONTROL_INFORMATION *IoRateControlInfo
|
||||
// );
|
||||
//
|
||||
//sys SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) = kernel32.SetIoRateControlInformationJobObject
|
@@ -0,0 +1,30 @@
package winapi

// BOOL LogonUserA(
//	LPCWSTR lpszUsername,
//	LPCWSTR lpszDomain,
//	LPCWSTR lpszPassword,
//	DWORD   dwLogonType,
//	DWORD   dwLogonProvider,
//	PHANDLE phToken
// );
//
//sys LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) = advapi32.LogonUserW

// Logon types
const (
	LOGON32_LOGON_INTERACTIVE       uint32 = 2
	LOGON32_LOGON_NETWORK           uint32 = 3
	LOGON32_LOGON_BATCH             uint32 = 4
	LOGON32_LOGON_SERVICE           uint32 = 5
	LOGON32_LOGON_UNLOCK            uint32 = 7
	LOGON32_LOGON_NETWORK_CLEARTEXT uint32 = 8
	LOGON32_LOGON_NEW_CREDENTIALS   uint32 = 9
)

// Logon providers
const (
	LOGON32_PROVIDER_DEFAULT uint32 = 0
	LOGON32_PROVIDER_WINNT40 uint32 = 2
	LOGON32_PROVIDER_WINNT50 uint32 = 3
)
@@ -0,0 +1,11 @@
package winapi

// VOID RtlMoveMemory(
//	_Out_       VOID UNALIGNED *Destination,
//	_In_  const VOID UNALIGNED *Source,
//	_In_        SIZE_T         Length
// );
//sys RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) = kernel32.RtlMoveMemory

//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc
//sys LocalFree(ptr uintptr) = kernel32.LocalFree
@@ -0,0 +1,11 @@
package winapi

// DWORD SearchPathW(
//	LPCWSTR lpPath,
//	LPCWSTR lpFileName,
//	LPCWSTR lpExtension,
//	DWORD   nBufferLength,
//	LPWSTR  lpBuffer,
//	LPWSTR  *lpFilePart
// );
//sys SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath **uint16) (size uint32, err error) = kernel32.SearchPathW
@@ -0,0 +1,3 @@
package winapi

const PROCESS_ALL_ACCESS uint32 = 2097151
@@ -0,0 +1,7 @@
package winapi

// Get count from all processor groups.
// https://docs.microsoft.com/en-us/windows/win32/procthread/processor-groups
const ALL_PROCESSOR_GROUPS = 0xFFFF

//sys GetActiveProcessorCount(groupNumber uint16) (amount uint32) = kernel32.GetActiveProcessorCount
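An illustrative one-liner (not part of this diff) showing the intended use of ALL_PROCESSOR_GROUPS: counting logical processors across every processor group rather than only the calling thread's group.

package winapi

// hostLogicalProcessorCount is an illustrative helper, not part of this diff.
// The total across all groups can exceed 64 on large machines.
func hostLogicalProcessorCount() uint32 {
	return GetActiveProcessorCount(ALL_PROCESSOR_GROUPS)
}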
@@ -0,0 +1,60 @@
package winapi

import (
	"errors"
	"syscall"
	"unicode/utf16"
	"unsafe"
)

type UnicodeString struct {
	Length        uint16
	MaximumLength uint16
	Buffer        *uint16
}

// String converts a UnicodeString to a golang string
func (uni UnicodeString) String() string {
	p := (*[0xffff]uint16)(unsafe.Pointer(uni.Buffer))

	// UnicodeString is not guaranteed to be null terminated, therefore
	// use the UnicodeString's Length field
	lengthInChars := uni.Length / 2
	return syscall.UTF16ToString(p[:lengthInChars])
}

// NewUnicodeString allocates a new UnicodeString and copies `s` into
// the buffer of the new UnicodeString.
func NewUnicodeString(s string) (*UnicodeString, error) {
	ws := utf16.Encode(([]rune)(s))
	if len(ws) > 32767 {
		return nil, syscall.ENAMETOOLONG
	}

	uni := &UnicodeString{
		Length:        uint16(len(ws) * 2),
		MaximumLength: uint16(len(ws) * 2),
		Buffer:        &make([]uint16, len(ws))[0],
	}
	copy((*[32768]uint16)(unsafe.Pointer(uni.Buffer))[:], ws)
	return uni, nil
}

// ConvertStringSetToSlice is a helper function used to convert the contents of
// `buf` into a string slice. `buf` contains a set of null terminated strings
// with an additional null at the end to indicate the end of the set.
func ConvertStringSetToSlice(buf []byte) ([]string, error) {
	var results []string
	prev := 0
	for i := range buf {
		if buf[i] == 0 {
			if prev == i {
				// found two null characters in a row, return result
				return results, nil
			}
			results = append(results, string(buf[prev:i]))
			prev = i + 1
		}
	}
	return nil, errors.New("string set malformed: missing null terminator at end of buffer")
}
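Two quick usage sketches for the helpers above (not part of this diff). The sample buffer mimics the double-NUL-terminated string sets returned by the cfgmgr32 device-list APIs declared in devices.go; the device IDs themselves are made up.

package winapi

import "errors"

// utilsExample is illustrative only, not part of this diff.
func utilsExample() ([]string, error) {
	// Round-trip a Go string through the UNICODE_STRING-style type.
	u, err := NewUnicodeString(`C:\Windows`)
	if err != nil {
		return nil, err
	}
	if u.String() != `C:\Windows` {
		return nil, errors.New("unexpected round-trip result")
	}

	// Parse a double-NUL-terminated string set into a slice.
	buf := []byte("PCI\\VEN_8086\x00ROOT\\NET\x00\x00")
	return ConvertStringSetToSlice(buf)
}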
@@ -0,0 +1,5 @@
// Package winapi contains various low-level bindings to Windows APIs. It can
// be thought of as an extension to golang.org/x/sys/windows.
package winapi

//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go jobobject.go path.go logon.go memory.go processor.go devices.go filesystem.go errors.go
@ -0,0 +1,271 @@
|
||||
// Code generated mksyscall_windows.exe DO NOT EDIT
|
||||
|
||||
package winapi
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values see on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
||||
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
|
||||
modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll")
|
||||
modntdll = windows.NewLazySystemDLL("ntdll.dll")
|
||||
|
||||
procIsProcessInJob = modkernel32.NewProc("IsProcessInJob")
|
||||
procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject")
|
||||
procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW")
|
||||
procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject")
|
||||
procSearchPathW = modkernel32.NewProc("SearchPathW")
|
||||
procLogonUserW = modadvapi32.NewProc("LogonUserW")
|
||||
procRtlMoveMemory = modkernel32.NewProc("RtlMoveMemory")
|
||||
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
||||
procLocalFree = modkernel32.NewProc("LocalFree")
|
||||
procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
|
||||
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
|
||||
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
|
||||
procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW")
|
||||
procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW")
|
||||
procNtCreateFile = modntdll.NewProc("NtCreateFile")
|
||||
procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile")
|
||||
procNtOpenDirectoryObject = modntdll.NewProc("NtOpenDirectoryObject")
|
||||
procNtQueryDirectoryObject = modntdll.NewProc("NtQueryDirectoryObject")
|
||||
procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError")
|
||||
)
|
||||
|
||||
func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) {
|
||||
var _p0 uint32
|
||||
if inheritHandle {
|
||||
_p0 = 1
|
||||
} else {
|
||||
_p0 = 0
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName)))
|
||||
handle = windows.Handle(r0)
|
||||
if handle == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0)
|
||||
ret = uint32(r0)
|
||||
if ret == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath **uint16) (size uint32, err error) {
|
||||
r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath)))
|
||||
size = uint32(r0)
|
||||
if size == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) {
|
||||
r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token)))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procRtlMoveMemory.Addr(), 3, uintptr(unsafe.Pointer(destination)), uintptr(unsafe.Pointer(source)), uintptr(length))
|
||||
if r1 == 0 {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = syscall.EINVAL
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func LocalAlloc(flags uint32, size int) (ptr uintptr) {
|
||||
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
|
||||
ptr = uintptr(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func LocalFree(ptr uintptr) {
|
||||
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0)
|
||||
return
|
||||
}
|
||||
|
||||
func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
|
||||
r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
|
||||
amount = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) {
|
||||
r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
}
|
||||
hr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) {
|
||||
r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0)
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
}
|
||||
hr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) {
|
||||
var _p0 *uint16
|
||||
_p0, hr = syscall.UTF16PtrFromString(pDeviceID)
|
||||
if hr != nil {
|
||||
return
|
||||
}
|
||||
return _CMLocateDevNode(pdnDevInst, _p0, uFlags)
|
||||
}
|
||||
|
||||
func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) {
|
||||
r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
}
|
||||
hr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) {
|
||||
r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags))
|
||||
if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
}
|
||||
hr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0)
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0)
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) {
|
||||
r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)))
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32) (status uint32) {
|
||||
var _p0 uint32
|
||||
if singleEntry {
|
||||
_p0 = 1
|
||||
} else {
|
||||
_p0 = 0
|
||||
}
|
||||
var _p1 uint32
|
||||
if restartScan {
|
||||
_p1 = 1
|
||||
} else {
|
||||
_p1 = 0
|
||||
}
|
||||
r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0)
|
||||
status = uint32(r0)
|
||||
return
|
||||
}
|
||||
|
||||
func RtlNtStatusToDosError(status uint32) (winerr error) {
|
||||
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0)
|
||||
if r0 != 0 {
|
||||
winerr = syscall.Errno(r0)
|
||||
}
|
||||
return
|
||||
}
|
@@ -0,0 +1,32 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package defaults

const (
	// DefaultMaxRecvMsgSize defines the default maximum message size for
	// receiving protobufs passed over the GRPC API.
	DefaultMaxRecvMsgSize = 16 << 20
	// DefaultMaxSendMsgSize defines the default maximum message size for
	// sending protobufs passed over the GRPC API.
	DefaultMaxSendMsgSize = 16 << 20
	// DefaultRuntimeNSLabel defines the namespace label to check for the
	// default runtime
	DefaultRuntimeNSLabel = "containerd.io/defaults/runtime"
	// DefaultSnapshotterNSLabel defines the namespace label to check for the
	// default snapshotter
	DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter"
)
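A sketch (not part of this diff) of one common way the message-size defaults get wired up, using standard grpc-go dial options; the helper name and target handling here are illustrative.

package main

import (
	"google.golang.org/grpc"

	"github.com/containerd/containerd/defaults"
)

// dial is an illustrative helper, not taken from this diff.
func dial(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(
			// Raise the per-call limits to containerd's defaults.
			grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize),
			grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize),
		),
	)
}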
@ -0,0 +1,37 @@
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaults
|
||||
|
||||
const (
|
||||
// DefaultRootDir is the default location used by containerd to store
|
||||
// persistent data
|
||||
DefaultRootDir = "/var/lib/containerd"
|
||||
// DefaultStateDir is the default location used by containerd to store
|
||||
// transient data
|
||||
DefaultStateDir = "/run/containerd"
|
||||
// DefaultAddress is the default unix socket address
|
||||
DefaultAddress = "/run/containerd/containerd.sock"
|
||||
// DefaultDebugAddress is the default unix socket address for pprof data
|
||||
DefaultDebugAddress = "/run/containerd/debug.sock"
|
||||
// DefaultFIFODir is the default location used by client-side cio library
|
||||
// to store FIFOs.
|
||||
DefaultFIFODir = "/run/containerd/fifo"
|
||||
// DefaultRuntime is the default linux runtime
|
||||
DefaultRuntime = "io.containerd.runc.v2"
|
||||
)
|
@ -0,0 +1,45 @@
|
||||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaults
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultRootDir is the default location used by containerd to store
|
||||
// persistent data
|
||||
DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root")
|
||||
// DefaultStateDir is the default location used by containerd to store
|
||||
// transient data
|
||||
DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state")
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultAddress is the default winpipe address
|
||||
DefaultAddress = `\\.\pipe\containerd-containerd`
|
||||
// DefaultDebugAddress is the default winpipe address for pprof data
|
||||
DefaultDebugAddress = `\\.\pipe\containerd-debug`
|
||||
// DefaultFIFODir is the default location used by client-side cio library
|
||||
// to store FIFOs. Unused on Windows.
|
||||
DefaultFIFODir = ""
|
||||
// DefaultRuntime is the default windows runtime
|
||||
DefaultRuntime = "io.containerd.runhcs.v1"
|
||||
)
|
@ -0,0 +1,19 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package defaults provides several common defaults for interacting with
|
||||
// containerd. These can be used on the client-side or server-side.
|
||||
package defaults
|
@ -1,94 +0,0 @@
|
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
// Seccomp represents the config for a seccomp profile for syscall restriction.
|
||||
type Seccomp struct {
|
||||
DefaultAction Action `json:"defaultAction"`
|
||||
// Architectures is kept to maintain backward compatibility with the old
|
||||
// seccomp profile.
|
||||
Architectures []Arch `json:"architectures,omitempty"`
|
||||
ArchMap []Architecture `json:"archMap,omitempty"`
|
||||
Syscalls []*Syscall `json:"syscalls"`
|
||||
}
|
||||
|
||||
// Architecture is used to represent a specific architecture
|
||||
// and its sub-architectures
|
||||
type Architecture struct {
|
||||
Arch Arch `json:"architecture"`
|
||||
SubArches []Arch `json:"subArchitectures"`
|
||||
}
|
||||
|
||||
// Arch used for architectures
|
||||
type Arch string
|
||||
|
||||
// Additional architectures permitted to be used for system calls
|
||||
// By default only the native architecture of the kernel is permitted
|
||||
const (
|
||||
ArchX86 Arch = "SCMP_ARCH_X86"
|
||||
ArchX86_64 Arch = "SCMP_ARCH_X86_64"
|
||||
ArchX32 Arch = "SCMP_ARCH_X32"
|
||||
ArchARM Arch = "SCMP_ARCH_ARM"
|
||||
ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
|
||||
ArchMIPS Arch = "SCMP_ARCH_MIPS"
|
||||
ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
|
||||
ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
|
||||
ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
|
||||
ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
|
||||
ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
|
||||
ArchPPC Arch = "SCMP_ARCH_PPC"
|
||||
ArchPPC64 Arch = "SCMP_ARCH_PPC64"
|
||||
ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
|
||||
ArchS390 Arch = "SCMP_ARCH_S390"
|
||||
ArchS390X Arch = "SCMP_ARCH_S390X"
|
||||
)
|
||||
|
||||
// Action taken upon Seccomp rule match
|
||||
type Action string
|
||||
|
||||
// Define actions for Seccomp rules
|
||||
const (
|
||||
ActKill Action = "SCMP_ACT_KILL"
|
||||
ActTrap Action = "SCMP_ACT_TRAP"
|
||||
ActErrno Action = "SCMP_ACT_ERRNO"
|
||||
ActTrace Action = "SCMP_ACT_TRACE"
|
||||
ActAllow Action = "SCMP_ACT_ALLOW"
|
||||
)
|
||||
|
||||
// Operator used to match syscall arguments in Seccomp
|
||||
type Operator string
|
||||
|
||||
// Define operators for syscall arguments in Seccomp
|
||||
const (
|
||||
OpNotEqual Operator = "SCMP_CMP_NE"
|
||||
OpLessThan Operator = "SCMP_CMP_LT"
|
||||
OpLessEqual Operator = "SCMP_CMP_LE"
|
||||
OpEqualTo Operator = "SCMP_CMP_EQ"
|
||||
OpGreaterEqual Operator = "SCMP_CMP_GE"
|
||||
OpGreaterThan Operator = "SCMP_CMP_GT"
|
||||
OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
|
||||
)
|
||||
|
||||
// Arg used for matching specific syscall arguments in Seccomp
|
||||
type Arg struct {
|
||||
Index uint `json:"index"`
|
||||
Value uint64 `json:"value"`
|
||||
ValueTwo uint64 `json:"valueTwo"`
|
||||
Op Operator `json:"op"`
|
||||
}
|
||||
|
||||
// Filter is used to conditionally apply Seccomp rules
|
||||
type Filter struct {
|
||||
Caps []string `json:"caps,omitempty"`
|
||||
Arches []string `json:"arches,omitempty"`
|
||||
MinKernel string `json:"minKernel,omitempty"`
|
||||
}
|
||||
|
||||
// Syscall is used to match a group of syscalls in Seccomp
|
||||
type Syscall struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Names []string `json:"names,omitempty"`
|
||||
Action Action `json:"action"`
|
||||
Args []*Arg `json:"args"`
|
||||
Comment string `json:"comment"`
|
||||
Includes Filter `json:"includes"`
|
||||
Excludes Filter `json:"excludes"`
|
||||
}
|
@ -0,0 +1,6 @@
|
||||
package system
|
||||
|
||||
import "os"
|
||||
|
||||
// EnsureRemoveAll is an alias to os.RemoveAll on Windows
|
||||
var EnsureRemoveAll = os.RemoveAll
|
@ -1,3 +1,5 @@
|
||||
// +build freebsd netbsd
|
||||
|
||||
package system // import "github.com/docker/docker/pkg/system"
|
||||
|
||||
import "syscall"
|
@ -0,0 +1,13 @@
|
||||
package system // import "github.com/docker/docker/pkg/system"
|
||||
|
||||
import "syscall"
|
||||
|
||||
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
|
||||
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
|
||||
return &StatT{size: s.Size,
|
||||
mode: s.Mode,
|
||||
uid: s.Uid,
|
||||
gid: s.Gid,
|
||||
rdev: s.Rdev,
|
||||
mtim: s.Mtim}, nil
|
||||
}
|
@ -1,21 +0,0 @@
|
||||
package tarsum // import "github.com/docker/docker/pkg/tarsum"
|
||||
|
||||
// BuilderContext is an interface extending TarSum by adding the Remove method.
|
||||
// In general there was concern about adding this method to TarSum itself
|
||||
// so instead it is being added just to "BuilderContext" which will then
|
||||
// only be used during the .dockerignore file processing
|
||||
// - see builder/evaluator.go
|
||||
type BuilderContext interface {
|
||||
TarSum
|
||||
Remove(string)
|
||||
}
|
||||
|
||||
func (bc *tarSum) Remove(filename string) {
|
||||
for i, fis := range bc.sums {
|
||||
if fis.Name() == filename {
|
||||
bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
|
||||
// Note, we don't just return because there could be
|
||||
// more than one with this name
|
||||
}
|
||||
}
|
||||
}
|
@ -1,133 +0,0 @@
|
||||
package tarsum // import "github.com/docker/docker/pkg/tarsum"
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FileInfoSumInterface provides an interface for accessing file checksum
|
||||
// information within a tar file. This info is accessed through interface
|
||||
// so the actual name and sum cannot be melded with.
|
||||
type FileInfoSumInterface interface {
|
||||
// File name
|
||||
Name() string
|
||||
// Checksum of this particular file and its headers
|
||||
Sum() string
|
||||
// Position of file in the tar
|
||||
Pos() int64
|
||||
}
|
||||
|
||||
type fileInfoSum struct {
|
||||
name string
|
||||
sum string
|
||||
pos int64
|
||||
}
|
||||
|
||||
func (fis fileInfoSum) Name() string {
|
||||
return fis.name
|
||||
}
|
||||
func (fis fileInfoSum) Sum() string {
|
||||
return fis.sum
|
||||
}
|
||||
func (fis fileInfoSum) Pos() int64 {
|
||||
return fis.pos
|
||||
}
|
||||
|
||||
// FileInfoSums provides a list of FileInfoSumInterfaces.
|
||||
type FileInfoSums []FileInfoSumInterface
|
||||
|
||||
// GetFile returns the first FileInfoSumInterface with a matching name.
|
||||
func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
|
||||
// We do case insensitive matching on Windows as c:\APP and c:\app are
|
||||
// the same. See issue #33107.
|
||||
for i := range fis {
|
||||
if (runtime.GOOS == "windows" && strings.EqualFold(fis[i].Name(), name)) ||
|
||||
(runtime.GOOS != "windows" && fis[i].Name() == name) {
|
||||
return fis[i]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAllFile returns a FileInfoSums with all matching names.
|
||||
func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
|
||||
f := FileInfoSums{}
|
||||
for i := range fis {
|
||||
if fis[i].Name() == name {
|
||||
f = append(f, fis[i])
|
||||
}
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// GetDuplicatePaths returns a FileInfoSums with all duplicated paths.
|
||||
func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
|
||||
seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map.
|
||||
for i := range fis {
|
||||
f := fis[i]
|
||||
if _, ok := seen[f.Name()]; ok {
|
||||
dups = append(dups, f)
|
||||
} else {
|
||||
seen[f.Name()] = 0
|
||||
}
|
||||
}
|
||||
return dups
|
||||
}
|
||||
|
||||
// Len returns the size of the FileInfoSums.
|
||||
func (fis FileInfoSums) Len() int { return len(fis) }
|
||||
|
||||
// Swap swaps two FileInfoSum values if a FileInfoSums list.
|
||||
func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }
|
||||
|
||||
// SortByPos sorts FileInfoSums content by position.
|
||||
func (fis FileInfoSums) SortByPos() {
|
||||
sort.Sort(byPos{fis})
|
||||
}
|
||||
|
||||
// SortByNames sorts FileInfoSums content by name.
|
||||
func (fis FileInfoSums) SortByNames() {
|
||||
sort.Sort(byName{fis})
|
||||
}
|
||||
|
||||
// SortBySums sorts FileInfoSums content by sums.
|
||||
func (fis FileInfoSums) SortBySums() {
|
||||
dups := fis.GetDuplicatePaths()
|
||||
if len(dups) > 0 {
|
||||
sort.Sort(bySum{fis, dups})
|
||||
} else {
|
||||
sort.Sort(bySum{fis, nil})
|
||||
}
|
||||
}
|
||||
|
||||
// byName is a sort.Sort helper for sorting by file names.
|
||||
// If names are the same, order them by their appearance in the tar archive
|
||||
type byName struct{ FileInfoSums }
|
||||
|
||||
func (bn byName) Less(i, j int) bool {
|
||||
if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() {
|
||||
return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos()
|
||||
}
|
||||
return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name()
|
||||
}
|
||||
|
||||
// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive
|
||||
type bySum struct {
|
||||
FileInfoSums
|
||||
dups FileInfoSums
|
||||
}
|
||||
|
||||
func (bs bySum) Less(i, j int) bool {
|
||||
if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() {
|
||||
return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos()
|
||||
}
|
||||
return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum()
|
||||
}
|
||||
|
||||
// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order
|
||||
type byPos struct{ FileInfoSums }
|
||||
|
||||
func (bp byPos) Less(i, j int) bool {
|
||||
return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos()
|
||||
}
|
@ -1,301 +0,0 @@
|
||||
// Package tarsum provides algorithms to perform checksum calculation on
|
||||
// filesystem layers.
|
||||
//
|
||||
// The transportation of filesystems, regarding Docker, is done with tar(1)
|
||||
// archives. There are a variety of tar serialization formats [2], and a key
|
||||
// concern here is ensuring a repeatable checksum given a set of inputs from a
|
||||
// generic tar archive. Types of transportation include distribution to and from a
|
||||
// registry endpoint, saving and loading through commands or Docker daemon APIs,
|
||||
// transferring the build context from client to Docker daemon, and committing the
|
||||
// filesystem of a container to become an image.
|
||||
//
|
||||
// As tar archives are used for transit, but not preserved in many situations, the
|
||||
// focus of the algorithm is to ensure the integrity of the preserved filesystem,
|
||||
// while maintaining a deterministic accountability. This includes neither
|
||||
// constraining the ordering or manipulation of the files during the creation or
|
||||
// unpacking of the archive, nor include additional metadata state about the file
|
||||
// system attributes.
|
||||
package tarsum // import "github.com/docker/docker/pkg/tarsum"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
buf8K = 8 * 1024
|
||||
buf16K = 16 * 1024
|
||||
buf32K = 32 * 1024
|
||||
)
|
||||
|
||||
// NewTarSum creates a new interface for calculating a fixed time checksum of a
|
||||
// tar archive.
|
||||
//
|
||||
// This is used for calculating checksums of layers of an image, in some cases
|
||||
// including the byte payload of the image's json metadata as well, and for
|
||||
// calculating the checksums for buildcache.
|
||||
func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
|
||||
return NewTarSumHash(r, dc, v, DefaultTHash)
|
||||
}
|
||||
|
||||
// NewTarSumHash creates a new TarSum, providing a THash to use rather than
|
||||
// the DefaultTHash.
|
||||
func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
|
||||
headerSelector, err := getTarHeaderSelector(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
|
||||
err = ts.initTarSum()
|
||||
return ts, err
|
||||
}
|
||||
|
||||
// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label.
|
||||
func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) {
|
||||
parts := strings.SplitN(label, "+", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}")
|
||||
}
|
||||
|
||||
versionName, hashName := parts[0], parts[1]
|
||||
|
||||
version, ok := tarSumVersionsByName[versionName]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown TarSum version name: %q", versionName)
|
||||
}
|
||||
|
||||
hashConfig, ok := standardHashConfigs[hashName]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName)
|
||||
}
|
||||
|
||||
tHash := NewTHash(hashConfig.name, hashConfig.hash.New)
|
||||
|
||||
return NewTarSumHash(r, disableCompression, version, tHash)
|
||||
}
|
||||
|
||||
// TarSum is the generic interface for calculating fixed time
|
||||
// checksums of a tar archive.
|
||||
type TarSum interface {
|
||||
io.Reader
|
||||
GetSums() FileInfoSums
|
||||
Sum([]byte) string
|
||||
Version() Version
|
||||
Hash() THash
|
||||
}
|
||||
|
||||
// tarSum struct is the structure for a Version0 checksum calculation.
|
||||
type tarSum struct {
|
||||
io.Reader
|
||||
tarR *tar.Reader
|
||||
tarW *tar.Writer
|
||||
writer writeCloseFlusher
|
||||
bufTar *bytes.Buffer
|
||||
bufWriter *bytes.Buffer
|
||||
bufData []byte
|
||||
h hash.Hash
|
||||
tHash THash
|
||||
sums FileInfoSums
|
||||
fileCounter int64
|
||||
currentFile string
|
||||
finished bool
|
||||
first bool
|
||||
DisableCompression bool // false by default. When false, the output gzip compressed.
|
||||
tarSumVersion Version // this field is not exported so it can not be mutated during use
|
||||
headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive
|
||||
}
|
||||
|
||||
func (ts tarSum) Hash() THash {
|
||||
return ts.tHash
|
||||
}
|
||||
|
||||
func (ts tarSum) Version() Version {
|
||||
return ts.tarSumVersion
|
||||
}
|
||||
|
||||
// THash provides a hash.Hash type generator and its name.
|
||||
type THash interface {
|
||||
Hash() hash.Hash
|
||||
Name() string
|
||||
}
|
||||
|
||||
// NewTHash is a convenience method for creating a THash.
|
||||
func NewTHash(name string, h func() hash.Hash) THash {
|
||||
return simpleTHash{n: name, h: h}
|
||||
}
|
||||
|
||||
type tHashConfig struct {
|
||||
name string
|
||||
hash crypto.Hash
|
||||
}
|
||||
|
||||
var (
|
||||
// NOTE: DO NOT include MD5 or SHA1, which are considered insecure.
|
||||
standardHashConfigs = map[string]tHashConfig{
|
||||
"sha256": {name: "sha256", hash: crypto.SHA256},
|
||||
"sha512": {name: "sha512", hash: crypto.SHA512},
|
||||
}
|
||||
)
|
||||
|
||||
// DefaultTHash is the default TarSum hashing algorithm - "sha256".
|
||||
var DefaultTHash = NewTHash("sha256", sha256.New)
|
||||
|
||||
type simpleTHash struct {
|
||||
n string
|
||||
h func() hash.Hash
|
||||
}
|
||||
|
||||
func (sth simpleTHash) Name() string { return sth.n }
|
||||
func (sth simpleTHash) Hash() hash.Hash { return sth.h() }
|
||||
|
||||
func (ts *tarSum) encodeHeader(h *tar.Header) error {
|
||||
for _, elem := range ts.headerSelector.selectHeaders(h) {
|
||||
// Ignore these headers to be compatible with versions
|
||||
// before go 1.10
|
||||
if elem[0] == "gname" || elem[0] == "uname" {
|
||||
elem[1] = ""
|
||||
}
|
||||
if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ts *tarSum) initTarSum() error {
|
||||
ts.bufTar = bytes.NewBuffer([]byte{})
|
||||
ts.bufWriter = bytes.NewBuffer([]byte{})
|
||||
ts.tarR = tar.NewReader(ts.Reader)
|
||||
ts.tarW = tar.NewWriter(ts.bufTar)
|
||||
if !ts.DisableCompression {
|
||||
ts.writer = gzip.NewWriter(ts.bufWriter)
|
||||
} else {
|
||||
ts.writer = &nopCloseFlusher{Writer: ts.bufWriter}
|
||||
}
|
||||
if ts.tHash == nil {
|
||||
ts.tHash = DefaultTHash
|
||||
}
|
||||
ts.h = ts.tHash.Hash()
|
||||
ts.h.Reset()
|
||||
ts.first = true
|
||||
ts.sums = FileInfoSums{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ts *tarSum) Read(buf []byte) (int, error) {
|
||||
if ts.finished {
|
||||
return ts.bufWriter.Read(buf)
|
||||
}
|
||||
if len(ts.bufData) < len(buf) {
|
||||
switch {
|
||||
case len(buf) <= buf8K:
|
||||
ts.bufData = make([]byte, buf8K)
|
||||
case len(buf) <= buf16K:
|
||||
ts.bufData = make([]byte, buf16K)
|
||||
case len(buf) <= buf32K:
|
||||
ts.bufData = make([]byte, buf32K)
|
||||
default:
|
||||
ts.bufData = make([]byte, len(buf))
|
||||
}
|
||||
}
|
||||
buf2 := ts.bufData[:len(buf)]
|
||||
|
||||
n, err := ts.tarR.Read(buf2)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
if _, err := ts.h.Write(buf2[:n]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if !ts.first {
|
||||
ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter})
|
||||
ts.fileCounter++
|
||||
ts.h.Reset()
|
||||
} else {
|
||||
ts.first = false
|
||||
}
|
||||
|
||||
if _, err := ts.tarW.Write(buf2[:n]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
currentHeader, err := ts.tarR.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
if err := ts.tarW.Close(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := ts.writer.Close(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
ts.finished = true
|
||||
return ts.bufWriter.Read(buf)
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
||||
ts.currentFile = path.Join(".", path.Join("/", currentHeader.Name))
|
||||
if err := ts.encodeHeader(currentHeader); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := ts.tarW.WriteHeader(currentHeader); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
ts.writer.Flush()
|
||||
|
||||
return ts.bufWriter.Read(buf)
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Filling the hash buffer
|
||||
if _, err = ts.h.Write(buf2[:n]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Filling the tar writer
|
||||
if _, err = ts.tarW.Write(buf2[:n]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Filling the output writer
|
||||
if _, err = io.Copy(ts.writer, ts.bufTar); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
ts.writer.Flush()
|
||||
|
||||
return ts.bufWriter.Read(buf)
|
||||
}
|
||||
|
||||
func (ts *tarSum) Sum(extra []byte) string {
|
||||
ts.sums.SortBySums()
|
||||
h := ts.tHash.Hash()
|
||||
if extra != nil {
|
||||
h.Write(extra)
|
||||
}
|
||||
for _, fis := range ts.sums {
|
||||
h.Write([]byte(fis.Sum()))
|
||||
}
|
||||
checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil))
|
||||
return checksum
|
||||
}
|
||||
|
||||
func (ts *tarSum) GetSums() FileInfoSums {
|
||||
return ts.sums
|
||||
}
|
@ -1,230 +0,0 @@
|
||||
page_title: TarSum checksum specification
|
||||
page_description: Documentation for algorithms used in the TarSum checksum calculation
|
||||
page_keywords: docker, checksum, validation, tarsum
|
||||
|
||||
# TarSum Checksum Specification
|
||||
|
||||
## Abstract
|
||||
|
||||
This document describes the algorithms used in performing the TarSum checksum
|
||||
calculation on filesystem layers, the need for this method over existing
|
||||
methods, and the versioning of this calculation.
|
||||
|
||||
## Warning
|
||||
|
||||
This checksum algorithm is for best-effort comparison of file trees with fuzzy logic.
|
||||
|
||||
This is _not_ a cryptographic attestation, and should not be considered secure.
|
||||
|
||||
## Introduction
|
||||
|
||||
In Docker, the transportation of filesystems is done with tar(1) archives.
There are a variety of tar serialization formats [2], and a key
|
||||
concern here is ensuring a repeatable checksum given a set of inputs from a
|
||||
generic tar archive. Types of transportation include distribution to and from a
|
||||
registry endpoint, saving and loading through commands or Docker daemon APIs,
|
||||
transferring the build context from client to Docker daemon, and committing the
|
||||
filesystem of a container to become an image.
|
||||
|
||||
As tar archives are used for transit but not preserved in many situations, the
focus of the algorithm is to ensure the integrity of the preserved filesystem
while maintaining deterministic accountability. This means neither constraining
the ordering or manipulation of the files during the creation or unpacking of
the archive, nor including additional metadata state about the filesystem
attributes.
|
||||
|
||||
## Intended Audience
|
||||
|
||||
This document outlines the methods used for consistent checksum calculation of
filesystems transported via tar archives.
|
||||
|
||||
Auditing these methodologies is an open and iterative process. This document
|
||||
should accommodate the review of source code. Ultimately, this document should
|
||||
be the starting point of further refinements to the algorithm and its future
|
||||
versions.
|
||||
|
||||
## Concept
|
||||
|
||||
The checksum mechanism must ensure the integrity and assurance of the
|
||||
filesystem payload.
|
||||
|
||||
## Checksum Algorithm Profile
|
||||
|
||||
A checksum mechanism must define the following operations and attributes:
|
||||
|
||||
* Associated hashing cipher - used to checksum each file payload and attribute
|
||||
information.
|
||||
* Checksum list - each file of the filesystem archive has its checksum
|
||||
calculated from the payload and attributes of the file. The final checksum is
|
||||
calculated from this list, with specific ordering.
|
||||
* Version - as the algorithm adapts to requirements, behaviors of the
algorithm are managed by versioning.
|
||||
* Archive being calculated - the tar archive having its checksum calculated
|
||||
|
||||
## Elements of TarSum checksum
|
||||
|
||||
The calculated sum output is a text string. The elements included in the output
|
||||
of the calculated sum comprise the information needed for validation of the sum
|
||||
(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
|
||||
form.
|
||||
|
||||
There are two delimiters used:
|
||||
* '+' separates TarSum version from hashing cipher
|
||||
* ':' separates calculation mechanics from expected hash
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
|
||||
| | \ |
|
||||
| | \ |
|
||||
|_version_|_cipher__|__ |
|
||||
| \ |
|
||||
|_calculation_mechanics_|______________________expected_sum_______________________|
|
||||
```
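
For illustration only, here is a minimal Go sketch of splitting a checksum
string into these elements; the `splitChecksum` function name is hypothetical
and not part of any TarSum implementation.

```
package main

import (
	"fmt"
	"strings"
)

// splitChecksum breaks a TarSum string such as
// "tarsum.v1+sha256:220a60ec..." into its three elements using the two
// delimiters described above.
func splitChecksum(checksum string) (version, cipher, digest string, err error) {
	parts := strings.SplitN(checksum, ":", 2)
	if len(parts) != 2 {
		return "", "", "", fmt.Errorf("missing ':' separator in %q", checksum)
	}
	mechanics := strings.SplitN(parts[0], "+", 2)
	if len(mechanics) != 2 {
		return "", "", "", fmt.Errorf("missing '+' separator in %q", checksum)
	}
	return mechanics[0], mechanics[1], parts[1], nil
}

func main() {
	v, c, d, _ := splitChecksum("tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e")
	fmt.Println(v, c, d) // prints the version label, the cipher, and the hex digest
}
```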
|
||||
|
||||
## Versioning
|
||||
|
||||
Versioning was introduced [0] to accommodate needed differences in calculation
and the ability to maintain backward compatibility.
|
||||
|
||||
The general algorithm is described further in the 'Calculation' section.
|
||||
|
||||
### Version0
|
||||
|
||||
This is the initial version of TarSum.
|
||||
|
||||
Its element in the TarSum checksum string is `tarsum`.
|
||||
|
||||
### Version1
|
||||
|
||||
Its element in the TarSum checksum is `tarsum.v1`.
|
||||
|
||||
The notable changes in this version:
|
||||
* Exclusion of the file `mtime` from the file information headers in each file
checksum calculation
* Inclusion of extended attribute (`xattrs`, also seen as `SCHILY.xattr.`-prefixed PAX
tar file info header) keys and values in each file checksum calculation
|
||||
|
||||
### VersionDev
|
||||
|
||||
*Do not use unless validating refinements to the checksum algorithm*
|
||||
|
||||
Its element in the TarSum checksum is `tarsum.dev`.
|
||||
|
||||
This is a floating placeholder for the next version and grounds for testing
changes. The methods used for calculation are subject to change without notice,
and this version is for testing only, not for production use.
|
||||
|
||||
## Ciphers
|
||||
|
||||
The official default and standard hashing cipher used in the calculation mechanic
is `sha256`. This refers to the SHA-256 hash algorithm as defined in FIPS 180-4.
|
||||
|
||||
Though the TarSum algorithm itself is not exclusively bound to the single
hashing cipher `sha256`, support for alternate hashing ciphers was later added
[1]. Use cases for an alternate cipher could include future-proofing the TarSum
checksum format and using faster hashes for tar filesystem checksums.
|
||||
|
||||
## Calculation
|
||||
|
||||
### Requirement
|
||||
|
||||
As mentioned earlier, the calculation takes into consideration the lifecycle of
the tar archive, since the tar archive is not an immutable, permanent artifact.
Otherwise, relying on a checksum of the archive itself, produced by a known
hashing cipher, would be reliable enough. The tar archive of the filesystem is
used as a transportation medium for Docker images, and the archive is discarded
once its contents are extracted. Therefore, for consistent validation, items
such as the order of files in the tar archive and their timestamps are subject
to change once an image is received.
|
||||
|
||||
### Process
|
||||
|
||||
The method is typically iterative due to reading tar info headers from the
|
||||
archive stream, though this is not a strict requirement.
|
||||
|
||||
#### Files
|
||||
|
||||
Each file in the tar archive has its contents (headers and body) checksummed
individually using the designated associated hashing cipher. The ordered
headers of the file are written to the checksum calculation first, followed by
the payload of the file body.
|
||||
|
||||
The resulting checksum of the file is appended to the list of file sums. The
sum is encoded as a string of the hexadecimal digest. Additionally, the file
name and position in the archive are kept as a reference for special ordering.
|
||||
|
||||
#### Headers
|
||||
|
||||
The following headers are read, in this
order (with the corresponding representation of each value):
|
||||
* 'name' - string
|
||||
* 'mode' - string of the base10 integer
|
||||
* 'uid' - string of the integer
|
||||
* 'gid' - string of the integer
|
||||
* 'size' - string of the integer
|
||||
* 'mtime' (_Version0 only_) - string of the integer seconds since 1970-01-01 00:00:00 UTC
|
||||
* 'typeflag' - string of the char
|
||||
* 'linkname' - string
|
||||
* 'uname' - string
|
||||
* 'gname' - string
|
||||
* 'devmajor' - string of the integer
|
||||
* 'devminor' - string of the integer
|
||||
|
||||
For Version1 and later, the extended attribute headers ("SCHILY.xattr."-prefixed
PAX headers) are included after the above list. These xattr key/value pairs are
first sorted by key.
|
||||
|
||||
#### Header Format
|
||||
|
||||
The ordered headers are written to the hash in the format of
|
||||
|
||||
"{.key}{.value}"
|
||||
|
||||
with no newline.
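
As a purely hypothetical illustration (the file path, attributes, and `root`
ownership below are invented for this example), the Version1 header string
written to the hash for a regular file `etc/profile` with mode 0644 (420
decimal), uid/gid 0, size 404 bytes, no link target, and no extended attributes
would be:

```
nameetc/profilemode420uid0gid0size404typeflag0linknameunamerootgnamerootdevmajor0devminor0
```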
|
||||
|
||||
#### Body
|
||||
|
||||
After the ordered headers of the file have been added to the checksum for the
file, the body of the file is written to the hash.
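
The following is a minimal Go sketch of this per-file step, assuming Version1
header selection with `sha256` and, for brevity, no extended attributes; the
`fileSum` type and `sumOneFile` function are illustrative names, not part of
any implementation.

```
package tarsumsketch

import (
	"archive/tar"
	"crypto/sha256"
	"encoding/hex"
	"io"
	"strconv"
)

// fileSum is one entry in the list of file sums described below.
type fileSum struct {
	name string // file name, kept for ordering duplicate paths
	pos  int64  // order of occurrence in the archive
	sum  string // hexadecimal digest of headers + body
}

// sumOneFile hashes the ordered Version1 headers and then the body of a
// single tar entry, returning the hexadecimal digest.
func sumOneFile(hdr *tar.Header, body io.Reader, pos int64) (fileSum, error) {
	h := sha256.New()
	headers := [][2]string{
		{"name", hdr.Name},
		{"mode", strconv.FormatInt(hdr.Mode, 10)},
		{"uid", strconv.Itoa(hdr.Uid)},
		{"gid", strconv.Itoa(hdr.Gid)},
		{"size", strconv.FormatInt(hdr.Size, 10)},
		{"typeflag", string([]byte{hdr.Typeflag})},
		{"linkname", hdr.Linkname},
		{"uname", hdr.Uname},
		{"gname", hdr.Gname},
		{"devmajor", strconv.FormatInt(hdr.Devmajor, 10)},
		{"devminor", strconv.FormatInt(hdr.Devminor, 10)},
	}
	for _, elem := range headers {
		h.Write([]byte(elem[0] + elem[1])) // "{.key}{.value}", no newline
	}
	if _, err := io.Copy(h, body); err != nil { // the body follows the headers
		return fileSum{}, err
	}
	return fileSum{name: hdr.Name, pos: pos, sum: hex.EncodeToString(h.Sum(nil))}, nil
}
```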
|
||||
|
||||
#### List of file sums
|
||||
|
||||
The list of file sums is sorted by the string of the hexadecimal digest.
|
||||
|
||||
If there are two files in the tar with matching paths, the order of occurrence
|
||||
for that path is reflected for the sums of the corresponding file header and
|
||||
body.
|
||||
|
||||
#### Final Checksum
|
||||
|
||||
Begin with a fresh or initial state of the associated hash cipher. If there is
|
||||
additional payload to include in the TarSum calculation for the archive, it is
|
||||
written first. Then each checksum from the ordered list of file sums is written
|
||||
to the hash.
|
||||
|
||||
The resulting digest is formatted per the Elements of TarSum checksum,
|
||||
including the TarSum version, the associated hash cipher and the hexadecimal
|
||||
encoded checksum digest.
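
Continuing the sketch above (again with illustrative names, and with `sha256`
assumed as the cipher), the final assembly could look like:

```
package tarsumsketch

import (
	"crypto/sha256"
	"encoding/hex"
	"sort"
)

// finalChecksum combines an optional extra payload and the per-file hex
// digests into the final TarSum string, e.g. "tarsum.v1+sha256:<hex>".
func finalChecksum(versionLabel, cipherName string, extra []byte, fileSums []string) string {
	sort.Strings(fileSums) // the list of file sums is sorted by the hex digest
	h := sha256.New()      // fresh state of the associated hash cipher
	if extra != nil {
		h.Write(extra) // additional payload (e.g. image JSON) is written first
	}
	for _, sum := range fileSums {
		h.Write([]byte(sum))
	}
	return versionLabel + "+" + cipherName + ":" + hex.EncodeToString(h.Sum(nil))
}
```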
|
||||
|
||||
## Security Considerations
|
||||
|
||||
The initial version of TarSum has undergone one update that could invalidate
handcrafted tar archives. The tar archive format supports appending files with
the same names as prior files in the archive. The latter file will clobber the
prior file of the same path. Due to this, the algorithm now accounts for files
with matching paths and orders the list of file sums accordingly [3].
|
||||
|
||||
## Footnotes
|
||||
|
||||
* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
|
||||
* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
|
||||
* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
|
||||
* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31
|
||||
|
||||
## Acknowledgments
|
||||
|
||||
Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial work on the
TarSum calculation.
|
||||
|
@ -1,158 +0,0 @@
|
||||
package tarsum // import "github.com/docker/docker/pkg/tarsum"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"io"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Version is used for versioning of the TarSum algorithm
|
||||
// based on the prefix of the hash used
|
||||
// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"
|
||||
type Version int
|
||||
|
||||
// Prefix of "tarsum"
|
||||
const (
|
||||
Version0 Version = iota
|
||||
Version1
|
||||
// VersionDev is either the latest or an unsettled next version of the TarSum calculation
|
||||
VersionDev
|
||||
)
|
||||
|
||||
// WriteV1Header writes a tar header to a writer in V1 tarsum format.
|
||||
func WriteV1Header(h *tar.Header, w io.Writer) {
|
||||
for _, elem := range v1TarHeaderSelect(h) {
|
||||
w.Write([]byte(elem[0] + elem[1]))
|
||||
}
|
||||
}
|
||||
|
||||
// VersionLabelForChecksum returns the label for the given tarsum
|
||||
// checksum, i.e., everything before the first `+` character in
|
||||
// the string or an empty string if no label separator is found.
|
||||
func VersionLabelForChecksum(checksum string) string {
|
||||
// Checksums are in the form: {versionLabel}+{hashID}:{hex}
|
||||
sepIndex := strings.Index(checksum, "+")
|
||||
if sepIndex < 0 {
|
||||
return ""
|
||||
}
|
||||
return checksum[:sepIndex]
|
||||
}
|
||||
|
||||
// GetVersions gets a list of all known tarsum versions.
|
||||
func GetVersions() []Version {
|
||||
v := []Version{}
|
||||
for k := range tarSumVersions {
|
||||
v = append(v, k)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
var (
|
||||
tarSumVersions = map[Version]string{
|
||||
Version0: "tarsum",
|
||||
Version1: "tarsum.v1",
|
||||
VersionDev: "tarsum.dev",
|
||||
}
|
||||
tarSumVersionsByName = map[string]Version{
|
||||
"tarsum": Version0,
|
||||
"tarsum.v1": Version1,
|
||||
"tarsum.dev": VersionDev,
|
||||
}
|
||||
)
|
||||
|
||||
func (tsv Version) String() string {
|
||||
return tarSumVersions[tsv]
|
||||
}
|
||||
|
||||
// GetVersionFromTarsum returns the Version from the provided string.
|
||||
func GetVersionFromTarsum(tarsum string) (Version, error) {
|
||||
tsv := tarsum
|
||||
if strings.Contains(tarsum, "+") {
|
||||
tsv = strings.SplitN(tarsum, "+", 2)[0]
|
||||
}
|
||||
for v, s := range tarSumVersions {
|
||||
if s == tsv {
|
||||
return v, nil
|
||||
}
|
||||
}
|
||||
return -1, ErrNotVersion
|
||||
}
|
||||
|
||||
// Errors that may be returned by functions in this package
|
||||
var (
|
||||
ErrNotVersion = errors.New("string does not include a TarSum Version")
|
||||
ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented")
|
||||
)
|
||||
|
||||
// tarHeaderSelector is the interface which different versions
|
||||
// of tarsum should use for selecting and ordering tar headers
|
||||
// for each item in the archive.
|
||||
type tarHeaderSelector interface {
|
||||
selectHeaders(h *tar.Header) (orderedHeaders [][2]string)
|
||||
}
|
||||
|
||||
type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string)
|
||||
|
||||
func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) {
|
||||
return f(h)
|
||||
}
|
||||
|
||||
func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
|
||||
return [][2]string{
|
||||
{"name", h.Name},
|
||||
{"mode", strconv.FormatInt(h.Mode, 10)},
|
||||
{"uid", strconv.Itoa(h.Uid)},
|
||||
{"gid", strconv.Itoa(h.Gid)},
|
||||
{"size", strconv.FormatInt(h.Size, 10)},
|
||||
{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
|
||||
{"typeflag", string([]byte{h.Typeflag})},
|
||||
{"linkname", h.Linkname},
|
||||
{"uname", h.Uname},
|
||||
{"gname", h.Gname},
|
||||
{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
|
||||
{"devminor", strconv.FormatInt(h.Devminor, 10)},
|
||||
}
|
||||
}
|
||||
|
||||
func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
|
||||
// Get extended attributes.
|
||||
xAttrKeys := make([]string, 0, len(h.Xattrs))
|
||||
for k := range h.Xattrs {
|
||||
xAttrKeys = append(xAttrKeys, k)
|
||||
}
|
||||
sort.Strings(xAttrKeys)
|
||||
|
||||
// Make the slice with enough capacity to hold the 11 basic headers
|
||||
// we want from the v0 selector plus however many xattrs we have.
|
||||
orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))
|
||||
|
||||
// Copy all headers from v0 excluding the 'mtime' header (the 5th element).
|
||||
v0headers := v0TarHeaderSelect(h)
|
||||
orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
|
||||
orderedHeaders = append(orderedHeaders, v0headers[6:]...)
|
||||
|
||||
// Finally, append the sorted xattrs.
|
||||
for _, k := range xAttrKeys {
|
||||
orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{
|
||||
Version0: v0TarHeaderSelect,
|
||||
Version1: v1TarHeaderSelect,
|
||||
VersionDev: v1TarHeaderSelect,
|
||||
}
|
||||
|
||||
func getTarHeaderSelector(v Version) (tarHeaderSelector, error) {
|
||||
headerSelector, ok := registeredHeaderSelectors[v]
|
||||
if !ok {
|
||||
return nil, ErrVersionNotImplemented
|
||||
}
|
||||
|
||||
return headerSelector, nil
|
||||
}
|
@ -1,22 +0,0 @@
|
||||
package tarsum // import "github.com/docker/docker/pkg/tarsum"
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
type writeCloseFlusher interface {
|
||||
io.WriteCloser
|
||||
Flush() error
|
||||
}
|
||||
|
||||
type nopCloseFlusher struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
func (n *nopCloseFlusher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *nopCloseFlusher) Flush() error {
|
||||
return nil
|
||||
}
|
@ -1,96 +0,0 @@
|
||||
package resumable // import "github.com/docker/docker/registry/resumable"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type requestReader struct {
|
||||
client *http.Client
|
||||
request *http.Request
|
||||
lastRange int64
|
||||
totalSize int64
|
||||
currentResponse *http.Response
|
||||
failures uint32
|
||||
maxFailures uint32
|
||||
waitDuration time.Duration
|
||||
}
|
||||
|
||||
// NewRequestReader makes it possible to resume reading a request's body transparently.
// maxfail is the number of times we retry making the request again (not resumes).
// totalsize is the total length of the body; auto-detected if not provided.
|
||||
func NewRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser {
|
||||
return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, waitDuration: 5 * time.Second}
|
||||
}
|
||||
|
||||
// NewRequestReaderWithInitialResponse makes it possible to resume
|
||||
// reading the body of an already initiated request.
|
||||
func NewRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser {
|
||||
return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse, waitDuration: 5 * time.Second}
|
||||
}
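
// exampleResumableGet is an illustrative sketch added for documentation
// purposes only (it is not part of the upstream file). It shows the intended
// use: build the request once, wrap it, and read the returned body, which
// transparently re-issues the request with a Range header after failures.
func exampleResumableGet(client *http.Client, url string) (io.ReadCloser, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	// Allow up to 5 fresh retries; pass 0 so the total size is auto-detected
	// from the first response's Content-Length.
	return NewRequestReader(client, req, 5, 0), nil
}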
|
||||
|
||||
func (r *requestReader) Read(p []byte) (n int, err error) {
|
||||
if r.client == nil || r.request == nil {
|
||||
return 0, fmt.Errorf("client and request can't be nil")
|
||||
}
|
||||
isFreshRequest := false
|
||||
if r.lastRange != 0 && r.currentResponse == nil {
|
||||
readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
|
||||
r.request.Header.Set("Range", readRange)
|
||||
time.Sleep(r.waitDuration)
|
||||
}
|
||||
if r.currentResponse == nil {
|
||||
r.currentResponse, err = r.client.Do(r.request)
|
||||
isFreshRequest = true
|
||||
}
|
||||
if err != nil && r.failures+1 != r.maxFailures {
|
||||
r.cleanUpResponse()
|
||||
r.failures++
|
||||
time.Sleep(time.Duration(r.failures) * r.waitDuration)
|
||||
return 0, nil
|
||||
} else if err != nil {
|
||||
r.cleanUpResponse()
|
||||
return 0, err
|
||||
}
|
||||
if r.currentResponse.StatusCode == http.StatusRequestedRangeNotSatisfiable && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 {
|
||||
r.cleanUpResponse()
|
||||
return 0, io.EOF
|
||||
} else if r.currentResponse.StatusCode != http.StatusPartialContent && r.lastRange != 0 && isFreshRequest {
|
||||
r.cleanUpResponse()
|
||||
return 0, fmt.Errorf("the server doesn't support byte ranges")
|
||||
}
|
||||
if r.totalSize == 0 {
|
||||
r.totalSize = r.currentResponse.ContentLength
|
||||
} else if r.totalSize <= 0 {
|
||||
r.cleanUpResponse()
|
||||
return 0, fmt.Errorf("failed to auto detect content length")
|
||||
}
|
||||
n, err = r.currentResponse.Body.Read(p)
|
||||
r.lastRange += int64(n)
|
||||
if err != nil {
|
||||
r.cleanUpResponse()
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
logrus.Infof("encountered error during pull and clearing it before resume: %s", err)
|
||||
err = nil
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *requestReader) Close() error {
|
||||
r.cleanUpResponse()
|
||||
r.client = nil
|
||||
r.request = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *requestReader) cleanUpResponse() {
|
||||
if r.currentResponse != nil {
|
||||
r.currentResponse.Body.Close()
|
||||
r.currentResponse = nil
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
@ -0,0 +1,338 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/struct.proto
|
||||
|
||||
package structpb
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// `NullValue` is a singleton enumeration to represent the null value for the
|
||||
// `Value` type union.
|
||||
//
|
||||
// The JSON representation for `NullValue` is JSON `null`.
|
||||
type NullValue int32
|
||||
|
||||
const (
|
||||
// Null value.
|
||||
NullValue_NULL_VALUE NullValue = 0
|
||||
)
|
||||
|
||||
var NullValue_name = map[int32]string{
|
||||
0: "NULL_VALUE",
|
||||
}
|
||||
|
||||
var NullValue_value = map[string]int32{
|
||||
"NULL_VALUE": 0,
|
||||
}
|
||||
|
||||
func (x NullValue) String() string {
|
||||
return proto.EnumName(NullValue_name, int32(x))
|
||||
}
|
||||
|
||||
func (NullValue) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_df322afd6c9fb402, []int{0}
|
||||
}
|
||||
|
||||
func (NullValue) XXX_WellKnownType() string { return "NullValue" }
|
||||
|
||||
// `Struct` represents a structured data value, consisting of fields
|
||||
// which map to dynamically typed values. In some languages, `Struct`
|
||||
// might be supported by a native representation. For example, in
|
||||
// scripting languages like JS a struct is represented as an
|
||||
// object. The details of that representation are described together
|
||||
// with the proto support for the language.
|
||||
//
|
||||
// The JSON representation for `Struct` is JSON object.
|
||||
type Struct struct {
|
||||
// Unordered map of dynamically typed values.
|
||||
Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Struct) Reset() { *m = Struct{} }
|
||||
func (m *Struct) String() string { return proto.CompactTextString(m) }
|
||||
func (*Struct) ProtoMessage() {}
|
||||
func (*Struct) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_df322afd6c9fb402, []int{0}
|
||||
}
|
||||
|
||||
func (*Struct) XXX_WellKnownType() string { return "Struct" }
|
||||
|
||||
func (m *Struct) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Struct.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Struct) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Struct.Merge(m, src)
|
||||
}
|
||||
func (m *Struct) XXX_Size() int {
|
||||
return xxx_messageInfo_Struct.Size(m)
|
||||
}
|
||||
func (m *Struct) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Struct.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Struct proto.InternalMessageInfo
|
||||
|
||||
func (m *Struct) GetFields() map[string]*Value {
|
||||
if m != nil {
|
||||
return m.Fields
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// `Value` represents a dynamically typed value which can be either
|
||||
// null, a number, a string, a boolean, a recursive struct value, or a
|
||||
// list of values. A producer of value is expected to set one of these
// variants; absence of any variant indicates an error.
|
||||
//
|
||||
// The JSON representation for `Value` is JSON value.
|
||||
type Value struct {
|
||||
// The kind of value.
|
||||
//
|
||||
// Types that are valid to be assigned to Kind:
|
||||
// *Value_NullValue
|
||||
// *Value_NumberValue
|
||||
// *Value_StringValue
|
||||
// *Value_BoolValue
|
||||
// *Value_StructValue
|
||||
// *Value_ListValue
|
||||
Kind isValue_Kind `protobuf_oneof:"kind"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Value) Reset() { *m = Value{} }
|
||||
func (m *Value) String() string { return proto.CompactTextString(m) }
|
||||
func (*Value) ProtoMessage() {}
|
||||
func (*Value) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_df322afd6c9fb402, []int{1}
|
||||
}
|
||||
|
||||
func (*Value) XXX_WellKnownType() string { return "Value" }
|
||||
|
||||
func (m *Value) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Value.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Value.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Value) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Value.Merge(m, src)
|
||||
}
|
||||
func (m *Value) XXX_Size() int {
|
||||
return xxx_messageInfo_Value.Size(m)
|
||||
}
|
||||
func (m *Value) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Value.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Value proto.InternalMessageInfo
|
||||
|
||||
type isValue_Kind interface {
|
||||
isValue_Kind()
|
||||
}
|
||||
|
||||
type Value_NullValue struct {
|
||||
NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
|
||||
}
|
||||
|
||||
type Value_NumberValue struct {
|
||||
NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_StringValue struct {
|
||||
StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_BoolValue struct {
|
||||
BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_StructValue struct {
|
||||
StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_ListValue struct {
|
||||
ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*Value_NullValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_NumberValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_StringValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_BoolValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_StructValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_ListValue) isValue_Kind() {}
|
||||
|
||||
func (m *Value) GetKind() isValue_Kind {
|
||||
if m != nil {
|
||||
return m.Kind
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Value) GetNullValue() NullValue {
|
||||
if x, ok := m.GetKind().(*Value_NullValue); ok {
|
||||
return x.NullValue
|
||||
}
|
||||
return NullValue_NULL_VALUE
|
||||
}
|
||||
|
||||
func (m *Value) GetNumberValue() float64 {
|
||||
if x, ok := m.GetKind().(*Value_NumberValue); ok {
|
||||
return x.NumberValue
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Value) GetStringValue() string {
|
||||
if x, ok := m.GetKind().(*Value_StringValue); ok {
|
||||
return x.StringValue
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Value) GetBoolValue() bool {
|
||||
if x, ok := m.GetKind().(*Value_BoolValue); ok {
|
||||
return x.BoolValue
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *Value) GetStructValue() *Struct {
|
||||
if x, ok := m.GetKind().(*Value_StructValue); ok {
|
||||
return x.StructValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Value) GetListValue() *ListValue {
|
||||
if x, ok := m.GetKind().(*Value_ListValue); ok {
|
||||
return x.ListValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*Value) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
(*Value_NullValue)(nil),
|
||||
(*Value_NumberValue)(nil),
|
||||
(*Value_StringValue)(nil),
|
||||
(*Value_BoolValue)(nil),
|
||||
(*Value_StructValue)(nil),
|
||||
(*Value_ListValue)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
// `ListValue` is a wrapper around a repeated field of values.
|
||||
//
|
||||
// The JSON representation for `ListValue` is JSON array.
|
||||
type ListValue struct {
|
||||
// Repeated field of dynamically typed values.
|
||||
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListValue) Reset() { *m = ListValue{} }
|
||||
func (m *ListValue) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListValue) ProtoMessage() {}
|
||||
func (*ListValue) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_df322afd6c9fb402, []int{2}
|
||||
}
|
||||
|
||||
func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
|
||||
|
||||
func (m *ListValue) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListValue.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ListValue) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListValue.Merge(m, src)
|
||||
}
|
||||
func (m *ListValue) XXX_Size() int {
|
||||
return xxx_messageInfo_ListValue.Size(m)
|
||||
}
|
||||
func (m *ListValue) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListValue.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListValue proto.InternalMessageInfo
|
||||
|
||||
func (m *ListValue) GetValues() []*Value {
|
||||
if m != nil {
|
||||
return m.Values
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
|
||||
proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
|
||||
proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
|
||||
proto.RegisterType((*Value)(nil), "google.protobuf.Value")
|
||||
proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402)
|
||||
}
|
||||
|
||||
var fileDescriptor_df322afd6c9fb402 = []byte{
|
||||
// 417 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
|
||||
0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
|
||||
0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
|
||||
0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
|
||||
0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
|
||||
0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
|
||||
0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
|
||||
0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
|
||||
0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
|
||||
0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
|
||||
0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
|
||||
0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
|
||||
0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
|
||||
0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
|
||||
0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
|
||||
0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
|
||||
0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
|
||||
0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
|
||||
0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
|
||||
0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
|
||||
0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
|
||||
0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
|
||||
0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
|
||||
0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
|
||||
0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
|
||||
0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
|
||||
0x00,
|
||||
}
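
// exampleStruct is an illustrative sketch added for documentation purposes
// only (it is not part of the generated code). It shows how the oneof wrapper
// types above are used to assemble a Struct value by hand; the field names
// and values are invented for this example.
func exampleStruct() *Struct {
	return &Struct{
		Fields: map[string]*Value{
			"name":    &Value{Kind: &Value_StringValue{StringValue: "buildkit"}},
			"version": &Value{Kind: &Value_NumberValue{NumberValue: 0.8}},
			"stable":  &Value{Kind: &Value_BoolValue{BoolValue: true}},
			"extra":   &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}},
		},
	}
}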
|
@ -0,0 +1,95 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "StructProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// `Struct` represents a structured data value, consisting of fields
|
||||
// which map to dynamically typed values. In some languages, `Struct`
|
||||
// might be supported by a native representation. For example, in
|
||||
// scripting languages like JS a struct is represented as an
|
||||
// object. The details of that representation are described together
|
||||
// with the proto support for the language.
|
||||
//
|
||||
// The JSON representation for `Struct` is JSON object.
|
||||
message Struct {
|
||||
// Unordered map of dynamically typed values.
|
||||
map<string, Value> fields = 1;
|
||||
}
|
||||
|
||||
// `Value` represents a dynamically typed value which can be either
|
||||
// null, a number, a string, a boolean, a recursive struct value, or a
|
||||
// list of values. A producer of value is expected to set one of these
// variants; absence of any variant indicates an error.
|
||||
//
|
||||
// The JSON representation for `Value` is JSON value.
|
||||
message Value {
|
||||
// The kind of value.
|
||||
oneof kind {
|
||||
// Represents a null value.
|
||||
NullValue null_value = 1;
|
||||
// Represents a double value.
|
||||
double number_value = 2;
|
||||
// Represents a string value.
|
||||
string string_value = 3;
|
||||
// Represents a boolean value.
|
||||
bool bool_value = 4;
|
||||
// Represents a structured value.
|
||||
Struct struct_value = 5;
|
||||
// Represents a repeated `Value`.
|
||||
ListValue list_value = 6;
|
||||
}
|
||||
}
|
||||
|
||||
// `NullValue` is a singleton enumeration to represent the null value for the
|
||||
// `Value` type union.
|
||||
//
|
||||
// The JSON representation for `NullValue` is JSON `null`.
|
||||
enum NullValue {
|
||||
// Null value.
|
||||
NULL_VALUE = 0;
|
||||
}
|
||||
|
||||
// `ListValue` is a wrapper around a repeated field of values.
|
||||
//
|
||||
// The JSON representation for `ListValue` is JSON array.
|
||||
message ListValue {
|
||||
// Repeated field of dynamically typed values.
|
||||
repeated Value values = 1;
|
||||
}
|
Some files were not shown because too many files have changed in this diff.