Merge pull request #3766 from thaJeztah/gofumpt

format code with gofumpt
Commit 52d948a9f5 by Hayley Swimelar, 2022-11-04 12:19:53 +01:00, committed by GitHub
107 changed files with 404 additions and 483 deletions
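gofumpt applies a stricter set of formatting rules on top of gofmt, so the hunks below are mechanical style rewrites rather than behavioural changes. As orientation, the following short Go file is a hypothetical illustration (names invented, not taken from this repository) written the way gofumpt wants it, with comments noting the pre-gofumpt form that this commit removes:

// Package gofumptexample is a hypothetical illustration of the rewrites
// applied throughout this commit.
//
// # Headings
//
// Doc-comment section titles gain a leading "#", and comments must start
// with a space ("//foo" becomes "// foo").
package gofumptexample

import (
	"errors"
	"fmt"
	"os"
)

// Single declarations lose their var (...) wrapper...
var errNotFound = errors.New("not found") // was: var ( errNotFound = errors.New("not found") )

// ...while adjacent standalone vars are grouped into one block.
var (
	dryRun  bool // was: var dryRun bool
	verbose bool // was: var verbose bool
)

func writeConfig(path string, data []byte) error {
	// Octal literals get the explicit 0o prefix (0666 becomes 0o666).
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0o666)
	// The empty line that used to separate the assignment from this error
	// check is removed.
	if err != nil {
		return fmt.Errorf("open %s: %w", path, err)
	}
	defer f.Close()

	// Local declarations use := instead of "var x = ...".
	payload := append([]byte(nil), data...) // was: var payload = append(...)

	_, err = f.Write(payload)
	return err
}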

@@ -62,7 +62,6 @@ func main() {
 if flag.NArg() > 0 {
 for _, path := range flag.Args() {
 fp, err := os.Open(path)
-
 if err != nil {
 log.Printf("%s: %v", path, err)
 fail = true

@@ -27,7 +27,6 @@ import (
 var spaceRegex = regexp.MustCompile(`\n\s*`)

 func main() {
-
 if len(os.Args) != 2 {
 log.Fatalln("please specify a template to execute.")
 }
@@ -127,5 +126,4 @@ end:
 }

 return output
-
 }

@@ -589,7 +589,7 @@ type Events struct {
 IncludeReferences bool `yaml:"includereferences"` // include reference data in manifest events
 }

-//Ignore configures mediaTypes and actions of the event, that it won't be propagated
+// Ignore configures mediaTypes and actions of the event, that it won't be propagated
 type Ignore struct {
 MediaTypes []string `yaml:"mediatypes"` // target media types to ignore
 Actions []string `yaml:"actions"` // ignore action types

@@ -360,7 +360,6 @@ func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) {

 _, err = Parse(bytes.NewReader([]byte(configYamlV0_1)))
 c.Assert(err, NotNil)
-
 }

 // TestParseWithDifferentEnvReporting validates that environment variables

@@ -1,3 +1,4 @@
+//go:build gofuzz
 // +build gofuzz

 package configuration
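This and the other fuzz files gain the Go 1.17 //go:build constraint above the legacy // +build comment, and the formatter keeps the two lines in sync. A minimal hypothetical file showing the paired constraints:

//go:build gofuzz
// +build gofuzz

// Package fuzzexample is hypothetical; only the build-constraint pairing
// above mirrors the change in this hunk.
package fuzzexample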

@@ -15,7 +15,7 @@
 // The above will store the version in the context and will be available to
 // the logger.
 //
-// Logging
+// # Logging
 //
 // The most useful aspect of this package is GetLogger. This function takes
 // any context.Context interface and returns the current logger from the
@@ -65,7 +65,7 @@
 // added to the request context, is unique to that context and can have
 // request scoped variables.
 //
-// HTTP Requests
+// # HTTP Requests
 //
 // This package also contains several methods for working with http requests.
 // The concepts are very similar to those described above. We simply place the

@@ -20,9 +20,7 @@ import (
 "github.com/sirupsen/logrus"
 )

-var (
-enforceRepoClass bool
-)
+var enforceRepoClass bool

 func main() {
 var (
@@ -110,7 +108,6 @@ func main() {
 if err != nil {
 logrus.Infof("Error serving: %v", err)
 }
-
 }

 // handlerWithContext wraps the given context-aware handler by setting up the

@@ -5,11 +5,10 @@ import (
 "crypto/rsa"
 "encoding/base64"
 "errors"
+"strings"
 "testing"
 "time"

-"strings"
-
 "github.com/distribution/distribution/v3/registry/auth"
 "github.com/docker/libtrust"
 )
@@ -49,7 +48,6 @@ func TestCreateJWTSuccessWithEmptyACL(t *testing.T) {
 if !strings.Contains(json, "test") {
 t.Fatal("Valid token was not generated.")
 }
-
 }

 func decodeJWT(rawToken string) (string, error) {
@@ -74,7 +72,7 @@ func joseBase64Decode(s string) (string, error) {
 }
 data, err := base64.StdEncoding.DecodeString(s)
 if err != nil {
-return "", err //errors.New("Error in Decoding base64 String")
+return "", err // errors.New("Error in Decoding base64 String")
 }
 return string(data), nil
 }

@@ -187,7 +187,6 @@ func TestAll(t *testing.T) {
 t.Fatalf("Missing element at position %d: %s", i, dgst)
 }
 }
-
 }

 func assertEqualShort(t *testing.T, actual, expected string) {
@@ -363,9 +362,11 @@ func BenchmarkLookup1000(b *testing.B) {
 func BenchmarkShortCode10(b *testing.B) {
 benchShortCodeNTable(b, 10, 12)
 }
+
 func BenchmarkShortCode100(b *testing.B) {
 benchShortCodeNTable(b, 100, 12)
 }
+
 func BenchmarkShortCode1000(b *testing.B) {
 benchShortCodeNTable(b, 1000, 12)
 }

@@ -7,9 +7,7 @@ import (
 "github.com/distribution/distribution/v3/health"
 )

-var (
-updater = health.NewStatusUpdater()
-)
+var updater = health.NewStatusUpdater()

 // DownHandler registers a manual_http_status that always returns an Error
 func DownHandler(w http.ResponseWriter, r *http.Request) {

@@ -13,7 +13,7 @@
 // particularly useful for checks that verify upstream connectivity or
 // database status, since they might take a long time to return/timeout.
 //
-// Installing
+// # Installing
 //
 // To install health, just import it in your application:
 //
@@ -35,7 +35,7 @@
 // After importing these packages to your main application, you can start
 // registering checks.
 //
-// Registering Checks
+// # Registering Checks
 //
 // The recommended way of registering checks is using a periodic Check.
 // PeriodicChecks run on a certain schedule and asynchronously update the
@@ -84,7 +84,7 @@
 // return Errors.new("This is an error!")
 // }))
 //
-// Examples
+// # Examples
 //
 // You could also use the health checker mechanism to ensure your application
 // only comes up if certain conditions are met, or to allow the developer to

@@ -11,14 +11,12 @@ import (
 v1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

-var (
-// SchemaVersion provides a pre-initialized version structure for this
-// packages version of the manifest.
-SchemaVersion = manifest.Versioned{
+// SchemaVersion provides a pre-initialized version structure for this
+// packages version of the manifest.
+var SchemaVersion = manifest.Versioned{
 SchemaVersion: 2, // historical value here.. does not pertain to OCI or docker version
 MediaType: v1.MediaTypeImageManifest,
 }
-)

 func init() {
 ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {

@@ -20,13 +20,11 @@ const (
 MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
 )

-var (
-// SchemaVersion provides a pre-initialized version structure for this
-// packages version of the manifest.
-SchemaVersion = manifest.Versioned{
+// SchemaVersion provides a pre-initialized version structure for this
+// packages version of the manifest.
+var SchemaVersion = manifest.Versioned{
 SchemaVersion: 1,
 }
-)

 func init() {
 schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
@@ -149,7 +147,6 @@ func (sm SignedManifest) References() []distribution.Descriptor {
 }

 return dependencies
-
 }

 // MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner

@@ -42,7 +42,6 @@ func TestManifestUnmarshaling(t *testing.T) {
 if !reflect.DeepEqual(&signed, env.signed) {
 t.Fatalf("manifests are different after unmarshaling: %v != %v", signed, env.signed)
 }
-
 }

 func TestManifestVerification(t *testing.T) {

@@ -65,7 +65,6 @@ func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable)
 mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...)
 mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...)
 return nil
-
 }

 // References returns the current references added to this builder

@@ -33,14 +33,12 @@ const (
 MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
 )

-var (
-// SchemaVersion provides a pre-initialized version structure for this
-// packages version of the manifest.
-SchemaVersion = manifest.Versioned{
+// SchemaVersion provides a pre-initialized version structure for this
+// packages version of the manifest.
+var SchemaVersion = manifest.Versioned{
 SchemaVersion: 2,
 MediaType: MediaTypeManifest,
 }
-)

 func init() {
 schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
@@ -119,7 +117,6 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
 if manifest.MediaType != MediaTypeManifest {
 return fmt.Errorf("mediaType in manifest should be '%s' not '%s'",
 MediaTypeManifest, manifest.MediaType)
-
 }

 m.Manifest = manifest

@@ -233,7 +233,6 @@ func checkCommon(t *testing.T, event events.Event) {
 if event.(Event).Target.Repository != repo {
 t.Fatalf("unexpected repository: %q != %q", event.(Event).Target.Repository, repo)
 }
-
 }

 type testSinkFn func(event events.Event) error

@@ -143,9 +143,7 @@ type SourceRecord struct {
 InstanceID string `json:"instanceID,omitempty"`
 }

-var (
-// ErrSinkClosed is returned if a write is issued to a sink that has been
-// closed. If encountered, the error should be considered terminal and
-// retries will not be successful.
-ErrSinkClosed = fmt.Errorf("sink: closed")
-)
+// ErrSinkClosed is returned if a write is issued to a sink that has been
+// closed. If encountered, the error should be considered terminal and
+// retries will not be successful.
+var ErrSinkClosed = fmt.Errorf("sink: closed")

@@ -13,7 +13,7 @@ import (
 // envelope has changed. If this code fails, the revision of the protocol may
 // need to be incremented.
 func TestEventEnvelopeJSONFormat(t *testing.T) {
-var expected = strings.TrimSpace(`
+expected := strings.TrimSpace(`
 {
 "events": [
 {
@@ -114,7 +114,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
 prototype.Request.UserAgent = "test/0.1"
 prototype.Source.Addr = "hostname.local:port"

-var manifestPush = prototype
+manifestPush := prototype
 manifestPush.ID = "asdf-asdf-asdf-asdf-0"
 manifestPush.Target.Digest = "sha256:0123456789abcdef0"
 manifestPush.Target.Length = 1
@@ -123,7 +123,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
 manifestPush.Target.Repository = "library/test"
 manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest"

-var layerPush0 = prototype
+layerPush0 := prototype
 layerPush0.ID = "asdf-asdf-asdf-asdf-1"
 layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"
 layerPush0.Target.Length = 2
@@ -132,7 +132,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) {
 layerPush0.Target.Repository = "library/test"
 layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest"

-var layerPush1 = prototype
+layerPush1 := prototype
 layerPush1.ID = "asdf-asdf-asdf-asdf-2"
 layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6"
 layerPush1.Target.Length = 3

@@ -135,7 +135,7 @@ type headerRoundTripper struct {
 }

 func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
-var nreq = *req
+nreq := *req
 nreq.Header = make(http.Header)

 merge := func(headers http.Header) {

@@ -197,7 +197,6 @@ func TestHTTPSink(t *testing.T) {
 if err := sink.Close(); err == nil {
 t.Fatalf("second close should have returned error: %v", err)
 }
-
 }

 func createTestEvent(action, repo, typ string) Event {

@@ -3,13 +3,12 @@ package notifications

 import (
 "reflect"
 "sync"
+"testing"
 "time"

 events "github.com/docker/go-events"
 "github.com/sirupsen/logrus"
-
-"testing"
 )

 func TestEventQueue(t *testing.T) {

@@ -1,3 +1,4 @@
+//go:build gofuzz
 // +build gofuzz

 package reference

@@ -84,7 +84,7 @@ func TestValidateRemoteName(t *testing.T) {
 // Allow multiple hyphens as well.
 "docker---rules/docker",

-//Username doc and image name docker being tested.
+// Username doc and image name docker being tested.
 "doc/docker",

 // single character names are now allowed.
@@ -129,7 +129,7 @@ func TestValidateRemoteName(t *testing.T) {
 // No repository.
 "docker/",

-//namespace too long
+// namespace too long
 "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker",
 }
 for _, repositoryName := range invalidRepositoryNames {

@@ -525,7 +525,6 @@ func TestReferenceRegexp(t *testing.T) {
 for i := range testcases {
 checkRegexp(t, ReferenceRegexp, testcases[i])
 }
-
 }

 func TestIdentifierRegexp(t *testing.T) {

@@ -86,7 +86,6 @@ func TestErrorCodes(t *testing.T) {
 t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString)
 }
 }
-
 }

 func TestErrorsManagement(t *testing.T) {
@@ -99,7 +98,6 @@ func TestErrorsManagement(t *testing.T) {
 errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data"))

 p, err := json.Marshal(errs)
-
 if err != nil {
 t.Fatalf("error marashaling errors: %v", err)
 }
@@ -181,5 +179,4 @@ func TestErrorsManagement(t *testing.T) {
 if e2.Detail != `stuff2` {
 t.Fatalf("e2 had wrong detail: %q", e2.Detail)
 }
-
 }

@@ -75,8 +75,10 @@ var (
 })
 )

-var nextCode = 1000
-var registerLock sync.Mutex
+var (
+nextCode = 1000
+registerLock sync.Mutex
+)

 // Register will make the passed-in error known to the environment and
 // return a new ErrorCode

@@ -262,7 +262,6 @@ type RouteDescriptor struct {
 // MethodDescriptor provides a description of the requests that may be
 // conducted with the target method.
 type MethodDescriptor struct {
-
 // Method is an HTTP method, such as GET, PUT or POST.
 Method string

@@ -1,3 +1,4 @@
+//go:build gofuzz
 // +build gofuzz

 package v2

@@ -265,7 +265,6 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee
 u := server.URL + testcase.RequestURI

 resp, err := http.Get(u)
-
 if err != nil {
 t.Fatalf("error issuing get request: %v", err)
 }
@@ -316,7 +315,6 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee

 resp.Body.Close()
 }
-
 }

 // -------------- START LICENSED CODE --------------

@@ -29,7 +29,6 @@
 // }
 // }
 // }
-//
 package auth

 import (

@@ -128,10 +128,10 @@ func createHtpasswdFile(path string) error {
 return err
 }

-if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
+if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
 return err
 }
-f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)
+f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0o600)
 if err != nil {
 return fmt.Errorf("failed to open htpasswd path %s", err)
 }
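The new 0o prefix changes only the spelling of the permission bits, not their value: 0o700 and 0o600 are the same octal numbers as 0700 and 0600. A quick hypothetical check:

package main

import "fmt"

func main() {
	// Octal literals with and without the 0o prefix compare equal.
	fmt.Println(0o700 == 0700, 0o600 == 0600) // prints: true true
}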

@@ -42,7 +42,7 @@ func TestBasicAccessController(t *testing.T) {

 tempFile.Close()

-var userNumber = 0
+userNumber := 0

 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 ctx := context.WithRequest(ctx, r)
@@ -76,7 +76,6 @@ func TestBasicAccessController(t *testing.T) {
 req, _ := http.NewRequest(http.MethodGet, server.URL, nil)

 resp, err := client.Do(req)
-
 if err != nil {
 t.Fatalf("unexpected error during GET: %v", err)
 }
@@ -120,7 +119,6 @@ func TestBasicAccessController(t *testing.T) {
 }
 }
 }
-
 }

 func TestCreateHtpasswdFile(t *testing.T) {

@@ -8,7 +8,6 @@ import (
 )

 func TestParseHTPasswd(t *testing.T) {
-
 for _, tc := range []struct {
 desc string
 input string
@@ -81,5 +80,4 @@ asdf
 t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries)
 }
 }
-
 }

@@ -70,7 +70,6 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut

 ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, auth.UserNameKey, auth.UserKey))
 return ctx, nil
-
 }

 type challenge struct {

@@ -185,6 +185,7 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error {

 // VerifySigningKey attempts to get the key which was used to sign this token.
 // The token header should contain either of these 3 fields:
+//
 // `x5c` - The x509 certificate chain for the signing key. Needs to be
 // verified.
 // `jwk` - The JSON Web Key representation of the signing key.
@@ -192,6 +193,7 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error {
 // `kid` - The unique identifier for the key. This library interprets it
 // as a libtrust fingerprint. The key itself can be looked up in
 // the trustedKeys field of the given verify options.
+//
 // Each of these methods are tried in that order of preference until the
 // signing key is found or an error is returned.
 func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) {

@@ -493,7 +493,7 @@ func TestNewAccessControllerPemBlock(t *testing.T) {
 defer os.Remove(rootCertBundleFilename)

 // Add something other than a certificate to the rootcertbundle
-file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0666)
+file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0o666)
 if err != nil {
 t.Fatal(err)
 }

@@ -38,7 +38,6 @@ func TestAuthChallengeParse(t *testing.T) {
 if expected := "he\"llo"; challenge.Parameters["slashed"] != expected {
 t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected)
 }
-
 }

 func TestAuthChallengeNormalization(t *testing.T) {
@@ -49,7 +48,6 @@ func TestAuthChallengeNormalization(t *testing.T) {
 }

 func testAuthChallengeNormalization(t *testing.T, host string) {
-
 scm := NewSimpleManager()

 url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))
@@ -85,7 +83,6 @@ func testAuthChallengeNormalization(t *testing.T, host string) {
 }

 func testAuthChallengeConcurrent(t *testing.T, host string) {
-
 scm := NewSimpleManager()

 url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host))

@@ -50,7 +50,6 @@ func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Re
 func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) {
 h := testutil.NewHandler(rrm)
 wrapper := &testAuthenticationWrapper{
-
 headers: http.Header(map[string][]string{
 "X-API-Version": {"registry/2.0"},
 "X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"},

@@ -296,7 +296,6 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e
 desc.Size = length

 return desc, nil
-
 }

 // Get issues a HEAD request for a Manifest against its named endpoint in order
@@ -529,7 +528,6 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
 }
 mt := resp.Header.Get("Content-Type")
 body, err := ioutil.ReadAll(resp.Body)
-
 if err != nil {
 return nil, err
 }
@@ -667,7 +665,6 @@ func sanitizeLocation(location, base string) (string, error) {

 func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
 return bs.statter.Stat(ctx, dgst)
-
 }

 func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {

@@ -319,7 +319,6 @@ func TestBlobDelete(t *testing.T) {
 if err != nil {
 t.Errorf("Error deleting blob: %s", err.Error())
 }
-
 }

 func TestBlobFetch(t *testing.T) {
@@ -399,7 +398,6 @@ func TestBlobExistsNoContentLength(t *testing.T) {
 if !strings.Contains(err.Error(), "missing content-length heade") {
 t.Fatalf("Expected missing content-length error message")
 }
-
 }

 func TestBlobExists(t *testing.T) {
@@ -986,7 +984,6 @@ func addTestManifestWithEtag(repo reference.Named, reference string, content []b
 "Content-Type": {schema1.MediaTypeSignedManifest},
 }),
 }
-
 }
 *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag})
 }
@@ -1535,6 +1532,7 @@ func TestObtainsManifestForTagWithoutHeaders(t *testing.T) {
 t.Fatalf("Unexpected digest")
 }
 }
+
 func TestManifestTagsPaginated(t *testing.T) {
 s := httptest.NewServer(http.NotFoundHandler())
 defer s.Close()

@@ -87,7 +87,8 @@ func TestCatalogAPI(t *testing.T) {

 values := url.Values{
 "last": []string{""},
-"n": []string{strconv.Itoa(chunkLen)}}
+"n": []string{strconv.Itoa(chunkLen)},
+}

 catalogURL, err := env.builder.BuildCatalogURL(values)
 if err != nil {
@@ -453,7 +454,6 @@ func TestBlobAPI(t *testing.T) {
 defer env2.Shutdown()
 args = makeBlobArgs(t)
 testBlobAPI(t, env2, args)
-
 }

 func TestBlobDelete(t *testing.T) {
@@ -1110,7 +1110,7 @@ const (

 func (factory *storageManifestErrDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
 // Initialize the mock driver
-var errGenericStorage = errors.New("generic storage error")
+errGenericStorage := errors.New("generic storage error")
 return &mockErrorDriver{
 returnErrs: []mockErrorMapping{
 {
@@ -1346,7 +1346,6 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name

 for i := range unsignedManifest.FSLayers {
 rs, dgst, err := testutil.CreateRandomTarFile()
-
 if err != nil {
 t.Fatalf("error creating random layer %d: %v", i, err)
 }
@@ -1450,7 +1449,6 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name
 sm2, err := schema1.Sign(&fetchedManifestByDigest.Manifest, env.pk)
 if err != nil {
 t.Fatal(err)
-
 }

 // Re-push with a few different Content-Types. The official schema1
@@ -1684,7 +1682,6 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name

 for i := range manifest.Layers {
 rs, dgst, err := testutil.CreateRandomTarFile()
-
 if err != nil {
 t.Fatalf("error creating random layer %d: %v", i, err)
 }
@@ -2279,7 +2276,6 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) {
 if len(tagsResponse.Tags) != 0 {
 t.Fatalf("expected 0 tags in response: %v", tagsResponse.Tags)
 }
-
 }

 type testEnv struct {
@@ -2308,7 +2304,6 @@ func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv {
 config.Compatibility.Schema1.Enabled = true

 return newTestEnvWithConfig(t, &config)
-
 }

 func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv {
@@ -2334,7 +2329,6 @@ func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *te
 app := NewApp(ctx, config)
 server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app))
 builder, err := v2.NewURLBuilderFromString(server.URL+config.HTTP.Prefix, false)
-
 if err != nil {
 t.Fatalf("error creating url builder: %v", err)
 }
@@ -2832,7 +2826,6 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) {
 blobURL, _ := env.builder.BuildBlobURL(ref)
 resp, _ = httpDelete(blobURL)
 checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode)
-
 }

 func TestProxyManifestGetByTag(t *testing.T) {

@@ -703,7 +703,6 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
 return
 }
 repository, err := app.registry.Repository(context, nameRef)
-
 if err != nil {
 dcontext.GetLogger(context).Errorf("error resolving repository: %v", err)
@@ -983,7 +982,6 @@ func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespac
 registry = rmw
 }
 return registry, nil
-
 }

 // applyRepoMiddleware wraps a repository with the configured middlewares

@@ -120,13 +120,11 @@ func TestAppDispatcher(t *testing.T) {
 app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars)))
 route := router.GetRoute(testcase.endpoint).Host(serverURL.Host)
 u, err := route.URL(testcase.vars...)
-
 if err != nil {
 t.Fatal(err)
 }

 resp, err := http.Get(u.String())
-
 if err != nil {
 t.Fatal(err)
 }
@@ -275,5 +273,4 @@ func TestAppendAccessRecords(t *testing.T) {
 if ok := reflect.DeepEqual(result, expectedResult); !ok {
 t.Fatalf("Actual access record differs from expected")
 }
-
 }

@@ -79,7 +79,6 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req

 blobs := buh.Repository.Blobs(buh)
 upload, err := blobs.Create(buh, options...)
-
 if err != nil {
 if ebm, ok := err.(distribution.ErrBlobMounted); ok {
 if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil {
@@ -219,7 +218,6 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht
 // really set the mediatype. For now, we can let the backend take care
 // of this.
 })
-
 if err != nil {
 switch err := err.(type) {
 case distribution.ErrBlobInvalidDigest:

@@ -34,7 +34,7 @@ type catalogAPIResponse struct {
 }

 func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) {
-var moreEntries = true
+moreEntries := true

 q := r.URL.Query()
 lastEntry := q.Get("last")

@@ -31,7 +31,7 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
 func copyFullPayload(ctx context.Context, responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, action string) error {
 // Get a channel that tells us if the client disconnects
 clientClosed := r.Context().Done()
-var body = r.Body
+body := r.Body
 if limit > 0 {
 body = http.MaxBytesReader(responseWriter, body, limit)
 }

@@ -479,7 +479,6 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest)
 }

 return nil
-
 }

 // DeleteManifest removes the manifest with the given digest or the tag with the given name from the registry.

@@ -12,8 +12,10 @@ import (
 // used to register the constructor for different RegistryMiddleware backends.
 type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error)

-var middlewares map[string]InitFunc
-var registryoptions []storage.RegistryOption
+var (
+middlewares map[string]InitFunc
+registryoptions []storage.RegistryOption
+)

 // Register is used to register an InitFunc for
 // a RegistryMiddleware backend with the given name.

@@ -221,6 +221,7 @@ func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) {
 te.inRemote = inRemote
 te.numUnique = numUnique
 }
+
 func TestProxyStoreGet(t *testing.T) {
 te := makeTestEnv(t, "foo/bar")
@@ -253,7 +254,6 @@ func TestProxyStoreGet(t *testing.T) {
 if (*remoteStats)["get"] != 1 {
 t.Errorf("Unexpected remote get count")
 }
-
 }

 func TestProxyStoreStat(t *testing.T) {
@@ -284,7 +284,6 @@ func TestProxyStoreStat(t *testing.T) {
 if te.store.authChallenger.(*mockChallenger).count != len(te.inRemote) {
 t.Fatalf("Unexpected auth challenge count, got %#v", te.store.authChallenger)
 }
-
 }

 func TestProxyStoreServeHighConcurrency(t *testing.T) {

@@ -79,7 +79,7 @@ func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, optio
 pms.scheduler.AddManifest(repoBlob, repositoryTTL)

 // Ensure the manifest blob is cleaned up
-//pms.scheduler.AddBlob(blobRef, repositoryTTL)
+// pms.scheduler.AddBlob(blobRef, repositoryTTL)

 }

@@ -271,5 +271,4 @@ func TestProxyManifests(t *testing.T) {
 if env.manifests.authChallenger.(*mockChallenger).count != 2 {
 t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger)
 }
-
 }

@@ -70,5 +70,4 @@ func init() {
 pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} {
 return proxyMetrics.manifestMetrics
 }))
-
 }

@@ -69,7 +69,6 @@ func TestSchedule(t *testing.T) {
 s.Lock()
 s.add(ref3, 1*timeUnit, entryTypeBlob)
 s.Unlock()
-
 }()

 // Ensure all repos are deleted
@@ -195,7 +194,6 @@ func TestStopRestore(t *testing.T) {
 if len(remainingRepos) != 0 {
 t.Fatalf("Repositories remaining: %#v", remainingRepos)
 }
-
 }

 func TestDoubleStart(t *testing.T) {

@@ -73,12 +73,14 @@ var defaultCipherSuites = []uint16{
 }

 // maps tls version strings to constants
-var defaultTLSVersionStr = "tls1.2"
-var tlsVersions = map[string]uint16{
+var (
+defaultTLSVersionStr = "tls1.2"
+tlsVersions = map[string]uint16{
 // user specified values
 "tls1.2": tls.VersionTLS12,
 "tls1.3": tls.VersionTLS13,
 }
+)

 // this channel gets notified when process receives signal. It is global to ease unit testing
 var quit = make(chan os.Signal, 1)
@@ -89,7 +91,6 @@ var ServeCmd = &cobra.Command{
 Short: "`serve` stores and distributes Docker images",
 Long: "`serve` stores and distributes Docker images.",
 Run: func(cmd *cobra.Command, args []string) {
-
 // setup context
 ctx := dcontext.WithVersion(dcontext.Background(), version.Version)

@@ -152,7 +152,7 @@ func TestGetCipherSuite(t *testing.T) {
 t.Error("did not return expected error about unknown cipher suite")
 }

-var insecureCipherSuites = []string{
+insecureCipherSuites := []string{
 "TLS_RSA_WITH_RC4_128_SHA",
 "TLS_RSA_WITH_AES_128_CBC_SHA256",
 "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
@@ -234,7 +234,7 @@ func buildRegistryTLSConfig(name, keyType string, cipherSuites []string) (*regis
 }

 keyPath := path.Join(os.TempDir(), name+".key")
-keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
 if err != nil {
 return nil, fmt.Errorf("failed to open %s for writing: %v", keyPath, err)
 }

@@ -36,8 +36,10 @@ var RootCmd = &cobra.Command{
 },
 }

-var dryRun bool
-var removeUntagged bool
+var (
+dryRun bool
+removeUntagged bool
+)

 // GCCmd is the cobra command that corresponds to the garbage-collect subcommand
 var GCCmd = &cobra.Command{

@@ -36,7 +36,6 @@ func TestWriteSeek(t *testing.T) {
 bs := repository.Blobs(ctx)

 blobUpload, err := bs.Create(ctx)
-
 if err != nil {
 t.Fatalf("unexpected error starting layer upload: %s", err)
 }
@@ -47,7 +46,6 @@ func TestWriteSeek(t *testing.T) {
 if offset != int64(len(contents)) {
 t.Fatalf("unexpected value for blobUpload offset: %v != %v", offset, len(contents))
 }
-
 }

 // TestSimpleBlobUpload covers the blob upload process, exercising common
@@ -75,7 +73,6 @@ func TestSimpleBlobUpload(t *testing.T) {
 rd := io.TeeReader(randomDataReader, h)

 blobUpload, err := bs.Create(ctx)
-
 if err != nil {
 t.Fatalf("unexpected error starting layer upload: %s", err)
 }
@@ -385,7 +382,6 @@ func TestBlobMount(t *testing.T) {
 sbs := sourceRepository.Blobs(ctx)

 blobUpload, err := sbs.Create(ctx)
-
 if err != nil {
 t.Fatalf("unexpected error starting layer upload: %s", err)
 }

@@ -121,7 +121,6 @@ func (bs *blobStore) path(dgst digest.Digest) (string, error) {
 bp, err := pathFor(blobDataPathSpec{
 digest: dgst,
 })
-
 if err != nil {
 return "", err
 }
@@ -165,7 +164,6 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
 path, err := pathFor(blobDataPathSpec{
 digest: dgst,
 })
-
 if err != nil {
 return distribution.Descriptor{}, err
 }

@@ -15,9 +15,7 @@ import (
 "github.com/sirupsen/logrus"
 )

-var (
-errResumableDigestNotAvailable = errors.New("resumable digest not available")
-)
+var errResumableDigestNotAvailable = errors.New("resumable digest not available")

 const (
 // digestSha256Empty is the canonical sha256 digest of empty data
@@ -296,7 +294,6 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor
 blobPath, err := pathFor(blobDataPathSpec{
 digest: desc.Digest,
 })
-
 if err != nil {
 return err
 }
@@ -355,7 +352,6 @@ func (bw *blobWriter) removeResources(ctx context.Context) error {
 name: bw.blobStore.repository.Named().Name(),
 id: bw.id,
 })
-
 if err != nil {
 return err
 }

@@ -85,7 +85,6 @@ func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry
 alg: bw.digester.Digest().Algorithm(),
 list: true,
 })
-
 if err != nil {
 return nil, err
 }
@@ -136,7 +135,6 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error {
 alg: bw.digester.Digest().Algorithm(),
 offset: bw.written,
 })
-
 if err != nil {
 return err
 }

@@ -39,14 +39,16 @@ func checkBlobDescriptorCacheEmptyRepository(ctx context.Context, t *testing.T,
 if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{
 Digest: "sha384:abc",
 Size: 10,
-MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat {
+MediaType: "application/octet-stream",
+}); err != digest.ErrDigestInvalidFormat {
 t.Fatalf("expected error with invalid digest: %v", err)
 }

 if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{
 Digest: "",
 Size: 10,
-MediaType: "application/octet-stream"}); err == nil {
+MediaType: "application/octet-stream",
+}); err == nil {
 t.Fatalf("expected error setting value on invalid descriptor")
 }
@@ -68,7 +70,8 @@ func checkBlobDescriptorCacheSetAndRead(ctx context.Context, t *testing.T, provi
 expected := distribution.Descriptor{
 Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111",
 Size: 10,
-MediaType: "application/octet-stream"}
+MediaType: "application/octet-stream",
+}

 cache, err := provider.RepositoryScoped("foo/bar")
 if err != nil {
@@ -152,7 +155,8 @@ func checkBlobDescriptorCacheClear(ctx context.Context, t *testing.T, provider c
 expected := distribution.Descriptor{
 Digest: "sha256:def1111111111111111111111111111111111111111111111111111111111111",
 Size: 10,
-MediaType: "application/octet-stream"}
+MediaType: "application/octet-stream",
+}

 cache, err := provider.RepositoryScoped("foo/bar")
 if err != nil {

@@ -14,10 +14,8 @@ type cachedBlobStatter struct {
 backend distribution.BlobDescriptorService
 }

-var (
-// cacheCount is the number of total cache request received/hits/misses
-cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")
-)
+// cacheCount is the number of total cache request received/hits/misses
+var cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")

 // NewCachedBlobStatter creates a new statter which prefers a cache and
 // falls back to a backend.

@@ -102,7 +102,6 @@ func makeRepo(ctx context.Context, t *testing.T, name string, reg distribution.N
 if err != nil {
 t.Fatalf("manifest upload failed: %v", err)
 }
-
 }

 func TestCatalog(t *testing.T) {
@@ -289,8 +288,10 @@ func BenchmarkPathCompareNativeEqual(B *testing.B) {
 }
 }

-var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
-var separatorChars = []byte("._-")
+var (
+filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
+separatorChars = []byte("._-")
+)

 func randomPath(length int64) string {
 path := "/"

@@ -93,7 +93,8 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) {

 d := &driver{
 client: blobClient,
-container: container}
+container: container,
+}

 return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil
 }
@@ -412,7 +413,6 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) {
 Marker: marker,
 Prefix: virtPath,
 })
-
 if err != nil {
 return out, err
 }

@@ -48,10 +48,8 @@ import (
 "github.com/docker/go-metrics"
 )

-var (
-// storageAction is the metrics of blob related operations
-storageAction = prometheus.StorageNamespace.NewLabeledTimer("action", "The number of seconds that the storage action takes", "driver", "action")
-)
+// storageAction is the metrics of blob related operations
+var storageAction = prometheus.StorageNamespace.NewLabeledTimer("action", "The number of seconds that the storage action takes", "driver", "action")

 func init() {
 metrics.Register(prometheus.StorageNamespace)

@@ -145,7 +145,7 @@ func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileIn
 }

 // List returns a list of the objects that are direct descendants of the
-//given path.
+// given path.
 func (r *regulator) List(ctx context.Context, path string) ([]string, error) {
 r.enter()
 defer r.exit()

@@ -52,8 +52,10 @@ type FileInfoInternal struct {
 FileInfoFields
 }

-var _ FileInfo = FileInfoInternal{}
-var _ FileInfo = &FileInfoInternal{}
+var (
+_ FileInfo = FileInfoInternal{}
+_ FileInfo = &FileInfoInternal{}
+)

 // Path provides the full path of the target of this file info.
 func (fi FileInfoInternal) Path() string {

@@ -149,7 +149,7 @@ func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte
 // Reader retrieves an io.ReadCloser for the content stored at "path" with a
 // given byte offset.
 func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
-file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644)
+file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0o644)
 if err != nil {
 if os.IsNotExist(err) {
 return nil, storagedriver.PathNotFoundError{Path: path}
@@ -173,11 +173,11 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
 func (d *driver) Writer(ctx context.Context, subPath string, append bool) (storagedriver.FileWriter, error) {
 fullPath := d.fullPath(subPath)
 parentDir := path.Dir(fullPath)
-if err := os.MkdirAll(parentDir, 0777); err != nil {
+if err := os.MkdirAll(parentDir, 0o777); err != nil {
 return nil, err
 }

-fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666)
+fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0o666)
 if err != nil {
 return nil, err
 }
@@ -260,7 +260,7 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e
 return storagedriver.PathNotFoundError{Path: sourcePath}
 }

-if err := os.MkdirAll(path.Dir(dest), 0777); err != nil {
+if err := os.MkdirAll(path.Dir(dest), 0o777); err != nil {
 return err
 }

@@ -34,7 +34,6 @@ func init() {
 }

 func TestFromParametersImpl(t *testing.T) {
-
 tests := []struct {
 params map[string]interface{} // technically the yaml can contain anything
 expected DriverParameters
@@ -109,5 +108,4 @@ func TestFromParametersImpl(t *testing.T) {
 t.Fatalf("unexpected params from filesystem driver. expected %+v, got %+v", item.expected, params)
 }
 }
-
 }

View File

@ -2,7 +2,7 @@
// store blobs in Google cloud storage. // store blobs in Google cloud storage.
// //
// This package leverages the google.golang.org/cloud/storage client library // This package leverages the google.golang.org/cloud/storage client library
//for interfacing with gcs. // for interfacing with gcs.
// //
// Because gcs is a key, value store the Stat call does not support last modification // Because gcs is a key, value store the Stat call does not support last modification
// time for directories (directories are an abstraction for key, value stores) // time for directories (directories are an abstraction for key, value stores)
@ -445,7 +445,6 @@ func putContentsClose(wc *storage.Writer, contents []byte) error {
// available for future calls to StorageDriver.GetContent and // available for future calls to StorageDriver.GetContent and
// StorageDriver.Reader. // StorageDriver.Reader.
func (w *writer) Commit() error { func (w *writer) Commit() error {
if err := w.checkClosed(); err != nil { if err := w.checkClosed(); err != nil {
return err return err
} }
@ -597,7 +596,7 @@ func retry(req request) error {
// size in bytes and the creation time. // size in bytes and the creation time.
func (d *driver) Stat(context context.Context, path string) (storagedriver.FileInfo, error) { func (d *driver) Stat(context context.Context, path string) (storagedriver.FileInfo, error) {
var fi storagedriver.FileInfoFields var fi storagedriver.FileInfoFields
//try to get as file // try to get as file
gcsContext := d.context(context) gcsContext := d.context(context)
obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path)) obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path))
if err == nil { if err == nil {
@ -612,7 +611,7 @@ func (d *driver) Stat(context context.Context, path string) (storagedriver.FileI
} }
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
} }
//try to get as folder // try to get as folder
dirpath := d.pathToDirKey(path) dirpath := d.pathToDirKey(path)
var query *storage.Query var query *storage.Query
@ -640,7 +639,7 @@ func (d *driver) Stat(context context.Context, path string) (storagedriver.FileI
} }
// List returns a list of the objects that are direct descendants of the // List returns a list of the objects that are direct descendants of the
//given path. // given path.
func (d *driver) List(context context.Context, path string) ([]string, error) { func (d *driver) List(context context.Context, path string) ([]string, error) {
var query *storage.Query var query *storage.Query
query = &storage.Query{} query = &storage.Query{}
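Stat above first asks for the exact object and, failing that, probes for keys under the directory prefix, since directories are only an abstraction over the key/value namespace. A hypothetical sketch of that fallback, written against stand-in get/list callbacks rather than the real GCS client:

// statPath reports whether path exists as an object or as a "directory"
// (any key under path + "/"). get and list are stand-ins for the storage API.
func statPath(
	get func(key string) (size int64, ok bool),
	list func(prefix string) []string,
	path string,
) (isDir bool, size int64, found bool) {
	if size, ok := get(path); ok {
		return false, size, true // stored as a regular object
	}
	if len(list(path+"/")) > 0 {
		return true, 0, true // at least one key under the prefix: treat as a directory
	}
	return false, 0, false
}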

View File

@ -22,8 +22,10 @@ import (
// Hook up gocheck into the "go test" runner. // Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { check.TestingT(t) } func Test(t *testing.T) { check.TestingT(t) }
var gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error) var (
var skipGCS func() string gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error)
skipGCS func() string
)
func init() { func init() {
bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET") bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET")

View File

@ -190,7 +190,6 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
} }
entries, err := found.(*dir).list(normalized) entries, err := found.(*dir).list(normalized)
if err != nil { if err != nil {
switch err { switch err {
case errNotExists: case errNotExists:

View File

@ -163,7 +163,6 @@ func (d *dir) mkdirs(p string) (*dir, error) {
components := strings.Split(relative, "/") components := strings.Split(relative, "/")
for _, component := range components { for _, component := range components {
d, err := dd.mkdir(component) d, err := dd.mkdir(component)
if err != nil { if err != nil {
// This should actually never happen, since there are no children. // This should actually never happen, since there are no children.
return nil, err return nil, err

View File

@ -98,7 +98,6 @@ func newAliCDNStorageMiddleware(storageDriver storagedriver.StorageDriver, optio
// URLFor attempts to find a url which may be used to retrieve the file at the given path. // URLFor attempts to find a url which may be used to retrieve the file at the given path.
func (ac *aliCDNStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { func (ac *aliCDNStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
if ac.StorageDriver.Name() != "oss" { if ac.StorageDriver.Name() != "oss" {
dcontext.GetLogger(ctx).Warn("the AliCDN middleware does not support this backend storage driver") dcontext.GetLogger(ctx).Warn("the AliCDN middleware does not support this backend storage driver")
return ac.StorageDriver.URLFor(ctx, path, options) return ac.StorageDriver.URLFor(ctx, path, options)
@ -112,5 +111,5 @@ func (ac *aliCDNStorageMiddleware) URLFor(ctx context.Context, path string, opti
// init registers the alicdn layerHandler backend. // init registers the alicdn layerHandler backend.
func init() { func init() {
storagemiddleware.Register("alicdn", storagemiddleware.InitFunc(newAliCDNStorageMiddleware)) storagemiddleware.Register("alicdn", newAliCDNStorageMiddleware)
} }

View File

@ -1,6 +1,5 @@
// Package middleware - cloudfront wrapper for storage libs // Package middleware - cloudfront wrapper for storage libs
// N.B. currently only works with S3, not arbitrary sites // N.B. currently only works with S3, not arbitrary sites
//
package middleware package middleware
import ( import (
@ -34,12 +33,21 @@ var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{}
// newCloudFrontLayerHandler constructs and returns a new CloudFront // newCloudFrontLayerHandler constructs and returns a new CloudFront
// LayerHandler implementation. // LayerHandler implementation.
// Required options: baseurl, privatekey, keypairid //
// Required options:
// Optional options: ipFilteredBy, awsregion //
// ipfilteredby: valid value "none|aws|awsregion". "none", do not filter any IP, default value. "aws", only aws IP goes // - baseurl
// to S3 directly. "awsregion", only regions listed in awsregion options goes to S3 directly // - privatekey
// awsregion: a comma separated string of AWS regions. // - keypairid
//
// Optional options:
//
// - ipFilteredBy
// - awsregion
// - ipfilteredby: valid value "none|aws|awsregion". "none", do not filter any IP,
// default value. "aws", only aws IP goes to S3 directly. "awsregion", only
// regions listed in awsregion options goes to S3 directly
// - awsregion: a comma separated string of AWS regions.
func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
// parse baseurl // parse baseurl
base, ok := options["baseurl"] base, ok := options["baseurl"]
@ -211,5 +219,5 @@ func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string,
// init registers the cloudfront layerHandler backend. // init registers the cloudfront layerHandler backend.
func init() { func init() {
storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) storagemiddleware.Register("cloudfront", newCloudFrontStorageMiddleware)
} }
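The rewritten doc comment above enumerates the required and optional option keys. A hypothetical package-internal sketch of wiring them up; every value is a placeholder, and backend stands for any existing storagedriver.StorageDriver (the constructor signature matches the one shown in this file):

func exampleCloudFrontOptions(backend storagedriver.StorageDriver) (storagedriver.StorageDriver, error) {
	options := map[string]interface{}{
		"baseurl":      "https://d111111abcdef8.cloudfront.net/", // required
		"privatekey":   "/etc/docker/cloudfront/pk-example.pem",  // required
		"keypairid":    "EXAMPLEKEYPAIRID",                       // required
		"ipfilteredby": "awsregion",                              // optional: "none" | "aws" | "awsregion"
		"awsregion":    "us-east-1,us-west-2",                    // optional: comma-separated regions
	}
	return newCloudFrontStorageMiddleware(backend, options)
}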

View File

@ -21,11 +21,10 @@ func (s *MiddlewareSuite) TestNoConfig(c *check.C) {
} }
func TestCloudFrontStorageMiddlewareGenerateKey(t *testing.T) { func TestCloudFrontStorageMiddlewareGenerateKey(t *testing.T) {
options := make(map[string]interface{}) options := make(map[string]interface{})
options["baseurl"] = "example.com" options["baseurl"] = "example.com"
var privk = `-----BEGIN RSA PRIVATE KEY----- privk := `-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCy0ZZsItDuYoX3y6hWqyU9YdH/0B+tlOhvjlaJqvkmAIBBatVV MIICXQIBAAKBgQCy0ZZsItDuYoX3y6hWqyU9YdH/0B+tlOhvjlaJqvkmAIBBatVV
VAShnEAEircBwV3i08439WYgjXnrZ0FjXBTjTKWwCsbpuWJY1w8hqHW3VDivUo1n VAShnEAEircBwV3i08439WYgjXnrZ0FjXBTjTKWwCsbpuWJY1w8hqHW3VDivUo1n
F9WTeclVJuEMhmiAhek3dhUdATaEDqBNskXMofSgKmQHqhPdXCgDmnzKoQIDAQAB F9WTeclVJuEMhmiAhek3dhUdATaEDqBNskXMofSgKmQHqhPdXCgDmnzKoQIDAQAB

View File

@ -113,7 +113,6 @@ func (s *awsIPs) tryUpdate() error {
if regionAllowed { if regionAllowed {
*output = append(*output, *network) *output = append(*output, *network)
} }
} }
for _, prefix := range response.Prefixes { for _, prefix := range response.Prefixes {

View File

@ -35,7 +35,6 @@ func (m mockIPRangeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return return
} }
w.Write(bytes) w.Write(bytes)
} }
func newTestHandler(data awsIPResponse) *httptest.Server { func newTestHandler(data awsIPResponse) *httptest.Server {
@ -68,7 +67,6 @@ func TestS3TryUpdate(t *testing.T) {
assertEqual(t, 1, len(ips.ipv4)) assertEqual(t, 1, len(ips.ipv4))
assertEqual(t, 0, len(ips.ipv6)) assertEqual(t, 0, len(ips.ipv6))
} }
func TestMatchIPV6(t *testing.T) { func TestMatchIPV6(t *testing.T) {
@ -215,7 +213,7 @@ func TestInvalidNetworkType(t *testing.T) {
} }
func TestParsing(t *testing.T) { func TestParsing(t *testing.T) {
var data = `{ data := `{
"prefixes": [{ "prefixes": [{
"ip_prefix": "192.168.0.0", "ip_prefix": "192.168.0.0",
"region": "someregion", "region": "someregion",

View File

@ -46,5 +46,5 @@ func (r *redirectStorageMiddleware) URLFor(ctx context.Context, path string, opt
} }
func init() { func init() {
storagemiddleware.Register("redirect", storagemiddleware.InitFunc(newRedirectStorageMiddleware)) storagemiddleware.Register("redirect", newRedirectStorageMiddleware)
} }

View File

@ -37,13 +37,15 @@ const driverName = "oss"
// OSS API requires multipart upload chunks to be at least 5MB // OSS API requires multipart upload chunks to be at least 5MB
const minChunkSize = 5 << 20 const minChunkSize = 5 << 20
const defaultChunkSize = 2 * minChunkSize const (
const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk defaultChunkSize = 2 * minChunkSize
defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk
)
// listMax is the largest amount of objects you can request from OSS in a list call // listMax is the largest amount of objects you can request from OSS in a list call
const listMax = 1000 const listMax = 1000
//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set // DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
type DriverParameters struct { type DriverParameters struct {
AccessKeyID string AccessKeyID string
AccessKeySecret string AccessKeySecret string
@ -202,7 +204,6 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and // New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and
// bucketName // bucketName
func New(params DriverParameters) (*Driver, error) { func New(params DriverParameters) (*Driver, error) {
client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure)
client.SetEndpoint(params.Endpoint) client.SetEndpoint(params.Endpoint)
bucket := client.Bucket(params.Bucket) bucket := client.Bucket(params.Bucket)
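For reference, the minChunkSize constant grouped above, 5 << 20, is 5 * 2^20 = 5,242,880 bytes (5 MiB), so defaultChunkSize works out to 10 MiB. A standalone check (not part of this diff):

package main

import "fmt"

func main() {
	const minChunkSize = 5 << 20
	const defaultChunkSize = 2 * minChunkSize
	fmt.Println(minChunkSize, defaultChunkSize) // 5242880 10485760
	fmt.Println(minChunkSize == 5*1024*1024)    // true
}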

View File

@ -24,15 +24,18 @@ var ossDriverConstructor func(rootDirectory string) (*Driver, error)
var skipCheck func() string var skipCheck func() string
func init() { func init() {
accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") var (
secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") accessKey = os.Getenv("ALIYUN_ACCESS_KEY_ID")
bucket := os.Getenv("OSS_BUCKET") secretKey = os.Getenv("ALIYUN_ACCESS_KEY_SECRET")
region := os.Getenv("OSS_REGION") bucket = os.Getenv("OSS_BUCKET")
internal := os.Getenv("OSS_INTERNAL") region = os.Getenv("OSS_REGION")
encrypt := os.Getenv("OSS_ENCRYPT") internal = os.Getenv("OSS_INTERNAL")
secure := os.Getenv("OSS_SECURE") encrypt = os.Getenv("OSS_ENCRYPT")
endpoint := os.Getenv("OSS_ENDPOINT") secure = os.Getenv("OSS_SECURE")
encryptionKeyID := os.Getenv("OSS_ENCRYPTIONKEYID") endpoint = os.Getenv("OSS_ENDPOINT")
encryptionKeyID = os.Getenv("OSS_ENCRYPTIONKEYID")
)
root, err := ioutil.TempDir("", "driver-") root, err := ioutil.TempDir("", "driver-")
if err != nil { if err != nil {
panic(err) panic(err)

View File

@ -93,7 +93,7 @@ var validRegions = map[string]struct{}{}
// validObjectACLs contains known s3 object Acls // validObjectACLs contains known s3 object Acls
var validObjectACLs = map[string]struct{}{} var validObjectACLs = map[string]struct{}{}
//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set // DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
type DriverParameters struct { type DriverParameters struct {
AccessKey string AccessKey string
SecretKey string SecretKey string
@ -632,7 +632,6 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
Key: aws.String(d.s3Path(path)), Key: aws.String(d.s3Path(path)),
Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"), Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"),
}) })
if err != nil { if err != nil {
if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" { if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" {
return ioutil.NopCloser(bytes.NewReader(nil)), nil return ioutil.NopCloser(bytes.NewReader(nil)), nil
@ -1166,16 +1165,22 @@ func (d *driver) doWalk(parentCtx context.Context, objectCount *int64, path, pre
// directoryDiff finds all directories that are not in common between // directoryDiff finds all directories that are not in common between
// the previous and current paths in sorted order. // the previous and current paths in sorted order.
// //
// Eg 1 directoryDiff("/path/to/folder", "/path/to/folder/folder/file") // # Examples
// => [ "/path/to/folder/folder" ], //
// Eg 2 directoryDiff("/path/to/folder/folder1", "/path/to/folder/folder2/file") // directoryDiff("/path/to/folder", "/path/to/folder/folder/file")
// => [ "/path/to/folder/folder2" ] // // => [ "/path/to/folder/folder" ]
// Eg 3 directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/file") //
// => [ "/path/to/folder/folder2" ] // directoryDiff("/path/to/folder/folder1", "/path/to/folder/folder2/file")
// Eg 4 directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/folder1/file") // // => [ "/path/to/folder/folder2" ]
// => [ "/path/to/folder/folder2", "/path/to/folder/folder2/folder1" ] //
// Eg 5 directoryDiff("/", "/path/to/folder/folder/file") // directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/file")
// => [ "/path", "/path/to", "/path/to/folder", "/path/to/folder/folder" ], // // => [ "/path/to/folder/folder2" ]
//
// directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/folder1/file")
// // => [ "/path/to/folder/folder2", "/path/to/folder/folder2/folder1" ]
//
// directoryDiff("/", "/path/to/folder/folder/file")
// // => [ "/path", "/path/to", "/path/to/folder", "/path/to/folder/folder" ]
func directoryDiff(prev, current string) []string { func directoryDiff(prev, current string) []string {
var paths []string var paths []string
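The reformatted examples above pin down directoryDiff's behaviour. A hypothetical package-internal test sketch (not part of this diff; the test name and the use of the testing and reflect packages are assumptions) exercising exactly those documented cases:

func TestDirectoryDiffDocExamples(t *testing.T) {
	cases := []struct {
		prev, current string
		want          []string
	}{
		{"/path/to/folder", "/path/to/folder/folder/file", []string{"/path/to/folder/folder"}},
		{"/path/to/folder/folder1", "/path/to/folder/folder2/file", []string{"/path/to/folder/folder2"}},
		{"/path/to/folder/folder1/file", "/path/to/folder/folder2/file", []string{"/path/to/folder/folder2"}},
		{"/path/to/folder/folder1/file", "/path/to/folder/folder2/folder1/file", []string{"/path/to/folder/folder2", "/path/to/folder/folder2/folder1"}},
		{"/", "/path/to/folder/folder/file", []string{"/path", "/path/to", "/path/to/folder", "/path/to/folder/folder"}},
	}
	for _, c := range cases {
		if got := directoryDiff(c.prev, c.current); !reflect.DeepEqual(got, c.want) {
			t.Errorf("directoryDiff(%q, %q) = %v, want %v", c.prev, c.current, got, c.want)
		}
	}
}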

View File

@ -27,27 +27,32 @@ import (
// Hook up gocheck into the "go test" runner. // Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { check.TestingT(t) } func Test(t *testing.T) { check.TestingT(t) }
var s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error) var (
var skipS3 func() string s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error)
skipS3 func() string
)
func init() { func init() {
accessKey := os.Getenv("AWS_ACCESS_KEY") var (
secretKey := os.Getenv("AWS_SECRET_KEY") accessKey = os.Getenv("AWS_ACCESS_KEY")
bucket := os.Getenv("S3_BUCKET") secretKey = os.Getenv("AWS_SECRET_KEY")
encrypt := os.Getenv("S3_ENCRYPT") bucket = os.Getenv("S3_BUCKET")
keyID := os.Getenv("S3_KEY_ID") encrypt = os.Getenv("S3_ENCRYPT")
secure := os.Getenv("S3_SECURE") keyID = os.Getenv("S3_KEY_ID")
skipVerify := os.Getenv("S3_SKIP_VERIFY") secure = os.Getenv("S3_SECURE")
v4Auth := os.Getenv("S3_V4_AUTH") skipVerify = os.Getenv("S3_SKIP_VERIFY")
region := os.Getenv("AWS_REGION") v4Auth = os.Getenv("S3_V4_AUTH")
objectACL := os.Getenv("S3_OBJECT_ACL") region = os.Getenv("AWS_REGION")
objectACL = os.Getenv("S3_OBJECT_ACL")
regionEndpoint = os.Getenv("REGION_ENDPOINT")
forcePathStyle = os.Getenv("AWS_S3_FORCE_PATH_STYLE")
sessionToken = os.Getenv("AWS_SESSION_TOKEN")
useDualStack = os.Getenv("S3_USE_DUALSTACK")
combineSmallPart = os.Getenv("MULTIPART_COMBINE_SMALL_PART")
accelerate = os.Getenv("S3_ACCELERATE")
)
root, err := ioutil.TempDir("", "driver-") root, err := ioutil.TempDir("", "driver-")
regionEndpoint := os.Getenv("REGION_ENDPOINT")
forcePathStyle := os.Getenv("AWS_S3_FORCE_PATH_STYLE")
sessionToken := os.Getenv("AWS_SESSION_TOKEN")
useDualStack := os.Getenv("S3_USE_DUALSTACK")
combineSmallPart := os.Getenv("MULTIPART_COMBINE_SMALL_PART")
accelerate := os.Getenv("S3_ACCELERATE")
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -343,7 +348,7 @@ func TestDelete(t *testing.T) {
return false return false
} }
var objs = []string{ objs := []string{
"/file1", "/file1",
"/file1-2", "/file1-2",
"/file1/2", "/file1/2",
@ -411,7 +416,7 @@ func TestDelete(t *testing.T) {
} }
// objects to skip auto-created test case // objects to skip auto-created test case
var skipCase = map[string]bool{ skipCase := map[string]bool{
// special case where deleting "/file1" also deletes "/file1/2" is tested explicitly // special case where deleting "/file1" also deletes "/file1/2" is tested explicitly
"/file1": true, "/file1": true,
} }
@ -536,7 +541,7 @@ func TestWalk(t *testing.T) {
t.Fatalf("unexpected error creating driver with standard storage: %v", err) t.Fatalf("unexpected error creating driver with standard storage: %v", err)
} }
var fileset = []string{ fileset := []string{
"/file1", "/file1",
"/folder1/file1", "/folder1/file1",
"/folder2/file1", "/folder2/file1",

View File

@ -66,7 +66,7 @@ type StorageDriver interface {
Stat(ctx context.Context, path string) (FileInfo, error) Stat(ctx context.Context, path string) (FileInfo, error)
// List returns a list of the objects that are direct descendants of the // List returns a list of the objects that are direct descendants of the
//given path. // given path.
List(ctx context.Context, path string) ([]string, error) List(ctx context.Context, path string) ([]string, error)
// Move moves an object stored at sourcePath to destPath, removing the // Move moves an object stored at sourcePath to destPath, removing the

View File

@ -341,8 +341,8 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
return file, err return file, err
} }
//if this is a DLO and it is clear that segments are still missing, // if this is a DLO and it is clear that segments are still missing,
//wait until they show up // wait until they show up
_, isDLO := headers["X-Object-Manifest"] _, isDLO := headers["X-Object-Manifest"]
size, err := file.Length() size, err := file.Length()
if err != nil { if err != nil {
@ -357,7 +357,7 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
continue continue
} }
//if not, then this reader will be fine // if not, then this reader will be fine
return file, nil return file, nil
} }
} }
@ -436,9 +436,9 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo,
} }
} }
//Don't trust an empty `objects` slice. A container listing can be // Don't trust an empty `objects` slice. A container listing can be
//outdated. For files, we can make a HEAD request on the object which // outdated. For files, we can make a HEAD request on the object which
//reports existence (at least) much more reliably. // reports existence (at least) much more reliably.
waitingTime := readAfterWriteWait waitingTime := readAfterWriteWait
endTime := time.Now().Add(readAfterWriteTimeout) endTime := time.Now().Add(readAfterWriteTimeout)
@ -451,8 +451,8 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo,
return nil, err return nil, err
} }
//if this is a DLO and it is clear that segments are still missing, // if this is a DLO and it is clear that segments are still missing,
//wait until they show up // wait until they show up
_, isDLO := headers["X-Object-Manifest"] _, isDLO := headers["X-Object-Manifest"]
if isDLO && info.Bytes == 0 { if isDLO && info.Bytes == 0 {
if time.Now().Add(waitingTime).After(endTime) { if time.Now().Add(waitingTime).After(endTime) {
@ -463,7 +463,7 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo,
continue continue
} }
//otherwise, accept the result // otherwise, accept the result
fi.IsDir = false fi.IsDir = false
fi.Size = info.Bytes fi.Size = info.Bytes
fi.ModTime = info.LastModified fi.ModTime = info.LastModified
@ -681,7 +681,7 @@ func (d *driver) swiftSegmentPath(path string) (string, error) {
} }
func (d *driver) getAllSegments(path string) ([]swift.Object, error) { func (d *driver) getAllSegments(path string) ([]swift.Object, error) {
//a simple container listing works 99.9% of the time // a simple container listing works 99.9% of the time
segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path})
if err != nil { if err != nil {
if err == swift.ContainerNotFound { if err == swift.ContainerNotFound {
@ -690,15 +690,15 @@ func (d *driver) getAllSegments(path string) ([]swift.Object, error) {
return nil, err return nil, err
} }
//build a lookup table by object name // build a lookup table by object name
hasObjectName := make(map[string]struct{}) hasObjectName := make(map[string]struct{})
for _, segment := range segments { for _, segment := range segments {
hasObjectName[segment.Name] = struct{}{} hasObjectName[segment.Name] = struct{}{}
} }
//The container listing might be outdated (i.e. not contain all existing // The container listing might be outdated (i.e. not contain all existing
//segment objects yet) because of temporary inconsistency (Swift is only // segment objects yet) because of temporary inconsistency (Swift is only
//eventually consistent!). Check its completeness. // eventually consistent!). Check its completeness.
segmentNumber := 0 segmentNumber := 0
for { for {
segmentNumber++ segmentNumber++
@ -708,23 +708,23 @@ func (d *driver) getAllSegments(path string) ([]swift.Object, error) {
continue continue
} }
//This segment is missing in the container listing. Use a more reliable // This segment is missing in the container listing. Use a more reliable
//request to check its existence. (HEAD requests on segments are // request to check its existence. (HEAD requests on segments are
//guaranteed to return the correct metadata, except for the pathological // guaranteed to return the correct metadata, except for the pathological
//case of an outage of large parts of the Swift cluster or its network, // case of an outage of large parts of the Swift cluster or its network,
//since every segment is only written once.) // since every segment is only written once.)
segment, _, err := d.Conn.Object(d.Container, segmentPath) segment, _, err := d.Conn.Object(d.Container, segmentPath)
switch err { switch err {
case nil: case nil:
//found new segment -> keep going, more might be missing // found new segment -> keep going, more might be missing
segments = append(segments, segment) segments = append(segments, segment)
continue continue
case swift.ObjectNotFound: case swift.ObjectNotFound:
//This segment is missing. Since we upload segments sequentially, // This segment is missing. Since we upload segments sequentially,
//there won't be any more segments after it. // there won't be any more segments after it.
return segments, nil return segments, nil
default: default:
return nil, err //unexpected error return nil, err // unexpected error
} }
} }
} }
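Several hunks above implement the same read-after-write dance: Swift is only eventually consistent, so the driver re-checks with a bounded wait until missing segments show up. A generic standalone sketch of that pattern (the function name and parameters are illustrative, not the driver's):

package main

import (
	"errors"
	"time"
)

// waitFor polls check until it succeeds or the timeout elapses, sleeping
// `wait` between attempts, mirroring the bounded retry loops above.
func waitFor(check func() (bool, error), wait, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		ok, err := check()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		if time.Now().Add(wait).After(deadline) {
			return errors.New("timed out waiting for eventually consistent listing")
		}
		time.Sleep(wait)
	}
}

func main() {
	attempts := 0
	_ = waitFor(func() (bool, error) {
		attempts++
		return attempts >= 3, nil // pretend the listing becomes complete on the third try
	}, 10*time.Millisecond, time.Second)
}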

View File

@ -24,29 +24,6 @@ var swiftDriverConstructor func(prefix string) (*Driver, error)
func init() { func init() {
var ( var (
username string
password string
authURL string
tenant string
tenantID string
domain string
domainID string
tenantDomain string
tenantDomainID string
trustID string
container string
region string
AuthVersion int
endpointType string
insecureSkipVerify bool
secretKey string
accessKey string
containerKey bool
tempURLMethods []string
swiftServer *swifttest.SwiftServer
err error
)
username = os.Getenv("SWIFT_USERNAME") username = os.Getenv("SWIFT_USERNAME")
password = os.Getenv("SWIFT_PASSWORD") password = os.Getenv("SWIFT_PASSWORD")
authURL = os.Getenv("SWIFT_AUTH_URL") authURL = os.Getenv("SWIFT_AUTH_URL")
@ -67,6 +44,10 @@ func init() {
containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY")) containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY"))
tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",") tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",")
swiftServer *swifttest.SwiftServer
err error
)
if username == "" || password == "" || authURL == "" || container == "" { if username == "" || password == "" || authURL == "" || container == "" {
if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil {
panic(err) panic(err)

View File

@ -116,7 +116,8 @@ func (suite *DriverSuite) TestValidPaths(c *check.C) {
"/a-.b", "/a-.b",
"/_.abc", "/_.abc",
"/Docker/docker-registry", "/Docker/docker-registry",
"/Abc/Cba"} "/Abc/Cba",
}
for _, filename := range validFiles { for _, filename := range validFiles {
err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
@ -154,7 +155,8 @@ func (suite *DriverSuite) TestInvalidPaths(c *check.C) {
"abc", "abc",
"123.abc", "123.abc",
"//bcd", "//bcd",
"/abc_123/"} "/abc_123/",
}
for _, filename := range invalidFiles { for _, filename := range invalidFiles {
err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents)
@ -1175,8 +1177,10 @@ func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, c
c.Assert(readContents, check.DeepEquals, contents) c.Assert(readContents, check.DeepEquals, contents)
} }
var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") var (
var separatorChars = []byte("._-") filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789")
separatorChars = []byte("._-")
)
func randomPath(length int64) string { func randomPath(length int64) string {
path := "/" path := "/"

View File

@ -16,6 +16,7 @@ type changingFileSystem struct {
func (cfs *changingFileSystem) List(_ context.Context, _ string) ([]string, error) { func (cfs *changingFileSystem) List(_ context.Context, _ string) ([]string, error) {
return cfs.fileset, nil return cfs.fileset, nil
} }
func (cfs *changingFileSystem) Stat(_ context.Context, path string) (FileInfo, error) { func (cfs *changingFileSystem) Stat(_ context.Context, path string) (FileInfo, error) {
kept, ok := cfs.keptFiles[path] kept, ok := cfs.keptFiles[path]
if ok && kept { if ok && kept {
@ -48,6 +49,7 @@ func (cfs *fileSystem) Stat(_ context.Context, path string) (FileInfo, error) {
}, },
}, nil }, nil
} }
func (cfs *fileSystem) isDir(path string) bool { func (cfs *fileSystem) isDir(path string) bool {
_, isDir := cfs.fileset[path] _, isDir := cfs.fileset[path]
return isDir return isDir
@ -167,7 +169,6 @@ func TestWalkFallback(t *testing.T) {
compareWalked(t, tc.expected, walked) compareWalked(t, tc.expected, walked)
}) })
} }
} }
func compareWalked(t *testing.T, expected, walked []string) { func compareWalked(t *testing.T, expected, walked []string) {

View File

@ -61,7 +61,6 @@ func TestFileReaderSeek(t *testing.T) {
} }
fr, err := newFileReader(ctx, driver, path, int64(len(content))) fr, err := newFileReader(ctx, driver, path, int64(len(content)))
if err != nil { if err != nil {
t.Fatalf("unexpected error creating file reader: %v", err) t.Fatalf("unexpected error creating file reader: %v", err)
} }

View File

@ -109,7 +109,6 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
return err return err
}) })
if err != nil { if err != nil {
return fmt.Errorf("failed to mark: %v", err) return fmt.Errorf("failed to mark: %v", err)
} }

View File

@ -173,7 +173,8 @@ func TestNoDeletionNoEffect(t *testing.T) {
// construct manifestlist for fun. // construct manifestlist for fun.
blobstatter := registry.BlobStatter() blobstatter := registry.BlobStatter()
manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{ manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{
image1.manifestDigest, image2.manifestDigest}) image1.manifestDigest, image2.manifestDigest,
})
if err != nil { if err != nil {
t.Fatalf("Failed to make manifest list: %v", err) t.Fatalf("Failed to make manifest list: %v", err)
} }

View File

@ -150,7 +150,6 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.
name: lbs.repository.Named().Name(), name: lbs.repository.Named().Name(),
id: uuid, id: uuid,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -159,7 +158,6 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.
name: lbs.repository.Named().Name(), name: lbs.repository.Named().Name(),
id: uuid, id: uuid,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -179,7 +177,6 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution
name: lbs.repository.Named().Name(), name: lbs.repository.Named().Name(),
id: id, id: id,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -203,7 +200,6 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution
name: lbs.repository.Named().Name(), name: lbs.repository.Named().Name(),
id: id, id: id,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -540,7 +540,6 @@ func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes boo
if payloadMediaType != v1.MediaTypeImageIndex { if payloadMediaType != v1.MediaTypeImageIndex {
t.Fatalf("%s: unexpected MediaType for index payload, %s", testname, payloadMediaType) t.Fatalf("%s: unexpected MediaType for index payload, %s", testname, payloadMediaType)
} }
} }
// TestLinkPathFuncs ensures that the link path functions behavior are locked // TestLinkPathFuncs ensures that the link path functions behavior are locked

View File

@ -12,7 +12,7 @@ import (
v1 "github.com/opencontainers/image-spec/specs-go/v1" v1 "github.com/opencontainers/image-spec/specs-go/v1"
) )
//ocischemaManifestHandler is a ManifestHandler that covers ocischema manifests. // ocischemaManifestHandler is a ManifestHandler that covers ocischema manifests.
type ocischemaManifestHandler struct { type ocischemaManifestHandler struct {
repository distribution.Repository repository distribution.Repository
blobStore distribution.BlobStore blobStore distribution.BlobStore

View File

@ -24,24 +24,32 @@ const (
// The path layout in the storage backend is roughly as follows: // The path layout in the storage backend is roughly as follows:
// //
// <root>/v2 // <root>/v2
// -> repositories/ // ├── blob
// -><name>/ // │ └── <algorithm>
// -> _manifests/ // │ └── <split directory content addressable storage>
// revisions // └── repositories
// -> <manifest digest path> // └── <name>
// -> link // ├── _layers
// tags/<tag> // │ └── <layer links to blob store>
// -> current/link // ├── _manifests
// -> index // │ ├── revisions
// -> <algorithm>/<hex digest>/link // │ │ └── <manifest digest path>
// -> _layers/ // │ │ └── link
// <layer links to blob store> // │ └── tags
// -> _uploads/<id> // │ └── <tag>
// data // │ ├── current
// startedat // │ │ └── link
// hashstates/<algorithm>/<offset> // │ └── index
// -> blob/<algorithm> // │ └── <algorithm>
// <split directory content addressable storage> // │ └── <hex digest>
// │ └── link
// └── _uploads
// └── <id>
// ├── data
// ├── hashstates
// │ └── <algorithm>
// │ └── <offset>
// └── startedat
// //
// The storage backend layout is broken up into a content-addressable blob // The storage backend layout is broken up into a content-addressable blob
// store and repositories. The content-addressable blob store holds most data // store and repositories. The content-addressable blob store holds most data
@ -105,7 +113,6 @@ const (
// For more information on the semantic meaning of each path and their // For more information on the semantic meaning of each path and their
// contents, please see the path spec documentation. // contents, please see the path spec documentation.
func pathFor(spec pathSpec) (string, error) { func pathFor(spec pathSpec) (string, error) {
// Switch on the path object type and return the appropriate path. At // Switch on the path object type and return the appropriate path. At
// first glance, one may wonder why we don't use an interface to // first glance, one may wonder why we don't use an interface to
// accomplish this. By keep the formatting separate from the pathSpec, we // accomplish this. By keep the formatting separate from the pathSpec, we
@ -135,7 +142,6 @@ func pathFor(spec pathSpec) (string, error) {
return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil
case manifestRevisionLinkPathSpec: case manifestRevisionLinkPathSpec:
root, err := pathFor(manifestRevisionPathSpec(v)) root, err := pathFor(manifestRevisionPathSpec(v))
if err != nil { if err != nil {
return "", err return "", err
} }
@ -147,7 +153,6 @@ func pathFor(spec pathSpec) (string, error) {
root, err := pathFor(manifestTagsPathSpec{ root, err := pathFor(manifestTagsPathSpec{
name: v.name, name: v.name,
}) })
if err != nil { if err != nil {
return "", err return "", err
} }
@ -155,7 +160,6 @@ func pathFor(spec pathSpec) (string, error) {
return path.Join(root, v.tag), nil return path.Join(root, v.tag), nil
case manifestTagCurrentPathSpec: case manifestTagCurrentPathSpec:
root, err := pathFor(manifestTagPathSpec(v)) root, err := pathFor(manifestTagPathSpec(v))
if err != nil { if err != nil {
return "", err return "", err
} }
@ -163,7 +167,6 @@ func pathFor(spec pathSpec) (string, error) {
return path.Join(root, "current", "link"), nil return path.Join(root, "current", "link"), nil
case manifestTagIndexPathSpec: case manifestTagIndexPathSpec:
root, err := pathFor(manifestTagPathSpec(v)) root, err := pathFor(manifestTagPathSpec(v))
if err != nil { if err != nil {
return "", err return "", err
} }
@ -171,7 +174,6 @@ func pathFor(spec pathSpec) (string, error) {
return path.Join(root, "index"), nil return path.Join(root, "index"), nil
case manifestTagIndexEntryLinkPathSpec: case manifestTagIndexEntryLinkPathSpec:
root, err := pathFor(manifestTagIndexEntryPathSpec(v)) root, err := pathFor(manifestTagIndexEntryPathSpec(v))
if err != nil { if err != nil {
return "", err return "", err
} }
@ -182,7 +184,6 @@ func pathFor(spec pathSpec) (string, error) {
name: v.name, name: v.name,
tag: v.tag, tag: v.tag,
}) })
if err != nil { if err != nil {
return "", err return "", err
} }
@ -431,8 +432,7 @@ type uploadHashStatePathSpec struct {
func (uploadHashStatePathSpec) pathSpec() {} func (uploadHashStatePathSpec) pathSpec() {}
// repositoriesRootPathSpec returns the root of repositories // repositoriesRootPathSpec returns the root of repositories
type repositoriesRootPathSpec struct { type repositoriesRootPathSpec struct{}
}
func (repositoriesRootPathSpec) pathSpec() {} func (repositoriesRootPathSpec) pathSpec() {}
@ -445,7 +445,6 @@ func (repositoriesRootPathSpec) pathSpec() {}
// groups of digest folder. It will be as follows: // groups of digest folder. It will be as follows:
// //
// <algorithm>/<first two bytes of digest>/<full digest> // <algorithm>/<first two bytes of digest>/<full digest>
//
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
if err := dgst.Validate(); err != nil { if err := dgst.Validate(); err != nil {
return nil, err return nil, err
@ -468,7 +467,6 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error)
// Reconstructs a digest from a path // Reconstructs a digest from a path
func digestFromPath(digestPath string) (digest.Digest, error) { func digestFromPath(digestPath string) (digest.Digest, error) {
digestPath = strings.TrimSuffix(digestPath, "/data") digestPath = strings.TrimSuffix(digestPath, "/data")
dir, hex := path.Split(digestPath) dir, hex := path.Split(digestPath)
dir = path.Dir(dir) dir = path.Dir(dir)
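digestPathComponents above documents the multilevel layout <algorithm>/<first two bytes of digest>/<full digest>. A standalone illustration of that split (not the package's implementation; assumes Go 1.18+ for strings.Cut):

package main

import (
	"fmt"
	"strings"
)

// exampleDigestComponents splits "algorithm:hex" into the documented
// multilevel path components: algorithm, first two hex characters, full hex.
func exampleDigestComponents(dgst string) []string {
	algorithm, hex, _ := strings.Cut(dgst, ":")
	return []string{algorithm, hex[:2], hex}
}

func main() {
	// sha256 of the empty blob, used purely as an example digest.
	d := "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	fmt.Println(strings.Join(exampleDigestComponents(d), "/"))
	// sha256/e3/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}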

View File

@ -108,7 +108,6 @@ func TestPathMapper(t *testing.T) {
if err == nil { if err == nil {
t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) t.Fatalf("expected an error when mapping an invalid revision: %s", badpath)
} }
} }
func TestDigestFromPath(t *testing.T) { func TestDigestFromPath(t *testing.T) {
@ -132,7 +131,6 @@ func TestDigestFromPath(t *testing.T) {
if result != testcase.expected { if result != testcase.expected {
t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected) t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected)
} }
} }
} }

View File

@ -98,7 +98,6 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv
} else { } else {
errors = pushError(errors, filePath, err) errors = pushError(errors, filePath, err)
} }
} }
uploads[uuid] = ud uploads[uuid] = ud

View File

@ -38,7 +38,6 @@ func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploa
if d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { if d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
t.Fatalf("Unable to write startedAt file") t.Fatalf("Unable to write startedAt file")
} }
} }
func TestPurgeGather(t *testing.T) { func TestPurgeGather(t *testing.T) {

View File

@ -18,7 +18,7 @@ var (
errInvalidURL = errors.New("invalid URL on layer") errInvalidURL = errors.New("invalid URL on layer")
) )
//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. // schema2ManifestHandler is a ManifestHandler that covers schema2 manifests.
type schema2ManifestHandler struct { type schema2ManifestHandler struct {
repository distribution.Repository repository distribution.Repository
blobStore distribution.BlobStore blobStore distribution.BlobStore

Some files were not shown because too many files have changed in this diff