
Adds custom registry User-Agent header to s3 HTTP requests

Uses docker/goamz instead of AdRoll/goamz

Adds a registry UA string param to the storage parameters when
constructing the storage driver for the registry App.
This could be used by other storage drivers as well.
Signed-off-by: Brian Bland <brian.bland@docker.com>
Brian Bland 2016-01-20 16:40:58 -08:00
parent e4b654f2ce
commit f41a408e34
4 changed files with 58 additions and 29 deletions
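
Taken together, the change threads a registry-level User-Agent string into the storage layer as an ordinary driver parameter. Below is a minimal sketch of that wiring, condensing the app.go hunks that follow; the package name and helper function are hypothetical, and the concrete driver still has to register itself with the factory elsewhere (as cmd/registry already does by importing the driver packages).

// Sketch only: mirrors the NewApp changes in the hunks below.
package registryua // hypothetical package name

import (
	"fmt"
	"runtime"

	"github.com/docker/distribution/configuration"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/factory"
	"github.com/docker/distribution/version"
)

// newStorageDriver copies the configured storage parameters, injects the
// registry's outbound User-Agent string under the "useragent" key, and asks
// the factory for the driver, just as NewApp does below.
func newStorageDriver(config *configuration.Configuration) (storagedriver.StorageDriver, error) {
	params := config.Storage.Parameters()
	if params == nil {
		params = make(configuration.Parameters)
	}
	// Produces something like "docker-distribution/2.3.0 go1.5.3".
	params["useragent"] = fmt.Sprintf("docker-distribution/%s %s", version.Version, runtime.Version())
	return factory.Create(config.Storage.Type(), params)
}
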

@ -9,6 +9,7 @@ import (
"net/http"
"net/url"
"os"
"runtime"
"time"
log "github.com/Sirupsen/logrus"
@ -30,6 +31,7 @@ import (
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/factory"
storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
"github.com/docker/distribution/version"
"github.com/docker/libtrust"
"github.com/garyburd/redigo/redis"
"github.com/gorilla/mux"
@ -83,12 +85,12 @@ type App struct {
// NewApp takes a configuration and returns a configured app, ready to serve
// requests. The app only implements ServeHTTP and can be wrapped in other
// handlers accordingly.
func NewApp(ctx context.Context, configuration *configuration.Configuration) *App {
func NewApp(ctx context.Context, config *configuration.Configuration) *App {
app := &App{
Config: configuration,
Config: config,
Context: ctx,
router: v2.RouterWithPrefix(configuration.HTTP.Prefix),
isCache: configuration.Proxy.RemoteURL != "",
router: v2.RouterWithPrefix(config.HTTP.Prefix),
isCache: config.Proxy.RemoteURL != "",
}
// Register the handler dispatchers.
@ -102,8 +104,15 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap
app.register(v2.RouteNameBlobUpload, blobUploadDispatcher)
app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher)
// override the storage driver's UA string for registry outbound HTTP requests
storageParams := config.Storage.Parameters()
if storageParams == nil {
storageParams = make(configuration.Parameters)
}
storageParams["useragent"] = fmt.Sprintf("docker-distribution/%s %s", version.Version, runtime.Version())
var err error
app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters())
app.driver, err = factory.Create(config.Storage.Type(), storageParams)
if err != nil {
// TODO(stevvooe): Move the creation of a service into a protected
// method, where this is created lazily. Its status can be queried via
@ -112,7 +121,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap
}
purgeConfig := uploadPurgeDefaultConfig()
if mc, ok := configuration.Storage["maintenance"]; ok {
if mc, ok := config.Storage["maintenance"]; ok {
if v, ok := mc["uploadpurging"]; ok {
purgeConfig, ok = v.(map[interface{}]interface{})
if !ok {
@ -135,15 +144,15 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap
startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig)
app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"])
app.driver, err = applyStorageMiddleware(app.driver, config.Middleware["storage"])
if err != nil {
panic(err)
}
app.configureSecret(configuration)
app.configureEvents(configuration)
app.configureRedis(configuration)
app.configureLogHook(configuration)
app.configureSecret(config)
app.configureEvents(config)
app.configureRedis(config)
app.configureLogHook(config)
// Generate an ephemeral key to be used for signing converted manifests
// for clients that don't support schema2.
@ -152,8 +161,8 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap
panic(err)
}
if configuration.HTTP.Host != "" {
u, err := url.Parse(configuration.HTTP.Host)
if config.HTTP.Host != "" {
u, err := url.Parse(config.HTTP.Host)
if err != nil {
panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err))
}
@ -167,7 +176,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap
}
// configure deletion
if d, ok := configuration.Storage["delete"]; ok {
if d, ok := config.Storage["delete"]; ok {
e, ok := d["enabled"]
if ok {
if deleteEnabled, ok := e.(bool); ok && deleteEnabled {
@ -178,7 +187,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap
// configure redirects
var redirectDisabled bool
if redirectConfig, ok := configuration.Storage["redirect"]; ok {
if redirectConfig, ok := config.Storage["redirect"]; ok {
v := redirectConfig["disable"]
switch v := v.(type) {
case bool:
@ -194,7 +203,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap
}
// configure storage caches
if cc, ok := configuration.Storage["cache"]; ok {
if cc, ok := config.Storage["cache"]; ok {
v, ok := cc["blobdescriptor"]
if !ok {
// Backwards compatible: "layerinfo" == "blobdescriptor"
@ -223,7 +232,7 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap
ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache")
default:
if v != "" {
ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"])
ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", config.Storage["cache"])
}
}
}
@ -236,15 +245,15 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap
}
}
app.registry, err = applyRegistryMiddleware(app.Context, app.registry, configuration.Middleware["registry"])
app.registry, err = applyRegistryMiddleware(app.Context, app.registry, config.Middleware["registry"])
if err != nil {
panic(err)
}
authType := configuration.Auth.Type()
authType := config.Auth.Type()
if authType != "" {
accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters())
accessController, err := auth.GetAccessController(config.Auth.Type(), config.Auth.Parameters())
if err != nil {
panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err))
}
@ -253,13 +262,13 @@ func NewApp(ctx context.Context, configuration *configuration.Configuration) *Ap
}
// configure as a pull through cache
if configuration.Proxy.RemoteURL != "" {
app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, configuration.Proxy)
if config.Proxy.RemoteURL != "" {
app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, config.Proxy)
if err != nil {
panic(err.Error())
}
app.isCache = true
ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", configuration.Proxy.RemoteURL)
ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", config.Proxy.RemoteURL)
}
return app

@ -10,10 +10,10 @@ import (
"io/ioutil"
"time"
"github.com/AdRoll/goamz/cloudfront"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
"github.com/docker/goamz/cloudfront"
)
// cloudFrontStorageMiddleware provides a simple implementation of layerHandler that

@ -26,11 +26,12 @@ import (
"sync"
"time"
"github.com/AdRoll/goamz/aws"
"github.com/AdRoll/goamz/s3"
"github.com/Sirupsen/logrus"
"github.com/docker/goamz/aws"
"github.com/docker/goamz/s3"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/client/transport"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/base"
"github.com/docker/distribution/registry/storage/driver/factory"
@ -58,6 +59,7 @@ type DriverParameters struct {
V4Auth bool
ChunkSize int64
RootDirectory string
UserAgent string
}
func init() {
@ -168,7 +170,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
case int, uint, int32, uint32, uint64:
chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
default:
return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam)
return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
}
if chunkSize < minChunkSize {
@ -181,6 +183,11 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
rootDirectory = ""
}
userAgent, ok := parameters["useragent"]
if !ok {
userAgent = ""
}
params := DriverParameters{
fmt.Sprint(accessKey),
fmt.Sprint(secretKey),
@ -191,6 +198,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
v4AuthBool,
chunkSize,
fmt.Sprint(rootDirectory),
fmt.Sprint(userAgent),
}
return New(params)
@ -209,7 +217,16 @@ func New(params DriverParameters) (*Driver, error) {
}
s3obj := s3.New(auth, params.Region)
bucket := s3obj.Bucket(params.Bucket)
if params.UserAgent != "" {
s3obj.Client = &http.Client{
Transport: transport.NewTransport(http.DefaultTransport,
transport.NewHeaderRequestModifier(http.Header{
http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent},
}),
),
}
}
if params.V4Auth {
s3obj.Signature = aws.V4Signature
@ -219,6 +236,8 @@ func New(params DriverParameters) (*Driver, error) {
}
}
bucket := s3obj.Bucket(params.Bucket)
// TODO Currently multipart uploads have no timestamps, so this would be unwise
// if you initiated a new s3driver while another one is running on the same bucket.
// multis, _, err := bucket.ListMulti("", "")
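
On the driver side, the User-Agent is applied by wrapping the goamz HTTP client with the registry client transport's header modifier, as shown in the New() hunk above. The following standalone sketch demonstrates that technique against a local test server; the server and the UA string here are illustrative only, not part of the change.

// Sketch: verify that transport.NewTransport plus NewHeaderRequestModifier
// rewrites the User-Agent on outbound requests, the same decoration New()
// applies to s3obj.Client when params.UserAgent is non-empty.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"

	"github.com/docker/distribution/registry/client/transport"
)

func main() {
	// Echo back whatever User-Agent the server receives.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, r.UserAgent())
	}))
	defer srv.Close()

	client := &http.Client{
		Transport: transport.NewTransport(http.DefaultTransport,
			transport.NewHeaderRequestModifier(http.Header{
				http.CanonicalHeaderKey("User-Agent"): []string{"docker-distribution/2.x go1.x"},
			}),
		),
	}

	resp, err := client.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // prints the injected User-Agent, not Go's default
}

Running it prints the injected string rather than Go's default "Go-http-client" value, which is the behavior the s3 driver relies on for its outbound requests.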

@ -6,10 +6,10 @@ import (
"strconv"
"testing"
"github.com/AdRoll/goamz/aws"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/registry/storage/driver/testsuites"
"github.com/docker/goamz/aws"
"gopkg.in/check.v1"
)
@ -69,6 +69,7 @@ func init() {
v4AuthBool,
minChunkSize,
rootDirectory,
"",
}
return New(parameters)