mirror of https://github.com/distribution/distribution synced 2024-11-06 19:35:52 +01:00

fix some typos in source comments

Signed-off-by: bin liu <liubin0329@gmail.com>
bin liu 2015-04-17 12:39:52 +00:00
parent 5ded64812a
commit e0521d2d01
10 changed files with 16 additions and 16 deletions

@@ -6,10 +6,10 @@ import (
"regexp"
)
-// TarSumRegexp defines a reguler expression to match tarsum identifiers.
+// TarSumRegexp defines a regular expression to match tarsum identifiers.
var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+")
-// TarsumRegexpCapturing defines a reguler expression to match tarsum identifiers with
+// TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with
// capture groups corresponding to each component.
var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")
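
As a quick illustration of how the capturing pattern above splits a tarsum identifier into its components, here is a minimal self-contained sketch; the identifier and digest value are made up for the example:

package main

import (
	"fmt"
	"regexp"
)

// Same capturing pattern as above, reproduced so the sketch is self-contained.
var tarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")

func main() {
	// Hypothetical identifier: version "v1", algorithm "sha256", made-up digest.
	id := "tarsum.v1+sha256:abc123def456"
	fmt.Println(tarsumRegexpCapturing.FindStringSubmatch(id))
	// Prints the full match followed by each capture group (prefix, version, algorithm, digest).
}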

doc.go

@@ -2,6 +2,6 @@
// docker distribution. The goal is to allow users to reliably package, ship
// and store content related to docker images.
//
-// This is currently a work in progress. More details are availalbe in the
+// This is currently a work in progress. More details are available in the
// README.md.
package distribution

@@ -220,7 +220,7 @@ type retryingSink struct {
sink Sink
closed bool
-// circuit breaker hueristics
+// circuit breaker heuristics
failures struct {
threshold int
recent int
@@ -317,7 +317,7 @@ func (rs *retryingSink) wait(backoff time.Duration) {
time.Sleep(backoff)
}
-// reset marks a succesful call.
+// reset marks a successful call.
func (rs *retryingSink) reset() {
rs.failures.recent = 0
rs.failures.last = time.Time{}
@@ -330,7 +330,7 @@ func (rs *retryingSink) failure() {
}
// proceed returns true if the call should proceed based on circuit breaker
-// hueristics.
+// heuristics.
func (rs *retryingSink) proceed() bool {
return rs.failures.recent < rs.failures.threshold ||
time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff))
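
For context, the heuristics above amount to a small circuit breaker: calls proceed while recent failures stay under the threshold, and otherwise only after the backoff window has passed. A self-contained sketch of that rule, with illustrative values rather than the ones retryingSink actually uses:

package main

import (
	"fmt"
	"time"
)

// breaker mirrors the failure-tracking fields shown above (illustrative only).
type breaker struct {
	threshold int
	recent    int
	last      time.Time
	backoff   time.Duration
}

// proceed allows the call while recent failures are under the threshold,
// or once the backoff window since the last failure has elapsed.
func (b *breaker) proceed() bool {
	return b.recent < b.threshold ||
		time.Now().UTC().After(b.last.Add(b.backoff))
}

// failure records an unsuccessful call.
func (b *breaker) failure() {
	b.recent++
	b.last = time.Now().UTC()
}

// reset marks a successful call.
func (b *breaker) reset() {
	b.recent = 0
	b.last = time.Time{}
}

func main() {
	b := &breaker{threshold: 3, backoff: 100 * time.Millisecond}
	for i := 0; i < 5; i++ {
		fmt.Println("proceed:", b.proceed())
		b.failure()
	}
	time.Sleep(150 * time.Millisecond)
	fmt.Println("after backoff:", b.proceed())
}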

@@ -3,7 +3,7 @@
// An access controller has a simple interface with a single `Authorized`
// method which checks that a given request is authorized to perform one or
// more actions on one or more resources. This method should return a non-nil
-// error if the requset is not authorized.
+// error if the request is not authorized.
//
// An implementation registers its access controller by name with a constructor
// which accepts an options map for configuring the access controller.
@@ -50,7 +50,7 @@ type Resource struct {
}
// Access describes a specific action that is
-// requested or allowed for a given recource.
+// requested or allowed for a given resource.
type Access struct {
Resource
Action string
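
The doc comment above describes the shape of an access controller: a single Authorized method that checks one or more requested accesses. A rough, hypothetical sketch of an implementation along those lines; the type and method signatures here are illustrative only and may not match the real auth package:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the types described above; the real package's
// definitions may differ.
type Resource struct {
	Type string
	Name string
}

type Access struct {
	Resource
	Action string
}

// pullOnly is an illustrative access controller: it authorizes "pull" and
// rejects every other action.
type pullOnly struct{}

func (pullOnly) Authorized(accesses ...Access) error {
	for _, a := range accesses {
		if a.Action != "pull" {
			return errors.New("not authorized: " + a.Action + " on " + a.Name)
		}
	}
	return nil
}

func main() {
	var ac pullOnly
	err := ac.Authorized(Access{Resource: Resource{Type: "repository", Name: "library/ubuntu"}, Action: "push"})
	fmt.Println(err) // not authorized: push on library/ubuntu
}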

@@ -7,7 +7,7 @@ import (
)
// joseBase64UrlEncode encodes the given data using the standard base64 url
-// encoding format but with all trailing '=' characters ommitted in accordance
+// encoding format but with all trailing '=' characters omitted in accordance
// with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlEncode(b []byte) string {
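
The comment above fully specifies the behavior: standard base64url output with the trailing '=' padding dropped. A minimal equivalent sketch, not necessarily the exact implementation in this file:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// Base64url-encode and strip trailing '=' padding, as the comment above describes.
func joseBase64UrlEncode(b []byte) string {
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

func main() {
	fmt.Println(joseBase64UrlEncode([]byte("hello"))) // aGVsbG8 (padding removed)
}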

@@ -101,7 +101,7 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
}
case "ReadStream":
path, _ := request.Parameters["Path"].(string)
-// Depending on serialization method, Offset may be convereted to any int/uint type
+// Depending on serialization method, Offset may be converted to any int/uint type
offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int()
reader, err := driver.ReadStream(path, offset)
var response ReadStreamResponse
@@ -116,9 +116,9 @@ func handleRequest(driver storagedriver.StorageDriver, request Request) {
}
case "WriteStream":
path, _ := request.Parameters["Path"].(string)
-// Depending on serialization method, Offset may be convereted to any int/uint type
+// Depending on serialization method, Offset may be converted to any int/uint type
offset := reflect.ValueOf(request.Parameters["Offset"]).Convert(reflect.TypeOf(int64(0))).Int()
-// Depending on serialization method, Size may be convereted to any int/uint type
+// Depending on serialization method, Size may be converted to any int/uint type
size := reflect.ValueOf(request.Parameters["Size"]).Convert(reflect.TypeOf(int64(0))).Int()
reader, _ := request.Parameters["Reader"].(io.ReadCloser)
err := driver.WriteStream(path, offset, size, reader)
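
The reflect-based conversion in the two hunks above exists because a decoder may deliver Offset and Size as different integer types depending on the serialization in use. A small self-contained sketch of the same conversion applied to a few possible input types:

package main

import (
	"fmt"
	"reflect"
)

// toInt64 mirrors the conversion used above: whatever int or uint type the
// decoder produced, convert it to int64 before using it as an offset or size.
func toInt64(v interface{}) int64 {
	return reflect.ValueOf(v).Convert(reflect.TypeOf(int64(0))).Int()
}

func main() {
	for _, v := range []interface{}{int(42), uint32(42), uint64(42)} {
		fmt.Printf("%T -> %d\n", v, toInt64(v))
	}
}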

@@ -435,7 +435,7 @@ func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64)
c.Assert(err, check.IsNil)
c.Assert(received, check.DeepEquals, fullContents)
-// Writing past size of file extends file (no offest error). We would like
+// Writing past size of file extends file (no offset error). We would like
// to write chunk 4 one chunk length past chunk 3. It should be successful
// and the resulting file will be 5 chunks long, with a chunk of all
// zeros.

@@ -336,7 +336,7 @@ func seekerSize(seeker io.ReadSeeker) (int64, error) {
// createTestLayer creates a simple test layer in the provided driver under
// tarsum dgst, returning the sha256 digest location. This is implemented
-// peicemeal and should probably be replaced by the uploader when it's ready.
+// piecemeal and should probably be replaced by the uploader when it's ready.
func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) {
h := sha256.New()
rd := io.TeeReader(content, h)

@@ -182,7 +182,7 @@ func (lw *layerWriter) resumeHashAt(offset int64) error {
}
if offset == int64(lw.resumableDigester.Len()) {
-// State of digester is already at the requseted offset.
+// State of digester is already at the requested offset.
return nil
}

@@ -387,7 +387,7 @@ type layerLinkPathSpec struct {
func (layerLinkPathSpec) pathSpec() {}
// blobAlgorithmReplacer does some very simple path sanitization for user
-// input. Mostly, this is to provide some heirachry for tarsum digests. Paths
+// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths
// should be "safe" before getting this far due to strict digest requirements
// but we can add further path conversion here, if needed.
var blobAlgorithmReplacer = strings.NewReplacer(
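
The replacement pairs themselves are cut off by the hunk above, so the ones below are hypothetical, but they illustrate the idea the comment describes: mapping digest separators to path separators so tarsum digests fan out into a directory hierarchy.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical pairs: '+' and '.' become '/' so an algorithm prefix like
	// "tarsum.v1+sha256" maps onto nested path components.
	replacer := strings.NewReplacer("+", "/", ".", "/")
	fmt.Println(replacer.Replace("tarsum.v1+sha256")) // tarsum/v1/sha256
}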