diff --git a/.travis.yml b/.travis.yml
index 34f34e2f5750e8da631f8289795326f766d5bf4e..a377de10306130f8e66cc3d4d8dc909456f9e14e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,7 +7,7 @@ env:
   - MYSQL_TEST="true" MONGO_TEST="true"
 
 go:
-  - 1.7.1
+  - 1.7.3
   - tip
 
 matrix:
diff --git a/vendor/cloud.google.com/go/internal/optional/optional.go b/vendor/cloud.google.com/go/internal/optional/optional.go
new file mode 100644
index 0000000000000000000000000000000000000000..f9102f365162ec977fac9ef46a8ef87c43f9b870
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/optional/optional.go
@@ -0,0 +1,94 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package optional provides versions of primitive types that can
+// be nil. These are useful in methods that update some of an API object's
+// fields.
+package optional
+
+import (
+	"fmt"
+	"strings"
+)
+
+type (
+	// Bool is either a bool or nil.
+	Bool interface{}
+
+	// String is either a string or nil.
+	String interface{}
+
+	// Int is either an int or nil.
+	Int interface{}
+
+	// Uint is either a uint or nil.
+	Uint interface{}
+
+	// Float64 is either a float64 or nil.
+	Float64 interface{}
+)
+
+// ToBool returns its argument as a bool.
+// It panics if its argument is nil or not a bool.
+func ToBool(v Bool) bool {
+	x, ok := v.(bool)
+	if !ok {
+		doPanic("Bool", v)
+	}
+	return x
+}
+
+// ToString returns its argument as a string.
+// It panics if its argument is nil or not a string.
+func ToString(v String) string {
+	x, ok := v.(string)
+	if !ok {
+		doPanic("String", v)
+	}
+	return x
+}
+
+// ToInt returns its argument as an int.
+// It panics if its argument is nil or not an int.
+func ToInt(v Int) int {
+	x, ok := v.(int)
+	if !ok {
+		doPanic("Int", v)
+	}
+	return x
+}
+
+// ToUint returns its argument as a uint.
+// It panics if its argument is nil or not a uint.
+func ToUint(v Uint) uint {
+	x, ok := v.(uint)
+	if !ok {
+		doPanic("Uint", v)
+	}
+	return x
+}
+
+// ToFloat64 returns its argument as a float64.
+// It panics if its argument is nil or not a float64.
+func ToFloat64(v Float64) float64 {
+	x, ok := v.(float64)
+	if !ok {
+		doPanic("Float64", v)
+	}
+	return x
+}
+
+func doPanic(capType string, v interface{}) {
+	panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
+}
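For context: this optional package is internal to cloud.google.com/go, so only packages inside that module (such as storage, below) can import it. The intended pattern, condensed from the ObjectHandle.Update change later in this diff, is that a nil optional means "leave the field unchanged", while a non-nil value (even the zero value) is converted with the matching To* helper and explicitly sent:

    if uattrs.ContentType != nil {
        attrs.ContentType = optional.ToString(uattrs.ContentType)
        forceSendFields = append(forceSendFields, "ContentType")
    }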
diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go
index e0cb948801c9c6d979b67f9658f399e5112eecf8..714d280e279c488899f96398fc2e2e56fffe2cb5 100644
--- a/vendor/cloud.google.com/go/storage/acl.go
+++ b/vendor/cloud.google.com/go/storage/acl.go
@@ -92,7 +92,12 @@ func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
 }
 
 func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
-	acls, err := a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx).Do()
+	var acls *raw.ObjectAccessControls
+	var err error
+	err = runWithRetry(ctx, func() error {
+		acls, err = a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx).Do()
+		return err
+	})
 	if err != nil {
 		return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err)
 	}
@@ -105,7 +110,10 @@ func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role
 		Entity: string(entity),
 		Role:   string(role),
 	}
-	_, err := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
+	err := runWithRetry(ctx, func() error {
+		_, err := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
+		return err
+	})
 	if err != nil {
 		return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
 	}
@@ -113,7 +121,9 @@ func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role
 }
 
 func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
-	err := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
+	err := runWithRetry(ctx, func() error {
+		return a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
+	})
 	if err != nil {
 		return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
 	}
@@ -121,7 +131,12 @@ func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) e
 }
 
 func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
-	acls, err := a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx).Do()
+	var acls *raw.BucketAccessControls
+	var err error
+	err = runWithRetry(ctx, func() error {
+		acls, err = a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx).Do()
+		return err
+	})
 	if err != nil {
 		return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err)
 	}
@@ -139,7 +154,10 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
 		Entity: string(entity),
 		Role:   string(role),
 	}
-	_, err := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
+	err := runWithRetry(ctx, func() error {
+		_, err := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
+		return err
+	})
 	if err != nil {
 		return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
 	}
@@ -147,7 +165,9 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
 }
 
 func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
-	err := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
+	err := runWithRetry(ctx, func() error {
+		return a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
+	})
 	if err != nil {
 		return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
 	}
@@ -155,7 +175,12 @@ func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
 }
 
 func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
-	acls, err := a.c.raw.ObjectAccessControls.List(a.bucket, a.object).Context(ctx).Do()
+	var acls *raw.ObjectAccessControls
+	var err error
+	err = runWithRetry(ctx, func() error {
+		acls, err = a.c.raw.ObjectAccessControls.List(a.bucket, a.object).Context(ctx).Do()
+		return err
+	})
 	if err != nil {
 		return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err)
 	}
@@ -168,7 +193,10 @@ func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRol
 		Entity: string(entity),
 		Role:   string(role),
 	}
-	_, err := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx).Do()
+	err := runWithRetry(ctx, func() error {
+		_, err := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx).Do()
+		return err
+	})
 	if err != nil {
 		return fmt.Errorf("storage: error updating object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
 	}
@@ -176,7 +204,9 @@ func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRol
 }
 
 func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
-	err := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx).Do()
+	err := runWithRetry(ctx, func() error {
+		return a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx).Do()
+	})
 	if err != nil {
 		return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
 	}
diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go
index 0875f7df877150e9c4c1bd8aa7deb0b26ddf404d..a2be0f483d098b64d4b7e09b31b88a2b84302102 100644
--- a/vendor/cloud.google.com/go/storage/bucket.go
+++ b/vendor/cloud.google.com/go/storage/bucket.go
@@ -35,14 +35,13 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
 	}
 	bkt.Name = b.name
 	req := b.c.raw.Buckets.Insert(projectID, bkt)
-	_, err := req.Context(ctx).Do()
-	return err
+	return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
 }
 
 // Delete deletes the Bucket.
 func (b *BucketHandle) Delete(ctx context.Context) error {
 	req := b.c.raw.Buckets.Delete(b.name)
-	return req.Context(ctx).Do()
+	return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
 }
 
 // ACL returns an ACLHandle, which provides access to the bucket's access control list.
@@ -75,12 +74,18 @@ func (b *BucketHandle) Object(name string) *ObjectHandle {
 			bucket: b.name,
 			object: name,
 		},
+		gen: -1,
 	}
 }
 
 // Attrs returns the metadata for the bucket.
 func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
-	resp, err := b.c.raw.Buckets.Get(b.name).Projection("full").Context(ctx).Do()
+	var resp *raw.Bucket
+	var err error
+	err = runWithRetry(ctx, func() error {
+		resp, err = b.c.raw.Buckets.Get(b.name).Projection("full").Context(ctx).Do()
+		return err
+	})
 	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
 		return nil, ErrBucketNotExist
 	}
@@ -110,8 +115,11 @@ type BucketAttrs struct {
 
 	// StorageClass is the storage class of the bucket. This defines
 	// how objects in the bucket are stored and determines the SLA
-	// and the cost of storage. Typical values are "STANDARD" and
-	// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD".
+	// and the cost of storage. Typical values are "MULTI_REGIONAL",
+	// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
+	// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which
+	// is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on
+	// the bucket's location settings.
 	StorageClass string
 
 	// Created is the creation time of the bucket.
@@ -175,47 +183,6 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
 	}
 }
 
-// ObjectList represents a list of objects returned from a bucket List call.
-type ObjectList struct {
-	// Results represent a list of object results.
-	Results []*ObjectAttrs
-
-	// Next is the continuation query to retrieve more
-	// results with the same filtering criteria. If there
-	// are no more results to retrieve, it is nil.
-	Next *Query
-
-	// Prefixes represents prefixes of objects
-	// matching-but-not-listed up to and including
-	// the requested delimiter.
-	Prefixes []string
-}
-
-// List lists objects from the bucket. You can specify a query
-// to filter the results. If q is nil, no filtering is applied.
-//
-// Deprecated. Use BucketHandle.Objects instead.
-func (b *BucketHandle) List(ctx context.Context, q *Query) (*ObjectList, error) {
-	it := b.Objects(ctx, q)
-	nextToken, err := it.fetch(it.pageInfo.MaxSize, it.pageInfo.Token)
-	if err != nil {
-		return nil, err
-	}
-	list := &ObjectList{}
-	for _, item := range it.items {
-		if item.Prefix != "" {
-			list.Prefixes = append(list.Prefixes, item.Prefix)
-		} else {
-			list.Results = append(list.Results, item)
-		}
-	}
-	if nextToken != "" {
-		it.query.Cursor = nextToken
-		list.Next = &it.query
-	}
-	return list, nil
-}
-
 // Objects returns an iterator over the objects in the bucket that match the Query q.
 // If q is nil, no filtering is done.
 func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
@@ -229,8 +196,6 @@ func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
 		func() interface{} { b := it.items; it.items = nil; return b })
 	if q != nil {
 		it.query = *q
-		it.pageInfo.MaxSize = q.MaxResults
-		it.pageInfo.Token = q.Cursor
 	}
 	return it
 }
@@ -248,9 +213,9 @@ type ObjectIterator struct {
 // PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
 func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
 
-// Next returns the next result. Its second return value is Done if there are
-// no more results. Once Next returns Done, all subsequent calls will return
-// Done.
+// Next returns the next result. Its second return value is iterator.Done if
+// there are no more results. Once Next returns iterator.Done, all subsequent
+// calls will return iterator.Done.
 //
 // If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will
 // have a non-empty Prefix field, and a zero value for all other fields. These
@@ -274,7 +239,12 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
 	if pageSize > 0 {
 		req.MaxResults(int64(pageSize))
 	}
-	resp, err := req.Context(it.ctx).Do()
+	var resp *raw.Objects
+	var err error
+	err = runWithRetry(it.ctx, func() error {
+		resp, err = req.Context(it.ctx).Do()
+		return err
+	})
 	if err != nil {
 		return "", err
 	}
@@ -319,9 +289,9 @@ type BucketIterator struct {
 	nextFunc  func() error
 }
 
-// Next returns the next result. Its second return value is Done if there are
-// no more results. Once Next returns Done, all subsequent calls will return
-// Done.
+// Next returns the next result. Its second return value is iterator.Done if
+// there are no more results. Once Next returns iterator.Done, all subsequent
+// calls will return iterator.Done.
 func (it *BucketIterator) Next() (*BucketAttrs, error) {
 	if err := it.nextFunc(); err != nil {
 		return nil, err
@@ -342,7 +312,12 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error)
 	if pageSize > 0 {
 		req.MaxResults(int64(pageSize))
 	}
-	resp, err := req.Context(it.ctx).Do()
+	var resp *raw.Buckets
+	var err error
+	err = runWithRetry(it.ctx, func() error {
+		resp, err = req.Context(it.ctx).Do()
+		return err
+	})
 	if err != nil {
 		return "", err
 	}
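With the deprecated BucketHandle.List removed (see bucket.go above), listing goes through the iterator API and termination is signaled by iterator.Done from google.golang.org/api/iterator. A minimal sketch, assuming an existing client and ctx; the bucket name is illustrative:

    it := client.Bucket("my-bucket").Objects(ctx, nil)
    for {
        objAttrs, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            return err // or handle it
        }
        fmt.Println(objAttrs.Name)
    }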
diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go
index c0e4041ff5742694eff5dded4624843274898f6d..6adb5662b928438d806a793fb38eccac34780084 100644
--- a/vendor/cloud.google.com/go/storage/copy.go
+++ b/vendor/cloud.google.com/go/storage/copy.go
@@ -21,7 +21,6 @@ import (
 	"errors"
 	"fmt"
 	"reflect"
-	"unicode/utf8"
 
 	"golang.org/x/net/context"
 	raw "google.golang.org/api/storage/v1"
@@ -66,18 +65,11 @@ type Copier struct {
 
 // Run performs the copy.
 func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) {
-	// TODO(jba): add ObjectHandle.validate to do these checks.
-	if c.src.bucket == "" || c.dst.bucket == "" {
-		return nil, errors.New("storage: the source and destination bucket names must both be non-empty")
-	}
-	if c.src.object == "" || c.dst.object == "" {
-		return nil, errors.New("storage: the source and destination object names must both be non-empty")
-	}
-	if !utf8.ValidString(c.src.object) {
-		return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", c.src.object)
+	if err := c.src.validate(); err != nil {
+		return nil, err
 	}
-	if !utf8.ValidString(c.dst.object) {
-		return nil, fmt.Errorf("storage: dst name %q is not valid UTF-8", c.dst.object)
+	if err := c.dst.validate(); err != nil {
+		return nil, err
 	}
 	var rawObject *raw.Object
 	// If any attribute was set, then we make sure the name matches the destination
@@ -112,13 +104,15 @@ func (c *Copier) callRewrite(ctx context.Context, src *ObjectHandle, rawObj *raw
 	if c.RewriteToken != "" {
 		call.RewriteToken(c.RewriteToken)
 	}
-	if err := applyConds("Copy destination", c.dst.conds, call); err != nil {
+	if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
 		return nil, err
 	}
-	if err := applyConds("Copy source", toSourceConds(c.src.conds), call); err != nil {
+	if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
 		return nil, err
 	}
-	res, err := call.Do()
+	var res *raw.RewriteResponse
+	var err error
+	err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
 	if err != nil {
 		return nil, err
 	}
@@ -146,41 +140,40 @@ type Composer struct {
 
 // Run performs the compose operation.
 func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
-	if c.dst.bucket == "" || c.dst.object == "" {
-		return nil, errors.New("storage: the destination bucket and object names must be non-empty")
+	if err := c.dst.validate(); err != nil {
+		return nil, err
 	}
 	if len(c.srcs) == 0 {
 		return nil, errors.New("storage: at least one source object must be specified")
 	}
 
 	req := &raw.ComposeRequest{}
-	if !reflect.DeepEqual(c.ObjectAttrs, ObjectAttrs{}) {
-		req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
-		req.Destination.Name = c.dst.object
-	}
-
+	// Compose requires a non-empty Destination, so we always set it,
+	// even if the caller-provided ObjectAttrs is the zero value.
+	req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
 	for _, src := range c.srcs {
+		if err := src.validate(); err != nil {
+			return nil, err
+		}
 		if src.bucket != c.dst.bucket {
 			return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
 		}
-		if src.object == "" {
-			return nil, errors.New("storage: all source object names must be non-empty")
-		}
 		srcObj := &raw.ComposeRequestSourceObjects{
 			Name: src.object,
 		}
-		if err := applyConds("ComposeFrom source", src.conds, composeSourceObj{srcObj}); err != nil {
+		if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
 			return nil, err
 		}
 		req.SourceObjects = append(req.SourceObjects, srcObj)
 	}
 
 	call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
-	if err := applyConds("ComposeFrom destination", c.dst.conds, call); err != nil {
+	if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
 		return nil, err
 	}
-
-	obj, err := call.Do()
+	var obj *raw.Object
+	var err error
+	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
 	if err != nil {
 		return nil, err
 	}
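The deprecated CopyTo and ComposeFrom helpers (removed from storage.go below) are replaced by CopierFrom and ComposerFrom on the destination handle. A sketch of a copy; the bucket and object names are illustrative, and any Copier fields (ObjectAttrs, RewriteToken) would be set before Run:

    src := client.Bucket("my-bucket").Object("original")
    dst := client.Bucket("my-bucket").Object("copy-of-original")
    attrs, err := dst.CopierFrom(src).Run(ctx)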
diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go
index 39d54df403ee6897bfa3ac6724ddade8682d54fd..c23f2c8f945c1e693b74084a5f37e99d9e96471e 100644
--- a/vendor/cloud.google.com/go/storage/doc.go
+++ b/vendor/cloud.google.com/go/storage/doc.go
@@ -19,6 +19,10 @@ Google Cloud Storage stores data in named objects, which are grouped into bucket
 More information about Google Cloud Storage is available at
 https://cloud.google.com/storage/docs.
 
+All of the methods of this package use exponential backoff to retry calls
+that fail with certain errors, as described in
+https://cloud.google.com/storage/docs/exponential-backoff.
+
 Note: This package is experimental and may make backwards-incompatible changes.
 
 
@@ -134,8 +138,7 @@ For example, say you've read an object's metadata into objAttrs. Now
 you want to write to that object, but only if its contents haven't changed
 since you read it. Here is how to express that:
 
-    cond := storage.IfGenerationMatch(objAttrs.Generation)
-    w = obj.WithConditions(cond).NewWriter(ctx)
+    w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
     // Proceed with writing as above.
 
 Signed URLs
diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go
new file mode 100644
index 0000000000000000000000000000000000000000..03b98f4fba73580902650ec116f7cb1739a9fc66
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/invoke.go
@@ -0,0 +1,46 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+)
+
+// runWithRetry calls the function until it returns nil or a non-retryable error, or
+// the context is done.
+func runWithRetry(ctx context.Context, call func() error) error {
+	var backoff gax.Backoff // use defaults for gax exponential backoff
+	for {
+		err := call()
+		if err == nil {
+			return nil
+		}
+		e, ok := err.(*googleapi.Error)
+		if !ok {
+			return err
+		}
+		// Retry on 429 and 5xx, according to
+		// https://cloud.google.com/storage/docs/exponential-backoff.
+		if e.Code == 429 || (e.Code >= 500 && e.Code < 600) {
+			if err := gax.Sleep(ctx, backoff.Pause()); err != nil {
+				return err
+			}
+			continue
+		}
+		return err
+	}
+}
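runWithRetry only retries *googleapi.Error values with code 429 or 5xx; any other error, including a cancelled context surfaced by gax.Sleep, is returned immediately. Calls that produce a value follow the pattern used throughout acl.go and bucket.go above: declare the result and error outside the closure so each attempt can assign to them, for example:

    var acls *raw.ObjectAccessControls
    var err error
    err = runWithRetry(ctx, func() error {
        acls, err = a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx).Do()
        return err
    })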
diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go
index 3707ea909afee424d459b6e3e7765f8e18edf2b9..9dccf5ae2f9cedca882e82b1a73eac9228ea2799 100644
--- a/vendor/cloud.google.com/go/storage/storage.go
+++ b/vendor/cloud.google.com/go/storage/storage.go
@@ -35,10 +35,10 @@ import (
 	"time"
 	"unicode/utf8"
 
-	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
 	"google.golang.org/api/transport"
 
+	"cloud.google.com/go/internal/optional"
 	"golang.org/x/net/context"
 	"google.golang.org/api/googleapi"
 	raw "google.golang.org/api/storage/v1"
@@ -47,9 +47,6 @@ import (
 var (
 	ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
 	ErrObjectNotExist = errors.New("storage: object doesn't exist")
-
-	// Done is returned by iterators in this package when they have no more items.
-	Done = iterator.Done
 )
 
 const userAgent = "gcloud-golang-storage/20151204"
@@ -68,49 +65,6 @@ const (
 	ScopeReadWrite = raw.DevstorageReadWriteScope
 )
 
-// AdminClient is a client type for performing admin operations on a project's
-// buckets.
-//
-// Deprecated: Client has all of AdminClient's methods.
-type AdminClient struct {
-	c         *Client
-	projectID string
-}
-
-// NewAdminClient creates a new AdminClient for a given project.
-//
-// Deprecated: use NewClient instead.
-func NewAdminClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*AdminClient, error) {
-	c, err := NewClient(ctx, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return &AdminClient{
-		c:         c,
-		projectID: projectID,
-	}, nil
-}
-
-// Close closes the AdminClient.
-func (c *AdminClient) Close() error {
-	return c.c.Close()
-}
-
-// Create creates a Bucket in the project.
-// If attrs is nil the API defaults will be used.
-//
-// Deprecated: use BucketHandle.Create instead.
-func (c *AdminClient) CreateBucket(ctx context.Context, bucketName string, attrs *BucketAttrs) error {
-	return c.c.Bucket(bucketName).Create(ctx, c.projectID, attrs)
-}
-
-// Delete deletes a Bucket in the project.
-//
-// Deprecated: use BucketHandle.Delete instead.
-func (c *AdminClient) DeleteBucket(ctx context.Context, bucketName string) error {
-	return c.c.Bucket(bucketName).Delete(ctx)
-}
-
 // Client is a client for interacting with Google Cloud Storage.
 //
 // Clients should be reused instead of created as needed.
@@ -321,9 +275,9 @@ type ObjectHandle struct {
 	c      *Client
 	bucket string
 	object string
-
-	acl   ACLHandle
-	conds []Condition
+	acl    ACLHandle
+	gen    int64 // a negative value indicates latest
+	conds  *Conditions
 }
 
 // ACL provides access to the object's access control list.
@@ -333,24 +287,41 @@ func (o *ObjectHandle) ACL() *ACLHandle {
 	return &o.acl
 }
 
-// WithConditions returns a copy of o using the provided conditions.
-func (o *ObjectHandle) WithConditions(conds ...Condition) *ObjectHandle {
+// Generation returns a new ObjectHandle that operates on a specific generation
+// of the object.
+// By default, the handle operates on the latest generation. Not
+// all operations work when given a specific generation; check the API
+// endpoints at https://cloud.google.com/storage/docs/json_api/ for details.
+func (o *ObjectHandle) Generation(gen int64) *ObjectHandle {
+	o2 := *o
+	o2.gen = gen
+	return &o2
+}
+
+// If returns a new ObjectHandle that applies a set of preconditions.
+// Preconditions already set on the ObjectHandle are ignored.
+// Operations on the new handle will only occur if the preconditions are
+// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions
+// for more details.
+func (o *ObjectHandle) If(conds Conditions) *ObjectHandle {
 	o2 := *o
-	o2.conds = conds
+	o2.conds = &conds
 	return &o2
 }
 
 // Attrs returns meta information about the object.
 // ErrObjectNotExist will be returned if the object is not found.
 func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
-	if !utf8.ValidString(o.object) {
-		return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+	if err := o.validate(); err != nil {
+		return nil, err
 	}
 	call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx)
-	if err := applyConds("Attrs", o.conds, call); err != nil {
+	if err := applyConds("Attrs", o.gen, o.conds, call); err != nil {
 		return nil, err
 	}
-	obj, err := call.Do()
+	var obj *raw.Object
+	var err error
+	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
 	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
 		return nil, ErrObjectNotExist
 	}
@@ -363,15 +334,64 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
 // Update updates an object with the provided attributes.
 // All zero-value attributes are ignored.
 // ErrObjectNotExist will be returned if the object is not found.
-func (o *ObjectHandle) Update(ctx context.Context, attrs ObjectAttrs) (*ObjectAttrs, error) {
-	if !utf8.ValidString(o.object) {
-		return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) {
+	if err := o.validate(); err != nil {
+		return nil, err
 	}
-	call := o.c.raw.Objects.Patch(o.bucket, o.object, attrs.toRawObject(o.bucket)).Projection("full").Context(ctx)
-	if err := applyConds("Update", o.conds, call); err != nil {
+	var attrs ObjectAttrs
+	// Lists of fields to send, and set to null, in the JSON.
+	var forceSendFields, nullFields []string
+	if uattrs.ContentType != nil {
+		attrs.ContentType = optional.ToString(uattrs.ContentType)
+		forceSendFields = append(forceSendFields, "ContentType")
+	}
+	if uattrs.ContentLanguage != nil {
+		attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
+	// For ContentLanguage it's an error to send the empty string.
+		// Instead we send a null.
+		if attrs.ContentLanguage == "" {
+			nullFields = append(nullFields, "ContentLanguage")
+		} else {
+			forceSendFields = append(forceSendFields, "ContentLanguage")
+		}
+	}
+	if uattrs.ContentEncoding != nil {
+		attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding)
+		forceSendFields = append(forceSendFields, "ContentType")
+	}
+	if uattrs.ContentDisposition != nil {
+		attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition)
+		forceSendFields = append(forceSendFields, "ContentDisposition")
+	}
+	if uattrs.CacheControl != nil {
+		attrs.CacheControl = optional.ToString(uattrs.CacheControl)
+		forceSendFields = append(forceSendFields, "CacheControl")
+	}
+	if uattrs.Metadata != nil {
+		attrs.Metadata = uattrs.Metadata
+		if len(attrs.Metadata) == 0 {
+			// Sending the empty map is a no-op. We send null instead.
+			nullFields = append(nullFields, "Metadata")
+		} else {
+			forceSendFields = append(forceSendFields, "Metadata")
+		}
+	}
+	if uattrs.ACL != nil {
+		attrs.ACL = uattrs.ACL
+		// It's an error to attempt to delete the ACL, so
+		// we don't append to nullFields here.
+		forceSendFields = append(forceSendFields, "Acl")
+	}
+	rawObj := attrs.toRawObject(o.bucket)
+	rawObj.ForceSendFields = forceSendFields
+	rawObj.NullFields = nullFields
+	call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx)
+	if err := applyConds("Update", o.gen, o.conds, call); err != nil {
 		return nil, err
 	}
-	obj, err := call.Do()
+	var obj *raw.Object
+	var err error
+	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
 	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
 		return nil, ErrObjectNotExist
 	}
@@ -381,16 +401,37 @@ func (o *ObjectHandle) Update(ctx context.Context, attrs ObjectAttrs) (*ObjectAt
 	return newObject(obj), nil
 }
 
+// ObjectAttrsToUpdate is used to update the attributes of an object.
+// Only fields set to non-nil values will be updated.
+// Set a field to its zero value to delete it.
+//
+// For example, to change ContentType and delete ContentEncoding and
+// Metadata, use
+//    ObjectAttrsToUpdate{
+//        ContentType: "text/html",
+//        ContentEncoding: "",
+//        Metadata: map[string]string{},
+//    }
+type ObjectAttrsToUpdate struct {
+	ContentType        optional.String
+	ContentLanguage    optional.String
+	ContentEncoding    optional.String
+	ContentDisposition optional.String
+	CacheControl       optional.String
+	Metadata           map[string]string // set to map[string]string{} to delete
+	ACL                []ACLRule
+}
+
 // Delete deletes the single specified object.
 func (o *ObjectHandle) Delete(ctx context.Context) error {
-	if !utf8.ValidString(o.object) {
-		return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+	if err := o.validate(); err != nil {
+		return err
 	}
 	call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx)
-	if err := applyConds("Delete", o.conds, call); err != nil {
+	if err := applyConds("Delete", o.gen, o.conds, call); err != nil {
 		return err
 	}
-	err := call.Do()
+	err := runWithRetry(ctx, func() error { return call.Do() })
 	switch e := err.(type) {
 	case nil:
 		return nil
@@ -402,32 +443,6 @@ func (o *ObjectHandle) Delete(ctx context.Context) error {
 	return err
 }
 
-// CopyTo copies the object to the given dst.
-// The copied object's attributes are overwritten by attrs if non-nil.
-//
-// Deprecated: use ObjectHandle.CopierFrom instead.
-func (o *ObjectHandle) CopyTo(ctx context.Context, dst *ObjectHandle, attrs *ObjectAttrs) (*ObjectAttrs, error) {
-	c := dst.CopierFrom(o)
-	if attrs != nil {
-		c.ObjectAttrs = *attrs
-	}
-	return c.Run(ctx)
-}
-
-// ComposeFrom concatenates the provided slice of source objects into a new
-// object whose destination is the receiver. The provided attrs, if not nil,
-// are used to set the attributes on the newly-created object. All source
-// objects must reside within the same bucket as the destination.
-//
-// Deprecated: use ObjectHandle.ComposerFrom instead.
-func (o *ObjectHandle) ComposeFrom(ctx context.Context, srcs []*ObjectHandle, attrs *ObjectAttrs) (*ObjectAttrs, error) {
-	c := o.ComposerFrom(srcs...)
-	if attrs != nil {
-		c.ObjectAttrs = *attrs
-	}
-	return c.Run(ctx)
-}
-
 // NewReader creates a new Reader to read the contents of the
 // object.
 // ErrObjectNotExist will be returned if the object is not found.
@@ -438,19 +453,25 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
 }
 
 // NewRangeReader reads part of an object, reading at most length bytes
-// starting at the given offset.  If length is negative, the object is read
+// starting at the given offset. If length is negative, the object is read
 // until the end.
 func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) {
-	if !utf8.ValidString(o.object) {
-		return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+	if err := o.validate(); err != nil {
+		return nil, err
 	}
 	if offset < 0 {
 		return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
 	}
+	if o.conds != nil {
+		if err := o.conds.validate("NewRangeReader"); err != nil {
+			return nil, err
+		}
+	}
 	u := &url.URL{
-		Scheme: "https",
-		Host:   "storage.googleapis.com",
-		Path:   fmt.Sprintf("/%s/%s", o.bucket, o.object),
+		Scheme:   "https",
+		Host:     "storage.googleapis.com",
+		Path:     fmt.Sprintf("/%s/%s", o.bucket, o.object),
+		RawQuery: conditionsQuery(o.gen, o.conds),
 	}
 	verb := "GET"
 	if length == 0 {
@@ -460,15 +481,13 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
 	if err != nil {
 		return nil, err
 	}
-	if err := applyConds("NewReader", o.conds, objectsGetCall{req}); err != nil {
-		return nil, err
-	}
 	if length < 0 && offset > 0 {
 		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
 	} else if length > 0 {
 		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
 	}
-	res, err := o.c.hc.Do(req)
+	var res *http.Response
+	err = runWithRetry(ctx, func() error { res, err = o.c.hc.Do(req); return err })
 	if err != nil {
 		return nil, err
 	}
@@ -542,7 +561,21 @@ func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
 		o:           o,
 		donec:       make(chan struct{}),
 		ObjectAttrs: ObjectAttrs{Name: o.object},
+		ChunkSize:   googleapi.DefaultUploadChunkSize,
+	}
+}
+
+func (o *ObjectHandle) validate() error {
+	if o.bucket == "" {
+		return errors.New("storage: bucket name is empty")
+	}
+	if o.object == "" {
+		return errors.New("storage: object name is empty")
 	}
+	if !utf8.ValidString(o.object) {
+		return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+	}
+	return nil
 }
 
 // parseKey converts the binary contents of a private key file
@@ -664,8 +697,11 @@ type ObjectAttrs struct {
 	// StorageClass is the storage class of the bucket.
 	// This value defines how objects in the bucket are stored and
 	// determines the SLA and the cost of storage. Typical values are
-	// "STANDARD" and "DURABLE_REDUCED_AVAILABILITY".
-	// It defaults to "STANDARD". This field is read-only.
+	// "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"
+	// and "DURABLE_REDUCED_AVAILABILITY".
+	// It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL"
+	// or "REGIONAL" depending on the bucket's location settings. This
+	// field is read-only.
 	StorageClass string
 
 	// Created is the time the object was created. This field is read-only.
@@ -760,21 +796,6 @@ type Query struct {
 	// Versions indicates whether multiple versions of the same
 	// object will be included in the results.
 	Versions bool
-
-	// Cursor is a previously-returned page token
-	// representing part of the larger set of results to view.
-	// Optional.
-	//
-	// Deprecated: Use ObjectIterator.PageInfo().Token instead.
-	Cursor string
-
-	// MaxResults is the maximum number of items plus prefixes
-	// to return. As duplicate prefixes are omitted,
-	// fewer total results may be returned than requested.
-	// The default page limit is used if it is negative or zero.
-	//
-	// Deprecated: Use ObjectIterator.PageInfo().MaxSize instead.
-	MaxResults int
 }
 
 // contentTyper implements ContentTyper to enable an
@@ -788,105 +809,195 @@ func (c *contentTyper) ContentType() string {
 	return c.t
 }
 
-// A Condition constrains methods to act on specific generations of
+// Conditions constrain methods to act on specific generations of
 // resources.
 //
-// Not all conditions or combinations of conditions are applicable to
-// all methods.
-type Condition interface {
-	// method is the high-level ObjectHandle method name, for
-	// error messages.  call is the call object to modify.
-	modifyCall(method string, call interface{}) error
-}
-
-// applyConds modifies the provided call using the conditions in conds.
-// call is something that quacks like a *raw.WhateverCall.
-func applyConds(method string, conds []Condition, call interface{}) error {
-	for _, cond := range conds {
-		if err := cond.modifyCall(method, call); err != nil {
-			return err
-		}
+// The zero value is an empty set of constraints. Not all conditions or
+// combinations of conditions are applicable to all methods.
+// See https://cloud.google.com/storage/docs/generations-preconditions
+// for details on how these operate.
+type Conditions struct {
+	// Generation constraints.
+	// At most one of the following can be set to a non-zero value.
+
+	// GenerationMatch specifies that the object must have the given generation
+	// for the operation to occur.
+	// If GenerationMatch is zero, it has no effect.
+	// Use DoesNotExist to specify that the object does not exist in the bucket.
+	GenerationMatch int64
+
+	// GenerationNotMatch specifies that the object must not have the given
+	// generation for the operation to occur.
+	// If GenerationNotMatch is zero, it has no effect.
+	GenerationNotMatch int64
+
+	// DoesNotExist specifies that the object must not exist in the bucket for
+	// the operation to occur.
+	// If DoesNotExist is false, it has no effect.
+	DoesNotExist bool
+
+	// Metadata generation constraints.
+	// At most one of the following can be set to a non-zero value.
+
+	// MetagenerationMatch specifies that the object must have the given
+	// metageneration for the operation to occur.
+	// If MetagenerationMatch is zero, it has no effect.
+	MetagenerationMatch int64
+
+	// MetagenerationNotMatch specifies that the object must not have the given
+	// metageneration for the operation to occur.
+	// If MetagenerationNotMatch is zero, it has no effect.
+	MetagenerationNotMatch int64
+}
+
+func (c *Conditions) validate(method string) error {
+	if *c == (Conditions{}) {
+		return fmt.Errorf("storage: %s: empty conditions", method)
+	}
+	if !c.isGenerationValid() {
+		return fmt.Errorf("storage: %s: multiple conditions specified for generation", method)
+	}
+	if !c.isMetagenerationValid() {
+		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
 	}
 	return nil
 }
 
-// toSourceConds returns a slice of Conditions derived from Conds that instead
-// function on the equivalent Source methods of a call.
-func toSourceConds(conds []Condition) []Condition {
-	out := make([]Condition, 0, len(conds))
-	for _, c := range conds {
-		switch c := c.(type) {
-		case genCond:
-			var m string
-			if strings.HasPrefix(c.method, "If") {
-				m = "IfSource" + c.method[2:]
-			} else {
-				m = "Source" + c.method
-			}
-			out = append(out, genCond{method: m, val: c.val})
-		default:
-			// NOTE(djd): If the message from unsupportedCond becomes
-			// confusing, we'll need to find a way for Conditions to
-			// identify themselves.
-			out = append(out, unsupportedCond{})
-		}
+func (c *Conditions) isGenerationValid() bool {
+	n := 0
+	if c.GenerationMatch != 0 {
+		n++
 	}
-	return out
+	if c.GenerationNotMatch != 0 {
+		n++
+	}
+	if c.DoesNotExist {
+		n++
+	}
+	return n <= 1
 }
 
-func Generation(gen int64) Condition               { return genCond{"Generation", gen} }
-func IfGenerationMatch(gen int64) Condition        { return genCond{"IfGenerationMatch", gen} }
-func IfGenerationNotMatch(gen int64) Condition     { return genCond{"IfGenerationNotMatch", gen} }
-func IfMetaGenerationMatch(gen int64) Condition    { return genCond{"IfMetagenerationMatch", gen} }
-func IfMetaGenerationNotMatch(gen int64) Condition { return genCond{"IfMetagenerationNotMatch", gen} }
-
-type genCond struct {
-	method string
-	val    int64
+func (c *Conditions) isMetagenerationValid() bool {
+	return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0
 }
 
-func (g genCond) modifyCall(srcMethod string, call interface{}) error {
-	rv := reflect.ValueOf(call)
-	meth := rv.MethodByName(g.method)
-	if !meth.IsValid() {
-		return fmt.Errorf("%s: condition %s not supported", srcMethod, g.method)
+// applyConds modifies the provided call using the conditions in conds.
+// call is something that quacks like a *raw.WhateverCall.
+func applyConds(method string, gen int64, conds *Conditions, call interface{}) error {
+	cval := reflect.ValueOf(call)
+	if gen >= 0 {
+		if !setConditionField(cval, "Generation", gen) {
+			return fmt.Errorf("storage: %s: generation not supported", method)
+		}
+	}
+	if conds == nil {
+		return nil
+	}
+	if err := conds.validate(method); err != nil {
+		return err
+	}
+	switch {
+	case conds.GenerationMatch != 0:
+		if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) {
+			return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method)
+		}
+	case conds.GenerationNotMatch != 0:
+		if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) {
+			return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method)
+		}
+	case conds.DoesNotExist:
+		if !setConditionField(cval, "IfGenerationMatch", int64(0)) {
+			return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
+		}
+	}
+	switch {
+	case conds.MetagenerationMatch != 0:
+		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
+			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
+		}
+	case conds.MetagenerationNotMatch != 0:
+		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
+			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
+		}
 	}
-	meth.Call([]reflect.Value{reflect.ValueOf(g.val)})
 	return nil
 }
 
-type unsupportedCond struct{}
-
-func (unsupportedCond) modifyCall(srcMethod string, call interface{}) error {
-	return fmt.Errorf("%s: condition not supported", srcMethod)
+func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error {
+	if gen >= 0 {
+		call.SourceGeneration(gen)
+	}
+	if conds == nil {
+		return nil
+	}
+	if err := conds.validate("CopyTo source"); err != nil {
+		return err
+	}
+	switch {
+	case conds.GenerationMatch != 0:
+		call.IfSourceGenerationMatch(conds.GenerationMatch)
+	case conds.GenerationNotMatch != 0:
+		call.IfSourceGenerationNotMatch(conds.GenerationNotMatch)
+	case conds.DoesNotExist:
+		call.IfSourceGenerationMatch(0)
+	}
+	switch {
+	case conds.MetagenerationMatch != 0:
+		call.IfSourceMetagenerationMatch(conds.MetagenerationMatch)
+	case conds.MetagenerationNotMatch != 0:
+		call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch)
+	}
+	return nil
 }
 
-func appendParam(req *http.Request, k, v string) {
-	sep := ""
-	if req.URL.RawQuery != "" {
-		sep = "&"
+// setConditionField sets a field on a *raw.WhateverCall.
+// We can't use anonymous interfaces because the return type is
+// different, since the field setters are builders.
+func setConditionField(call reflect.Value, name string, value interface{}) bool {
+	m := call.MethodByName(name)
+	if !m.IsValid() {
+		return false
 	}
-	req.URL.RawQuery += sep + url.QueryEscape(k) + "=" + url.QueryEscape(v)
+	m.Call([]reflect.Value{reflect.ValueOf(value)})
+	return true
 }
 
-// objectsGetCall wraps an *http.Request for an object fetch call, but adds the methods
-// that modifyCall searches for by name. (the same names as the raw, auto-generated API)
-type objectsGetCall struct{ req *http.Request }
+// conditionsQuery returns the generation and conditions as a URL query
+// string suitable for URL.RawQuery.  It assumes that the conditions
+// have been validated.
+func conditionsQuery(gen int64, conds *Conditions) string {
+	// URL escapes are elided because integer strings are URL-safe.
+	var buf []byte
 
-func (c objectsGetCall) Generation(gen int64) {
-	appendParam(c.req, "generation", fmt.Sprint(gen))
-}
-func (c objectsGetCall) IfGenerationMatch(gen int64) {
-	appendParam(c.req, "ifGenerationMatch", fmt.Sprint(gen))
-}
-func (c objectsGetCall) IfGenerationNotMatch(gen int64) {
-	appendParam(c.req, "ifGenerationNotMatch", fmt.Sprint(gen))
-}
-func (c objectsGetCall) IfMetagenerationMatch(gen int64) {
-	appendParam(c.req, "ifMetagenerationMatch", fmt.Sprint(gen))
-}
-func (c objectsGetCall) IfMetagenerationNotMatch(gen int64) {
-	appendParam(c.req, "ifMetagenerationNotMatch", fmt.Sprint(gen))
+	appendParam := func(s string, n int64) {
+		if len(buf) > 0 {
+			buf = append(buf, '&')
+		}
+		buf = append(buf, s...)
+		buf = strconv.AppendInt(buf, n, 10)
+	}
+
+	if gen >= 0 {
+		appendParam("generation=", gen)
+	}
+	if conds == nil {
+		return string(buf)
+	}
+	switch {
+	case conds.GenerationMatch != 0:
+		appendParam("ifGenerationMatch=", conds.GenerationMatch)
+	case conds.GenerationNotMatch != 0:
+		appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch)
+	case conds.DoesNotExist:
+		appendParam("ifGenerationMatch=", 0)
+	}
+	switch {
+	case conds.MetagenerationMatch != 0:
+		appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch)
+	case conds.MetagenerationNotMatch != 0:
+		appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch)
+	}
+	return string(buf)
 }
 
 // composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods
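Putting the storage.go changes together: WithConditions is replaced by Generation and If, and Update now takes ObjectAttrsToUpdate. A usage sketch; the bucket/object names are illustrative, and objAttrs is assumed to come from a prior Attrs call, as in the doc.go example:

    obj := client.Bucket("my-bucket").Object("my-object")
    // Change ContentType and delete ContentEncoding, but only if the object
    // has not changed since objAttrs was read.
    h := obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation})
    newAttrs, err := h.Update(ctx, storage.ObjectAttrsToUpdate{
        ContentType:     "text/html",
        ContentEncoding: "",
    })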
diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go
index 79ab7914a272ffdf2e73d7870770ee1e393e6c60..8f98156aabf2be2b38fa5ba280b84c1555bfa3b3 100644
--- a/vendor/cloud.google.com/go/storage/writer.go
+++ b/vendor/cloud.google.com/go/storage/writer.go
@@ -15,6 +15,7 @@
 package storage
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"unicode/utf8"
@@ -31,6 +32,17 @@ type Writer struct {
 	// attributes are ignored.
 	ObjectAttrs
 
+	// ChunkSize controls the maximum number of bytes of the object that the
+	// Writer will attempt to send to the server in a single request. Objects
+	// smaller than the size will be sent in a single request, while larger
+	// objects will be split over multiple requests. The size will be rounded up
+	// to the nearest multiple of 256K. If zero, chunking will be disabled and
+	// the object will be uploaded in a single request.
+	//
+	// ChunkSize will default to a reasonable value. Any custom configuration
+	// must be done before the first Write call.
+	ChunkSize int
+
 	ctx context.Context
 	o   *ObjectHandle
 
@@ -56,7 +68,12 @@ func (w *Writer) open() error {
 	w.pw = pw
 	w.opened = true
 
-	var mediaOpts []googleapi.MediaOption
+	if w.ChunkSize < 0 {
+		return errors.New("storage: Writer.ChunkSize must be non-negative")
+	}
+	mediaOpts := []googleapi.MediaOption{
+		googleapi.ChunkSize(w.ChunkSize),
+	}
 	if c := attrs.ContentType; c != "" {
 		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
 	}
@@ -70,9 +87,9 @@ func (w *Writer) open() error {
 			Context(w.ctx)
 
 		var resp *raw.Object
-		err := applyConds("NewWriter", w.o.conds, call)
+		err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
 		if err == nil {
-			resp, err = call.Do()
+			err = runWithRetry(w.ctx, func() error { resp, err = call.Do(); return err })
 		}
 		if err != nil {
 			w.err = err
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index fca922584f6adc7de60a57b2e671edbe88fc595e..34c2bab3380bf4670b3577050daf39c9c9533393 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -137,9 +137,6 @@ type Config struct {
 	// accelerate enabled. If the bucket is not enabled for accelerate an error
 	// will be returned. The bucket name must be DNS compatible to also work
 	// with accelerate.
-	//
-	// Not compatible with UseDualStack requests will fail if both flags are
-	// specified.
 	S3UseAccelerate *bool
 
 	// Set this to `true` to disable the EC2Metadata client from overriding the
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
index 857311f64cc4c7f2fef40907d1b5dd968ee5900a..6efc77bf0932597668da2d11b38498cb46fe745f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -34,7 +34,7 @@ var (
 //
 // Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
 // In this example EnvProvider will first check if any credentials are available
-// vai the environment variables. If there are none ChainProvider will check
+// via the environment variables. If there are none ChainProvider will check
 // the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
 // does not return any credentials ChainProvider will return the error
 // ErrNoValidProvidersFoundInChain
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
index 097d3237b3caa9925f51e9bde4612f981e503011..d3dc8404ed214082df7ec8d6bcf4e13821c950a3 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -66,7 +66,7 @@ through code instead of being driven by environment variables only.
 Use NewSessionWithOptions when you want to provide the config profile, or
 override the shared config state (AWS_SDK_LOAD_CONFIG).
 
-	// Equivalent to session.New
+	// Equivalent to session.NewSession()
 	sess, err := session.NewSessionWithOptions(session.Options{})
 
 	// Specify profile to load for the session's config
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
index 0147eedeb94cde60739cb5e3ca4e517f2a72f526..b58076f5e321457d797095020cc16ba42656c1a1 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -2,7 +2,7 @@ package session
 
 import (
 	"fmt"
-	"os"
+	"io/ioutil"
 
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/credentials"
@@ -105,12 +105,13 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
 	files := make([]sharedConfigFile, 0, len(filenames))
 
 	for _, filename := range filenames {
-		if _, err := os.Stat(filename); os.IsNotExist(err) {
-			// Trim files from the list that don't exist.
+		b, err := ioutil.ReadFile(filename)
+		if err != nil {
+			// Skip files which can't be opened and read for whatever reason
 			continue
 		}
 
-		f, err := ini.Load(filename)
+		f, err := ini.Load(b)
 		if err != nil {
 			return nil, SharedConfigLoadError{Filename: filename}
 		}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
new file mode 100644
index 0000000000000000000000000000000000000000..bd082e9d1f784af980ef9905e9782b7d5bcb0f8a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
@@ -0,0 +1,24 @@
+// +build go1.5
+
+package v4
+
+import (
+	"net/url"
+	"strings"
+)
+
+func getURIPath(u *url.URL) string {
+	var uri string
+
+	if len(u.Opaque) > 0 {
+		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+	} else {
+		uri = u.EscapedPath()
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go
new file mode 100644
index 0000000000000000000000000000000000000000..796604121ce8e83b1a6c44c93714d31d97123dff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go
@@ -0,0 +1,24 @@
+// +build !go1.5
+
+package v4
+
+import (
+	"net/url"
+	"strings"
+)
+
+func getURIPath(u *url.URL) string {
+	var uri string
+
+	if len(u.Opaque) > 0 {
+		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+	} else {
+		uri = u.Path
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index eb79ded9321f9638a229b5dbd8a2101cf2da7d83..986530b401937612803f67a314c4f5ec8502f402 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -2,6 +2,48 @@
 //
 // Provides request signing for request that need to be signed with
 // AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+//     "//<hostname>/<path>"
+//
+//     // e.g.
+//     "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value. URL.EscapedPath is only available in Go
+// v1.5 and later; with Go v1.4 the signer instead falls back to URL.Path, so
+// you must set URL.Opaque yourself if the URI path needs escaping.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query from which the signature was generated.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, when using the signer outside of the SDK it is recommended
+// to explicitly escape the request before it is signed. This helps prevent
+// signature validation errors. It can be done by setting URL.Opaque or
+// URL.RawPath. The SDK will use URL.Opaque first and then call
+// URL.EscapedPath() if Opaque is not set.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
 package v4
 
 import (
@@ -120,6 +162,15 @@ type Signer struct {
 	// request's query string.
 	DisableHeaderHoisting bool
 
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need additional
+	// escaping, set this to prevent the signer from escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+
 	// currentTimeFn returns the time value which represents the current time.
 	// This value should only be used for testing. If it is nil the default
 	// time.Now will be used.
@@ -151,6 +202,8 @@ type signingCtx struct {
 	ExpireTime       time.Duration
 	SignedHeaderVals http.Header
 
+	DisableURIPathEscaping bool
+
 	credValues         credentials.Value
 	isPresign          bool
 	formattedTime      string
@@ -236,14 +289,15 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
 	}
 
 	ctx := &signingCtx{
-		Request:     r,
-		Body:        body,
-		Query:       r.URL.Query(),
-		Time:        signTime,
-		ExpireTime:  exp,
-		isPresign:   exp != 0,
-		ServiceName: service,
-		Region:      region,
+		Request:                r,
+		Body:                   body,
+		Query:                  r.URL.Query(),
+		Time:                   signTime,
+		ExpireTime:             exp,
+		isPresign:              exp != 0,
+		ServiceName:            service,
+		Region:                 region,
+		DisableURIPathEscaping: v4.DisableURIPathEscaping,
 	}
 
 	if ctx.isRequestSigned() {
@@ -354,6 +408,10 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
 		v4.Logger = req.Config.Logger
 		v4.DisableHeaderHoisting = req.NotHoist
 		v4.currentTimeFn = curTimeFn
+		if name == "s3" {
+			// S3 service should not have any escaping applied
+			v4.DisableURIPathEscaping = true
+		}
 	})
 
 	signingTime := req.Time
@@ -510,17 +568,10 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
 
 func (ctx *signingCtx) buildCanonicalString() {
 	ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
-	uri := ctx.Request.URL.Opaque
-	if uri != "" {
-		uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
-	} else {
-		uri = ctx.Request.URL.Path
-	}
-	if uri == "" {
-		uri = "/"
-	}
 
-	if ctx.ServiceName != "s3" {
+	uri := getURIPath(ctx.Request.URL)
+
+	if !ctx.DisableURIPathEscaping {
 		uri = rest.EscapePath(uri, false)
 	}
 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 472f38cd45295f41532a25a86515c99b3a4c80d4..b01cd705aef6de9a3a06be972b2b10cecd2c48fb 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.4.14"
+const SDKVersion = "1.4.22"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
index b4ad7405c61f22962ec70895cd58662e6b17f5b0..19d97562fee7f20166a6e7fdf5ddb5e118d4c76d 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
@@ -1,7 +1,7 @@
 // Package endpoints validates regional endpoints for services.
 package endpoints
 
-//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
+//go:generate go run -tags codegen ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
 //go:generate gofmt -s -w endpoints_map.go
 
 import (
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
index c5bf3c7c356add5e08713b086388f7b41940e1ef..5594f2efd23a507754dd4d1ccf99217cd82dd4f9 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
+++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
@@ -23,6 +23,10 @@
     "us-gov-west-1/ec2metadata": {
       "endpoint": "http://169.254.169.254/latest"
     },
+    "*/budgets": {
+      "endpoint": "budgets.amazonaws.com",
+      "signingRegion": "us-east-1"
+    },
     "*/cloudfront": {
       "endpoint": "cloudfront.amazonaws.com",
       "signingRegion": "us-east-1"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
index a81d158c3d490acb44bcff75c3d8447e5798ec4f..e79e6782a68f970e09305ef1cccc017d905875d6 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
@@ -18,6 +18,10 @@ var endpointsMap = endpointStruct{
 		"*/*": {
 			Endpoint: "{service}.{region}.amazonaws.com",
 		},
+		"*/budgets": {
+			Endpoint:      "budgets.amazonaws.com",
+			SigningRegion: "us-east-1",
+		},
 		"*/cloudfront": {
 			Endpoint:      "cloudfront.amazonaws.com",
 			SigningRegion: "us-east-1",
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
index c705481c3cf1da13320923b1049f7bb1c21ae0eb..18169f0f8ce49a971a647c4ce0d8581dbe47257e 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -1,7 +1,7 @@
 // Package query provides serialization of AWS query requests, and responses.
 package query
 
-//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
 
 import (
 	"net/url"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
index a3ea40955d7cb31196f64b09221ec8cc4e526000..e0f4d5a541941b274f51a29a8b87e5f6ddab6b17 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
@@ -1,6 +1,6 @@
 package query
 
-//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
 
 import (
 	"encoding/xml"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
index c74b97e17b30e2f12529cfa4a070855b28a048bd..7bdf4c8538f5f7138485316ff62cf7ccf1f33dac 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
@@ -2,8 +2,8 @@
 // requests and responses.
 package restxml
 
-//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
-//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
 
 import (
 	"bytes"
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
index c71b6ebe0eae243b0d0f441bf12ee48e938142a9..9eec0738646165a0e16f481ec37fd02eaeca28e8 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -21,6 +21,8 @@ const opAbortMultipartUpload = "AbortMultipartUpload"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See AbortMultipartUpload for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -55,11 +57,25 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req
 	return
 }
 
+// AbortMultipartUpload API operation for Amazon Simple Storage Service.
+//
 // Aborts a multipart upload.
 //
 // To verify that all parts have been removed, so you don't get charged for
 // the part storage, you should call the List Parts operation and ensure the
 // parts list is empty.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation AbortMultipartUpload for usage and error information.
+//
+// Returned Error Codes:
+//   * NoSuchUpload
+//   The specified multipart upload does not exist.
+//
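+// A short usage sketch (svc as an *S3 client and input as an
+// *AbortMultipartUploadInput are assumed, not shown here):
+//
+//    _, err := svc.AbortMultipartUpload(input)
+//    if aerr, ok := err.(awserr.Error); ok {
+//        fmt.Println(aerr.Code(), aerr.Message())
+//    }
+//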
 func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) {
 	req, out := c.AbortMultipartUploadRequest(input)
 	err := req.Send()
@@ -73,6 +89,8 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See CompleteMultipartUpload for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -107,7 +125,16 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput)
 	return
 }
 
+// CompleteMultipartUpload API operation for Amazon Simple Storage Service.
+//
 // Completes a multipart upload by assembling previously uploaded parts.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CompleteMultipartUpload for usage and error information.
 func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) {
 	req, out := c.CompleteMultipartUploadRequest(input)
 	err := req.Send()
@@ -121,6 +148,8 @@ const opCopyObject = "CopyObject"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See CopyObject for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -155,7 +184,22 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou
 	return
 }
 
+// CopyObject API operation for Amazon Simple Storage Service.
+//
 // Creates a copy of an object that is already stored in Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CopyObject for usage and error information.
+//
+// Returned Error Codes:
+//   * ObjectNotInActiveTierError
+//   The source object of the COPY operation is not in the active tier and is
+//   only stored in Amazon Glacier.
+//
 func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
 	req, out := c.CopyObjectRequest(input)
 	err := req.Send()
@@ -169,6 +213,8 @@ const opCreateBucket = "CreateBucket"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See CreateBucket for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -203,7 +249,25 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request
 	return
 }
 
+// CreateBucket API operation for Amazon Simple Storage Service.
+//
 // Creates a new bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CreateBucket for usage and error information.
+//
+// Returned Error Codes:
+//   * BucketAlreadyExists
+//   The requested bucket name is not available. The bucket namespace is shared
+//   by all users of the system. Please select a different name and try again.
+//
+//   * BucketAlreadyOwnedByYou
+//
 func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
 	req, out := c.CreateBucketRequest(input)
 	err := req.Send()
@@ -217,6 +281,8 @@ const opCreateMultipartUpload = "CreateMultipartUpload"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See CreateMultipartUpload for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -251,6 +317,8 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re
 	return
 }
 
+// CreateMultipartUpload API operation for Amazon Simple Storage Service.
+//
 // Initiates a multipart upload and returns an upload ID.
 //
 // Note: After you initiate multipart upload and upload one or more parts, you
@@ -258,6 +326,13 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re
 // for storage of the uploaded parts. Only after you either complete or abort
 // multipart upload, Amazon S3 frees up the parts storage and stops charging
 // you for the parts storage.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CreateMultipartUpload for usage and error information.
 func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
 	req, out := c.CreateMultipartUploadRequest(input)
 	err := req.Send()
@@ -271,6 +346,8 @@ const opDeleteBucket = "DeleteBucket"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See DeleteBucket for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -307,8 +384,17 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request
 	return
 }
 
+// DeleteBucket API operation for Amazon Simple Storage Service.
+//
 // Deletes the bucket. All objects (including all object versions and Delete
 // Markers) in the bucket must be deleted before the bucket itself can be deleted.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucket for usage and error information.
 func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
 	req, out := c.DeleteBucketRequest(input)
 	err := req.Send()
@@ -322,6 +408,8 @@ const opDeleteBucketCors = "DeleteBucketCors"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See DeleteBucketCors for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -358,7 +446,16 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request
 	return
 }
 
+// DeleteBucketCors API operation for Amazon Simple Storage Service.
+//
 // Deletes the cors configuration information set for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketCors for usage and error information.
 func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) {
 	req, out := c.DeleteBucketCorsRequest(input)
 	err := req.Send()
@@ -372,6 +469,8 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See DeleteBucketLifecycle for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -408,7 +507,16 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re
 	return
 }
 
+// DeleteBucketLifecycle API operation for Amazon Simple Storage Service.
+//
 // Deletes the lifecycle configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketLifecycle for usage and error information.
 func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
 	req, out := c.DeleteBucketLifecycleRequest(input)
 	err := req.Send()
@@ -422,6 +530,8 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See DeleteBucketPolicy for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -458,7 +568,16 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req
 	return
 }
 
+// DeleteBucketPolicy API operation for Amazon Simple Storage Service.
+//
 // Deletes the policy from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketPolicy for usage and error information.
 func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
 	req, out := c.DeleteBucketPolicyRequest(input)
 	err := req.Send()
@@ -472,6 +591,8 @@ const opDeleteBucketReplication = "DeleteBucketReplication"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See DeleteBucketReplication for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -508,7 +629,16 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
 	return
 }
 
+// DeleteBucketReplication API operation for Amazon Simple Storage Service.
+//
 // Deletes the replication configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketReplication for usage and error information.
 func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
 	req, out := c.DeleteBucketReplicationRequest(input)
 	err := req.Send()
@@ -522,6 +652,8 @@ const opDeleteBucketTagging = "DeleteBucketTagging"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See DeleteBucketTagging for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -558,7 +690,16 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r
 	return
 }
 
+// DeleteBucketTagging API operation for Amazon Simple Storage Service.
+//
 // Deletes the tags from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketTagging for usage and error information.
 func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) {
 	req, out := c.DeleteBucketTaggingRequest(input)
 	err := req.Send()
@@ -572,6 +713,8 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See DeleteBucketWebsite for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -608,7 +751,16 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r
 	return
 }
 
+// DeleteBucketWebsite API operation for Amazon Simple Storage Service.
+//
 // This operation removes the website configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketWebsite for usage and error information.
 func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
 	req, out := c.DeleteBucketWebsiteRequest(input)
 	err := req.Send()
@@ -622,6 +774,8 @@ const opDeleteObject = "DeleteObject"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See DeleteObject for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -656,9 +810,18 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request
 	return
 }
 
+// DeleteObject API operation for Amazon Simple Storage Service.
+//
 // Removes the null version (if there is one) of an object and inserts a delete
 // marker, which becomes the latest version of the object. If there isn't a
 // null version, Amazon S3 does not remove any objects.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObject for usage and error information.
 func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
 	req, out := c.DeleteObjectRequest(input)
 	err := req.Send()
@@ -672,6 +835,8 @@ const opDeleteObjects = "DeleteObjects"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See DeleteObjects for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -706,8 +871,17 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque
 	return
 }
 
+// DeleteObjects API operation for Amazon Simple Storage Service.
+//
 // This operation enables you to delete multiple objects from a bucket using
 // a single HTTP request. You may specify up to 1000 keys.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjects for usage and error information.
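+//
+// A rough sketch of a batch delete (svc is assumed to be an *S3 client;
+// the bucket and keys below are placeholders):
+//
+//    _, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//        Bucket: aws.String("my-bucket"),
+//        Delete: &s3.Delete{Objects: []*s3.ObjectIdentifier{
+//            {Key: aws.String("a.txt")},
+//            {Key: aws.String("b.txt")},
+//        }},
+//    })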
 func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
 	req, out := c.DeleteObjectsRequest(input)
 	err := req.Send()
@@ -721,6 +895,8 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketAccelerateConfiguration for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -755,7 +931,16 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
 	return
 }
 
+// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
+//
 // Returns the accelerate configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAccelerateConfiguration for usage and error information.
 func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) {
 	req, out := c.GetBucketAccelerateConfigurationRequest(input)
 	err := req.Send()
@@ -769,6 +954,8 @@ const opGetBucketAcl = "GetBucketAcl"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketAcl for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -803,7 +990,16 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request
 	return
 }
 
+// GetBucketAcl API operation for Amazon Simple Storage Service.
+//
 // Gets the access control policy for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAcl for usage and error information.
 func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) {
 	req, out := c.GetBucketAclRequest(input)
 	err := req.Send()
@@ -817,6 +1013,8 @@ const opGetBucketCors = "GetBucketCors"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketCors for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -851,7 +1049,16 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque
 	return
 }
 
+// GetBucketCors API operation for Amazon Simple Storage Service.
+//
 // Returns the cors configuration for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketCors for usage and error information.
 func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) {
 	req, out := c.GetBucketCorsRequest(input)
 	err := req.Send()
@@ -865,6 +1072,8 @@ const opGetBucketLifecycle = "GetBucketLifecycle"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketLifecycle for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -902,7 +1111,16 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req
 	return
 }
 
+// GetBucketLifecycle API operation for Amazon Simple Storage Service.
+//
 // Deprecated, see the GetBucketLifecycleConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLifecycle for usage and error information.
 func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) {
 	req, out := c.GetBucketLifecycleRequest(input)
 	err := req.Send()
@@ -916,6 +1134,8 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketLifecycleConfiguration for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -950,7 +1170,16 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon
 	return
 }
 
+// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
 // Returns the lifecycle configuration information set on the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLifecycleConfiguration for usage and error information.
 func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) {
 	req, out := c.GetBucketLifecycleConfigurationRequest(input)
 	err := req.Send()
@@ -964,6 +1193,8 @@ const opGetBucketLocation = "GetBucketLocation"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketLocation for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -998,7 +1229,16 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque
 	return
 }
 
+// GetBucketLocation API operation for Amazon Simple Storage Service.
+//
 // Returns the region the bucket resides in.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLocation for usage and error information.
 func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) {
 	req, out := c.GetBucketLocationRequest(input)
 	err := req.Send()
@@ -1012,6 +1252,8 @@ const opGetBucketLogging = "GetBucketLogging"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketLogging for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1046,8 +1288,17 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request
 	return
 }
 
+// GetBucketLogging API operation for Amazon Simple Storage Service.
+//
 // Returns the logging status of a bucket and the permissions users have to
 // view and modify that status. To use GET, you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLogging for usage and error information.
 func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) {
 	req, out := c.GetBucketLoggingRequest(input)
 	err := req.Send()
@@ -1061,6 +1312,8 @@ const opGetBucketNotification = "GetBucketNotification"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketNotification for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1098,7 +1351,16 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat
 	return
 }
 
+// GetBucketNotification API operation for Amazon Simple Storage Service.
+//
 // Deprecated, see the GetBucketNotificationConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketNotification for usage and error information.
 func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) {
 	req, out := c.GetBucketNotificationRequest(input)
 	err := req.Send()
@@ -1112,6 +1374,8 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketNotificationConfiguration for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1146,7 +1410,16 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat
 	return
 }
 
+// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
+//
 // Returns the notification configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketNotificationConfiguration for usage and error information.
 func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) {
 	req, out := c.GetBucketNotificationConfigurationRequest(input)
 	err := req.Send()
@@ -1160,6 +1433,8 @@ const opGetBucketPolicy = "GetBucketPolicy"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketPolicy for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1194,7 +1469,16 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R
 	return
 }
 
+// GetBucketPolicy API operation for Amazon Simple Storage Service.
+//
 // Returns the policy of a specified bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketPolicy for usage and error information.
 func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) {
 	req, out := c.GetBucketPolicyRequest(input)
 	err := req.Send()
@@ -1208,6 +1492,8 @@ const opGetBucketReplication = "GetBucketReplication"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketReplication for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1242,7 +1528,16 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req
 	return
 }
 
+// GetBucketReplication API operation for Amazon Simple Storage Service.
+//
 // Returns the replication configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketReplication for usage and error information.
 func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
 	req, out := c.GetBucketReplicationRequest(input)
 	err := req.Send()
@@ -1256,6 +1551,8 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketRequestPayment for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1290,7 +1587,16 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput)
 	return
 }
 
+// GetBucketRequestPayment API operation for Amazon Simple Storage Service.
+//
 // Returns the request payment configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketRequestPayment for usage and error information.
 func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) {
 	req, out := c.GetBucketRequestPaymentRequest(input)
 	err := req.Send()
@@ -1304,6 +1610,8 @@ const opGetBucketTagging = "GetBucketTagging"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketTagging for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1338,7 +1646,16 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request
 	return
 }
 
+// GetBucketTagging API operation for Amazon Simple Storage Service.
+//
 // Returns the tag set associated with the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketTagging for usage and error information.
 func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) {
 	req, out := c.GetBucketTaggingRequest(input)
 	err := req.Send()
@@ -1352,6 +1669,8 @@ const opGetBucketVersioning = "GetBucketVersioning"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketVersioning for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1386,7 +1705,16 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r
 	return
 }
 
+// GetBucketVersioning API operation for Amazon Simple Storage Service.
+//
 // Returns the versioning state of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketVersioning for usage and error information.
 func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
 	req, out := c.GetBucketVersioningRequest(input)
 	err := req.Send()
@@ -1400,6 +1728,8 @@ const opGetBucketWebsite = "GetBucketWebsite"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetBucketWebsite for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1434,7 +1764,16 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request
 	return
 }
 
+// GetBucketWebsite API operation for Amazon Simple Storage Service.
+//
 // Returns the website configuration for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketWebsite for usage and error information.
 func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) {
 	req, out := c.GetBucketWebsiteRequest(input)
 	err := req.Send()
@@ -1448,6 +1787,8 @@ const opGetObject = "GetObject"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetObject for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1482,7 +1823,21 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp
 	return
 }
 
+// GetObject API operation for Amazon Simple Storage Service.
+//
 // Retrieves objects from Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObject for usage and error information.
+//
+// Returned Error Codes:
+//   * NoSuchKey
+//   The specified key does not exist.
+//
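+// A rough sketch of reading an object's body (svc is assumed to be an *S3
+// client; the bucket and key are placeholders):
+//
+//    out, err := svc.GetObject(&s3.GetObjectInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("my-key"),
+//    })
+//    if err == nil {
+//        defer out.Body.Close()
+//        // consume out.Body (an io.ReadCloser)
+//    }
+//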
 func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
 	req, out := c.GetObjectRequest(input)
 	err := req.Send()
@@ -1496,6 +1851,8 @@ const opGetObjectAcl = "GetObjectAcl"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetObjectAcl for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1530,7 +1887,21 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request
 	return
 }
 
+// GetObjectAcl API operation for Amazon Simple Storage Service.
+//
 // Returns the access control list (ACL) of an object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+//   * NoSuchKey
+//   The specified key does not exist.
+//
 func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) {
 	req, out := c.GetObjectAclRequest(input)
 	err := req.Send()
@@ -1544,6 +1915,8 @@ const opGetObjectTorrent = "GetObjectTorrent"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetObjectTorrent for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1578,7 +1951,16 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request
 	return
 }
 
+// GetObjectTorrent API operation for Amazon Simple Storage Service.
+//
 // Return torrent files from a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectTorrent for usage and error information.
 func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) {
 	req, out := c.GetObjectTorrentRequest(input)
 	err := req.Send()
@@ -1592,6 +1974,8 @@ const opHeadBucket = "HeadBucket"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See HeadBucket for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1628,8 +2012,22 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou
 	return
 }
 
+// HeadBucket API operation for Amazon Simple Storage Service.
+//
 // This operation is useful to determine if a bucket exists and you have permission
 // to access it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation HeadBucket for usage and error information.
+//
+// Returned Error Codes:
+//   * NoSuchBucket
+//   The specified bucket does not exist.
+//
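+// A rough sketch of an existence/permission check (svc is assumed to be an
+// *S3 client; the bucket name is a placeholder):
+//
+//    _, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: aws.String("my-bucket")})
+//    if err != nil {
+//        // the bucket does not exist or you do not have permission to access it
+//    }
+//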
 func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) {
 	req, out := c.HeadBucketRequest(input)
 	err := req.Send()
@@ -1643,6 +2041,8 @@ const opHeadObject = "HeadObject"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See HeadObject for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1677,9 +2077,23 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou
 	return
 }
 
+// HeadObject API operation for Amazon Simple Storage Service.
+//
 // The HEAD operation retrieves metadata from an object without returning the
 // object itself. This operation is useful if you're only interested in an object's
 // metadata. To use HEAD, you must have READ access to the object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation HeadObject for usage and error information.
+//
+// Returned Error Codes:
+//   * NoSuchKey
+//   The specified key does not exist.
+//
 func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
 	req, out := c.HeadObjectRequest(input)
 	err := req.Send()
@@ -1693,6 +2107,8 @@ const opListBuckets = "ListBuckets"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See ListBuckets for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1727,7 +2143,16 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request,
 	return
 }
 
+// ListBuckets API operation for Amazon Simple Storage Service.
+//
 // Returns a list of all buckets owned by the authenticated sender of the request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBuckets for usage and error information.
 func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
 	req, out := c.ListBucketsRequest(input)
 	err := req.Send()
@@ -1741,6 +2166,8 @@ const opListMultipartUploads = "ListMultipartUploads"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See ListMultipartUploads for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1781,7 +2208,16 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req
 	return
 }
 
+// ListMultipartUploads API operation for Amazon Simple Storage Service.
+//
 // This operation lists in-progress multipart uploads.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListMultipartUploads for usage and error information.
 func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
 	req, out := c.ListMultipartUploadsRequest(input)
 	err := req.Send()
@@ -1820,6 +2256,8 @@ const opListObjectVersions = "ListObjectVersions"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See ListObjectVersions for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1860,7 +2298,16 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req
 	return
 }
 
+// ListObjectVersions API operation for Amazon Simple Storage Service.
+//
 // Returns metadata about all of the versions of objects in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjectVersions for usage and error information.
 func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
 	req, out := c.ListObjectVersionsRequest(input)
 	err := req.Send()
@@ -1899,6 +2346,8 @@ const opListObjects = "ListObjects"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See ListObjects for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -1939,9 +2388,23 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request,
 	return
 }
 
+// ListObjects API operation for Amazon Simple Storage Service.
+//
 // Returns some or all (up to 1000) of the objects in a bucket. You can use
 // the request parameters as selection criteria to return a subset of the objects
 // in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjects for usage and error information.
+//
+// Returned Error Codes:
+//   * NoSuchBucket
+//   The specified bucket does not exist.
+//
 func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
 	req, out := c.ListObjectsRequest(input)
 	err := req.Send()
@@ -1980,6 +2443,8 @@ const opListObjectsV2 = "ListObjectsV2"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See ListObjectsV2 for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2020,10 +2485,24 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque
 	return
 }
 
+// ListObjectsV2 API operation for Amazon Simple Storage Service.
+//
 // Returns some or all (up to 1000) of the objects in a bucket. You can use
 // the request parameters as selection criteria to return a subset of the objects
 // in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend
 // you use this revised API for new application development.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjectsV2 for usage and error information.
+//
+// Returned Error Codes:
+//   * NoSuchBucket
+//   The specified bucket does not exist.
+//
 func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
 	req, out := c.ListObjectsV2Request(input)
 	err := req.Send()
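
Because ListObjectsV2 returns at most 1000 keys per call, callers typically follow NextContinuationToken until IsTruncated is false. A hedged sketch using the same imports as the complete example above; listAllKeys is a hypothetical helper.

// Sketch: page through a bucket with ListObjectsV2 and its continuation token.
func listAllKeys(svc *s3.S3, bucket string) error {
	input := &s3.ListObjectsV2Input{Bucket: aws.String(bucket)}
	for {
		page, err := svc.ListObjectsV2(input)
		if err != nil {
			return err
		}
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		if !aws.BoolValue(page.IsTruncated) {
			return nil
		}
		// Resume where the previous page stopped.
		input.ContinuationToken = page.NextContinuationToken
	}
}
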
@@ -2062,6 +2541,8 @@ const opListParts = "ListParts"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See ListParts for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2102,7 +2583,16 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp
 	return
 }
 
+// ListParts API operation for Amazon Simple Storage Service.
+//
 // Lists the parts that have been uploaded for a specific multipart upload.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListParts for usage and error information.
 func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
 	req, out := c.ListPartsRequest(input)
 	err := req.Send()
@@ -2141,6 +2631,8 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketAccelerateConfiguration for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2177,7 +2669,16 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC
 	return
 }
 
+// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
+//
 // Sets the accelerate configuration of an existing bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAccelerateConfiguration for usage and error information.
 func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) {
 	req, out := c.PutBucketAccelerateConfigurationRequest(input)
 	err := req.Send()
@@ -2191,6 +2692,8 @@ const opPutBucketAcl = "PutBucketAcl"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketAcl for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2227,7 +2730,16 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request
 	return
 }
 
+// PutBucketAcl API operation for Amazon Simple Storage Service.
+//
 // Sets the permissions on a bucket using access control lists (ACL).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAcl for usage and error information.
 func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
 	req, out := c.PutBucketAclRequest(input)
 	err := req.Send()
@@ -2241,6 +2753,8 @@ const opPutBucketCors = "PutBucketCors"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketCors for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2277,7 +2791,16 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque
 	return
 }
 
+// PutBucketCors API operation for Amazon Simple Storage Service.
+//
 // Sets the cors configuration for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketCors for usage and error information.
 func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
 	req, out := c.PutBucketCorsRequest(input)
 	err := req.Send()
@@ -2291,6 +2814,8 @@ const opPutBucketLifecycle = "PutBucketLifecycle"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketLifecycle for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2330,7 +2855,16 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
 	return
 }
 
+// PutBucketLifecycle API operation for Amazon Simple Storage Service.
+//
 // Deprecated, see the PutBucketLifecycleConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycle for usage and error information.
 func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
 	req, out := c.PutBucketLifecycleRequest(input)
 	err := req.Send()
@@ -2344,6 +2878,8 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketLifecycleConfiguration for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2380,8 +2916,17 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
 	return
 }
 
+// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
 // Sets lifecycle configuration for your bucket. If a lifecycle configuration
 // exists, it replaces it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycleConfiguration for usage and error information.
 func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
 	req, out := c.PutBucketLifecycleConfigurationRequest(input)
 	err := req.Send()
@@ -2395,6 +2940,8 @@ const opPutBucketLogging = "PutBucketLogging"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketLogging for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2431,9 +2978,18 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request
 	return
 }
 
+// PutBucketLogging API operation for Amazon Simple Storage Service.
+//
 // Set the logging parameters for a bucket and specify permissions for who
 // can view and modify the logging parameters. To set the logging status of
 // a bucket, you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLogging for usage and error information.
 func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
 	req, out := c.PutBucketLoggingRequest(input)
 	err := req.Send()
@@ -2447,6 +3003,8 @@ const opPutBucketNotification = "PutBucketNotification"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketNotification for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2486,7 +3044,16 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re
 	return
 }
 
+// PutBucketNotification API operation for Amazon Simple Storage Service.
+//
 // Deprecated, see the PutBucketNotificationConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketNotification for usage and error information.
 func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
 	req, out := c.PutBucketNotificationRequest(input)
 	err := req.Send()
@@ -2500,6 +3067,8 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketNotificationConfiguration for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2536,7 +3105,16 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat
 	return
 }
 
+// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
+//
 // Enables notifications of specified events for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketNotificationConfiguration for usage and error information.
 func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
 	req, out := c.PutBucketNotificationConfigurationRequest(input)
 	err := req.Send()
@@ -2550,6 +3128,8 @@ const opPutBucketPolicy = "PutBucketPolicy"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketPolicy for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2586,8 +3166,17 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R
 	return
 }
 
+// PutBucketPolicy API operation for Amazon Simple Storage Service.
+//
 // Replaces a policy on a bucket. If the bucket already has a policy, the one
 // in this request completely replaces it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketPolicy for usage and error information.
 func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
 	req, out := c.PutBucketPolicyRequest(input)
 	err := req.Send()
@@ -2601,6 +3190,8 @@ const opPutBucketReplication = "PutBucketReplication"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketReplication for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2637,8 +3228,17 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
 	return
 }
 
+// PutBucketReplication API operation for Amazon Simple Storage Service.
+//
 // Creates a new replication configuration (or replaces an existing one, if
 // present).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketReplication for usage and error information.
 func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
 	req, out := c.PutBucketReplicationRequest(input)
 	err := req.Send()
@@ -2652,6 +3252,8 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketRequestPayment for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2688,11 +3290,20 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput)
 	return
 }
 
+// PutBucketRequestPayment API operation for Amazon Simple Storage Service.
+//
 // Sets the request payment configuration for a bucket. By default, the bucket
 // owner pays for downloads from the bucket. This configuration parameter enables
 // the bucket owner (only) to specify that the person requesting the download
 // will be charged for the download. Documentation on requester pays buckets
 // can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketRequestPayment for usage and error information.
 func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) {
 	req, out := c.PutBucketRequestPaymentRequest(input)
 	err := req.Send()
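
The requester-pays behaviour described above is switched on with a single configuration call. A sketch under the same import assumptions as the examples earlier in this file; the bucket name is a placeholder.

// Sketch: make downloads from a bucket requester-pays, as described above.
func enableRequesterPays(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
		Bucket: aws.String(bucket),
		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
			Payer: aws.String(s3.PayerRequester), // "Requester"; the default is "BucketOwner"
		},
	})
	return err
}
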
@@ -2706,6 +3317,8 @@ const opPutBucketTagging = "PutBucketTagging"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketTagging for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2742,7 +3355,16 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
 	return
 }
 
+// PutBucketTagging API operation for Amazon Simple Storage Service.
+//
 // Sets the tags for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketTagging for usage and error information.
 func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
 	req, out := c.PutBucketTaggingRequest(input)
 	err := req.Send()
@@ -2756,6 +3378,8 @@ const opPutBucketVersioning = "PutBucketVersioning"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketVersioning for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2792,8 +3416,17 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r
 	return
 }
 
+// PutBucketVersioning API operation for Amazon Simple Storage Service.
+//
 // Sets the versioning state of an existing bucket. To set the versioning state,
 // you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketVersioning for usage and error information.
 func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
 	req, out := c.PutBucketVersioningRequest(input)
 	err := req.Send()
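
Setting the versioning state is likewise a one-field configuration call, and only the bucket owner may make it. A sketch with the same import assumptions; the bucket name is a placeholder.

// Sketch: enable versioning on a bucket you own, per the comment above.
func enableVersioning(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String(bucket),
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: aws.String(s3.BucketVersioningStatusEnabled), // or "Suspended"
		},
	})
	return err
}
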
@@ -2807,6 +3440,8 @@ const opPutBucketWebsite = "PutBucketWebsite"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutBucketWebsite for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2843,7 +3478,16 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request
 	return
 }
 
+// PutBucketWebsite API operation for Amazon Simple Storage Service.
+//
 // Set the website configuration for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketWebsite for usage and error information.
 func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
 	req, out := c.PutBucketWebsiteRequest(input)
 	err := req.Send()
@@ -2857,6 +3501,8 @@ const opPutObject = "PutObject"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutObject for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2891,7 +3537,16 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp
 	return
 }
 
+// PutObject API operation for Amazon Simple Storage Service.
+//
 // Adds an object to a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObject for usage and error information.
 func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
 	req, out := c.PutObjectRequest(input)
 	err := req.Send()
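
PutObject takes the object body as an io.ReadSeeker, so an in-memory payload can be wrapped with bytes.NewReader. A sketch with the same import assumptions plus the bytes package; the names are placeholders.

// Sketch: upload a small in-memory object; Body must satisfy io.ReadSeeker.
func putSmallObject(svc *s3.S3, bucket, key string, data []byte) error {
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(key),
		Body:        bytes.NewReader(data),
		ContentType: aws.String("application/octet-stream"),
	})
	return err
}
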
@@ -2905,6 +3560,8 @@ const opPutObjectAcl = "PutObjectAcl"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See PutObjectAcl for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2939,8 +3596,22 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request
 	return
 }
 
+// PutObjectAcl API operation for Amazon Simple Storage Service.
+//
 // Uses the acl subresource to set the access control list (ACL) permissions
 // for an object that already exists in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+//   * NoSuchKey
+//   The specified key does not exist.
+//
 func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
 	req, out := c.PutObjectAclRequest(input)
 	err := req.Send()
@@ -2954,6 +3625,8 @@ const opRestoreObject = "RestoreObject"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See RestoreObject for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -2988,7 +3661,21 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
 	return
 }
 
+// RestoreObject API operation for Amazon Simple Storage Service.
+//
 // Restores an archived copy of an object back into Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation RestoreObject for usage and error information.
+//
+// Returned Error Codes:
+//   * ObjectAlreadyInActiveTierError
+//   This operation is not allowed against this storage tier.
+//
 func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
 	req, out := c.RestoreObjectRequest(input)
 	err := req.Send()
@@ -3002,6 +3689,8 @@ const opUploadPart = "UploadPart"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See UploadPart for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -3036,6 +3725,8 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
 	return
 }
 
+// UploadPart API operation for Amazon Simple Storage Service.
+//
 // Uploads a part in a multipart upload.
 //
 // Note: After you initiate multipart upload and upload one or more parts, you
@@ -3043,6 +3734,13 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
 // for storage of the uploaded parts. Only after you either complete or abort
 // multipart upload, Amazon S3 frees up the parts storage and stops charging
 // you for the parts storage.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPart for usage and error information.
 func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
 	req, out := c.UploadPartRequest(input)
 	err := req.Send()
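
The note above means an interrupted multipart upload keeps accruing storage charges until it is either completed or aborted, so error paths should abort explicitly. A sketch under the same import assumptions (plus bytes); the names are placeholders, and in practice every part except the last must be at least 5 MB.

// Sketch: minimal multipart upload that completes on success and aborts on
// failure, so S3 stops charging for the stored parts (see the note above).
func multipartUpload(svc *s3.S3, bucket, key string, parts [][]byte) error {
	create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}

	var completed []*s3.CompletedPart
	for i, data := range parts {
		num := int64(i + 1)
		up, err := svc.UploadPart(&s3.UploadPartInput{
			Bucket:     aws.String(bucket),
			Key:        aws.String(key),
			UploadId:   create.UploadId,
			PartNumber: aws.Int64(num),
			Body:       bytes.NewReader(data),
		})
		if err != nil {
			// Abort so the partially uploaded parts are freed.
			svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
				Bucket: aws.String(bucket), Key: aws.String(key), UploadId: create.UploadId,
			})
			return err
		}
		completed = append(completed, &s3.CompletedPart{ETag: up.ETag, PartNumber: aws.Int64(num)})
	}

	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		UploadId:        create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: completed},
	})
	return err
}
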
@@ -3056,6 +3754,8 @@ const opUploadPartCopy = "UploadPartCopy"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See UploadPartCopy for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -3090,7 +3790,16 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req
 	return
 }
 
+// UploadPartCopy API operation for Amazon Simple Storage Service.
+//
 // Uploads a part by copying data from an existing object as data source.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPartCopy for usage and error information.
 func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
 	req, out := c.UploadPartCopyRequest(input)
 	err := req.Send()
@@ -4778,7 +5487,6 @@ type FilterRule struct {
 	// the filtering rule applies. Maximum prefix length can be up to 1,024 characters.
 	// Overlapping prefixes and suffixes are not supported. For more information,
 	// go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	// in the Amazon Simple Storage Service Developer Guide.
 	Name *string `type:"string" enum:"FilterRuleName"`
 
 	Value *string `type:"string"`
@@ -6202,7 +6910,6 @@ type LambdaFunctionConfiguration struct {
 
 	// Container for object key name filtering rules. For information about key
 	// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	// in the Amazon Simple Storage Service Developer Guide.
 	Filter *NotificationConfigurationFilter `type:"structure"`
 
 	// Optional unique identifier for configurations in a notification configuration.
@@ -7052,8 +7759,7 @@ type NoncurrentVersionExpiration struct {
 	// Specifies the number of days an object is noncurrent before Amazon S3 can
 	// perform the associated action. For information about the noncurrent days
 	// calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
-	// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in
-	// the Amazon Simple Storage Service Developer Guide.
+	// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
 	NoncurrentDays *int64 `type:"integer"`
 }
 
@@ -7078,8 +7784,7 @@ type NoncurrentVersionTransition struct {
 	// Specifies the number of days an object is noncurrent before Amazon S3 can
 	// perform the associated action. For information about the noncurrent days
 	// calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
-	// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in
-	// the Amazon Simple Storage Service Developer Guide.
+	// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
 	NoncurrentDays *int64 `type:"integer"`
 
 	// The class of storage used to store the object.
@@ -7180,7 +7885,6 @@ func (s NotificationConfigurationDeprecated) GoString() string {
 
 // Container for object key name filtering rules. For information about key
 // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-// in the Amazon Simple Storage Service Developer Guide.
 type NotificationConfigurationFilter struct {
 	_ struct{} `type:"structure"`
 
@@ -8393,7 +9097,6 @@ type QueueConfiguration struct {
 
 	// Container for object key name filtering rules. For information about key
 	// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	// in the Amazon Simple Storage Service Developer Guide.
 	Filter *NotificationConfigurationFilter `type:"structure"`
 
 	// Optional unique identifier for configurations in a notification configuration.
@@ -9011,7 +9714,6 @@ type TopicConfiguration struct {
 
 	// Container for object key name filtering rules. For information about key
 	// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	// in the Amazon Simple Storage Service Developer Guide.
 	Filter *NotificationConfigurationFilter `type:"structure"`
 
 	// Optional unique identifier for configurations in a notification configuration.
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
index ccbf5cc1ab737389e0aa2f52e5b5b433d359a432..f05d1eae9a15f42b060b9a0762f0dea56b84fbbf 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -1,6 +1,7 @@
 package s3
 
 import (
+	"bytes"
 	"fmt"
 	"net/url"
 	"regexp"
@@ -37,14 +38,6 @@ var accelerateOpBlacklist = operationBlacklist{
 func updateEndpointForS3Config(r *request.Request) {
 	forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
 	accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
-	useDualStack := aws.BoolValue(r.Config.UseDualStack)
-
-	if useDualStack && accelerate {
-		r.Error = awserr.New("InvalidParameterException",
-			fmt.Sprintf("configuration aws.Config.UseDualStack is not compatible with aws.Config.Accelerate"),
-			nil)
-		return
-	}
 
 	if accelerate && accelerateOpBlacklist.Continue(r) {
 		if forceHostStyle {
@@ -75,6 +68,10 @@ func updateEndpointForHostStyle(r *request.Request) {
 	moveBucketToHost(r.HTTPRequest.URL, bucket)
 }
 
+var (
+	accelElem = []byte("s3-accelerate.dualstack.")
+)
+
 func updateEndpointForAccelerate(r *request.Request) {
 	bucket, ok := bucketNameFromReqParams(r.Params)
 	if !ok {
@@ -93,6 +90,22 @@ func updateEndpointForAccelerate(r *request.Request) {
 
 	// Change endpoint from s3(-[a-z0-1-])?.amazonaws.com to s3-accelerate.amazonaws.com
 	r.HTTPRequest.URL.Host = replaceHostRegion(r.HTTPRequest.URL.Host, "accelerate")
+
+	if aws.BoolValue(r.Config.UseDualStack) {
+		host := []byte(r.HTTPRequest.URL.Host)
+
+		// Strip region from hostname
+		if idx := bytes.Index(host, accelElem); idx >= 0 {
+			start := idx + len(accelElem)
+			if end := bytes.IndexByte(host[start:], '.'); end >= 0 {
+				end += start + 1
+				copy(host[start:], host[end:])
+				host = host[:len(host)-(end-start)]
+				r.HTTPRequest.URL.Host = string(host)
+			}
+		}
+	}
+
 	moveBucketToHost(r.HTTPRequest.URL, bucket)
 }
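
The rewritten updateEndpointForAccelerate above now accepts S3UseAccelerate together with UseDualStack, stripping the region element from the host, whereas the removed hunk shows the old behaviour of rejecting that combination outright. A sketch of a client configured to exercise this path; the region is a placeholder, and the endpoint comment is an expectation based on the code above rather than part of the patch.

// Sketch: a client configured for both Transfer Acceleration and dual-stack,
// the combination the code above now rewrites instead of rejecting.
func newDualStackAccelerateClient() *s3.S3 {
	sess := session.Must(session.NewSession(&aws.Config{
		Region:          aws.String("us-west-2"), // placeholder
		S3UseAccelerate: aws.Bool(true),
		UseDualStack:    aws.Bool(true),
	}))
	// Bucket requests are expected to resolve to hosts of the form
	// <bucket>.s3-accelerate.dualstack.amazonaws.com after the rewrite above.
	return s3.New(sess)
}
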
 
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index d183fab877123ef2c53a64c81d4e0a0ccb8960f1..e10ca8f26bdffe4510a66a753817aeab0033b71e 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -17,6 +17,8 @@ const opAssumeRole = "AssumeRole"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See AssumeRole for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -51,6 +53,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 	return
 }
 
+// AssumeRole API operation for AWS Security Token Service.
+//
 // Returns a set of temporary security credentials (consisting of an access
 // key ID, a secret access key, and a security token) that you can use to access
 // AWS resources that you might not normally have access to. Typically, you
@@ -60,7 +64,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 // and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
 // in the IAM User Guide.
 //
-//  Important: You cannot call AssumeRole by using AWS root account credentials;
+// Important: You cannot call AssumeRole by using AWS root account credentials;
 // access is denied. You must use credentials for an IAM user or an IAM role
 // to call AssumeRole.
 //
@@ -89,18 +93,18 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 // when calling AssumeRole, which can be from 900 seconds (15 minutes) to a
 // maximum of 3600 seconds (1 hour). The default is 1 hour.
 //
-// The temporary security credentials created by AssumeRole can be used to
-// make API calls to any AWS service with the following exception: you cannot
-// call the STS service's GetFederationToken or GetSessionToken APIs.
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any AWS service with the following exception: you cannot call
+// the STS service's GetFederationToken or GetSessionToken APIs.
 //
-// Optionally, you can pass an IAM access policy to this operation. If you
-// choose not to pass a policy, the temporary security credentials that are
-// returned by the operation have the permissions that are defined in the access
-// policy of the role that is being assumed. If you pass a policy to this operation,
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
 // the temporary security credentials that are returned by the operation have
 // the permissions that are allowed by both the access policy of the role that
-// is being assumed,  and  the policy that you pass. This gives you a way to
-// further restrict the permissions for the resulting temporary security credentials.
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
 // You cannot use the passed policy to grant permissions that are in excess
 // of those allowed by the access policy of the role that is being assumed.
 // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
@@ -120,7 +124,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 // a policy to the user (identical to the previous different account user),
 // or you can add the user as a principal directly in the role's trust policy
 //
-//  Using MFA with AssumeRole
+// Using MFA with AssumeRole
 //
 // You can optionally include multi-factor authentication (MFA) information
 // when you call AssumeRole. This is useful for cross-account scenarios in which
@@ -131,7 +135,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 // denied. The condition in a trust policy that tests for MFA authentication
 // might look like the following example.
 //
-//  "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
 //
 // For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
 // in the IAM User Guide.
@@ -140,6 +144,31 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 // parameters. The SerialNumber value identifies the user's hardware or virtual
 // MFA device. The TokenCode is the time-based one-time password (TOTP) that
 // the MFA devices produces.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+//   * MalformedPolicyDocument
+//   The request was rejected because the policy document was malformed. The error
+//   message describes the specific error.
+//
+//   * PackedPolicyTooLarge
+//   The request was rejected because the policy document was too large. The error
+//   message describes how big the policy document is, in packed form, as a percentage
+//   of what the API allows.
+//
+//   * RegionDisabledException
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
+//
 func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
 	req, out := c.AssumeRoleRequest(input)
 	err := req.Send()
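
The MFA discussion above corresponds to the optional SerialNumber and TokenCode fields on AssumeRoleInput. A sketch with placeholder ARNs; it assumes the sts package is imported from the vendored SDK and that totpCode comes from the user's MFA device.

// Sketch: AssumeRole with the optional MFA parameters described above.
func assumeRoleWithMFA(svc *sts.STS, totpCode string) (*sts.Credentials, error) {
	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int64(900), // 900-3600 seconds; the default is 3600
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"), // placeholder
		TokenCode:       aws.String(totpCode),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}
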
@@ -153,6 +182,8 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See AssumeRoleWithSAML for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -187,6 +218,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 	return
 }
 
+// AssumeRoleWithSAML API operation for AWS Security Token Service.
+//
 // Returns a set of temporary security credentials for users who have been authenticated
 // via a SAML authentication response. This operation provides a mechanism for
 // tying an enterprise identity store or directory to role-based AWS access
@@ -206,17 +239,17 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 // can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour).
 // The default is 1 hour.
 //
-// The temporary security credentials created by AssumeRoleWithSAML can be
-// used to make API calls to any AWS service with the following exception: you
-// cannot call the STS service's GetFederationToken or GetSessionToken APIs.
+// The temporary security credentials created by AssumeRoleWithSAML can be used
+// to make API calls to any AWS service with the following exception: you cannot
+// call the STS service's GetFederationToken or GetSessionToken APIs.
 //
-// Optionally, you can pass an IAM access policy to this operation. If you
-// choose not to pass a policy, the temporary security credentials that are
-// returned by the operation have the permissions that are defined in the access
-// policy of the role that is being assumed. If you pass a policy to this operation,
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
 // the temporary security credentials that are returned by the operation have
 // the permissions that are allowed by the intersection of both the access policy
-// of the role that is being assumed,  and  the policy that you pass. This means
+// of the role that is being assumed, and the policy that you pass. This means
 // that both policies must grant the permission for the action to be allowed.
 // This gives you a way to further restrict the permissions for the resulting
 // temporary security credentials. You cannot use the passed policy to grant
@@ -225,8 +258,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 // AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
 // in the IAM User Guide.
 //
-// Before your application can call AssumeRoleWithSAML, you must configure
-// your SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
+// Before your application can call AssumeRoleWithSAML, you must configure your
+// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
 // you must use AWS Identity and Access Management (IAM) to create a SAML provider
 // entity in your AWS account that represents your identity provider, and create
 // an IAM role that specifies this SAML provider in its trust policy.
@@ -235,25 +268,65 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 // The identity of the caller is validated by using keys in the metadata document
 // that is uploaded for the SAML provider entity for your identity provider.
 //
-//  Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
+// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
 // logs. The entry includes the value in the NameID element of the SAML assertion.
 // We recommend that you use a NameIDType that is not associated with any personally
 // identifiable information (PII). For example, you could instead use the Persistent
 // Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
 //
-//  For more information, see the following resources:
+// For more information, see the following resources:
 //
-//    About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
-// in the IAM User Guide.
+//    * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+//    in the IAM User Guide.
 //
-//    Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
-// in the IAM User Guide.
+//    * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
+//    in the IAM User Guide.
 //
-//    Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
-// in the IAM User Guide.
+//    * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
+//    in the IAM User Guide.
+//
+//    * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
+//    in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithSAML for usage and error information.
+//
+// Returned Error Codes:
+//   * MalformedPolicyDocument
+//   The request was rejected because the policy document was malformed. The error
+//   message describes the specific error.
+//
+//   * PackedPolicyTooLarge
+//   The request was rejected because the policy document was too large. The error
+//   message describes how big the policy document is, in packed form, as a percentage
+//   of what the API allows.
+//
+//   * IDPRejectedClaim
+//   The identity provider (IdP) reported that authentication failed. This might
+//   be because the claim is invalid.
+//
+//   If this error is returned for the AssumeRoleWithWebIdentity operation, it
+//   can also mean that the claim has expired or has been explicitly revoked.
+//
+//   * InvalidIdentityToken
+//   The web identity token that was passed could not be validated by AWS. Get
+//   a new identity token from the identity provider and then retry the request.
+//
+//   * ExpiredTokenException
+//   The web identity token that was passed is expired or is not valid. Get a
+//   new identity token from the identity provider and then retry the request.
+//
+//   * RegionDisabledException
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
 //
-//    Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
-// in the IAM User Guide.
 func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
 	req, out := c.AssumeRoleWithSAMLRequest(input)
 	err := req.Send()
@@ -267,6 +340,8 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See AssumeRoleWithWebIdentity for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -301,13 +376,15 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 	return
 }
 
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
 // Returns a set of temporary security credentials for users who have been authenticated
 // in a mobile or web application with a web identity provider, such as Amazon
 // Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
 // identity provider.
 //
-//  For mobile applications, we recommend that you use Amazon Cognito. You
-// can use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
 // and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
 // identify a user and supply the user with a consistent identity throughout
 // the lifetime of an application.
@@ -317,7 +394,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 // (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
 // in the AWS SDK for iOS Developer Guide.
 //
-//  Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
 // credentials. Therefore, you can distribute an application (for example, on
 // mobile devices) that requests temporary security credentials without including
 // long-term AWS credentials in the application, and without deploying server-based
@@ -336,18 +413,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 // AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to
 // a maximum of 3600 seconds (1 hour). The default is 1 hour.
 //
-// The temporary security credentials created by AssumeRoleWithWebIdentity
-// can be used to make API calls to any AWS service with the following exception:
+// The temporary security credentials created by AssumeRoleWithWebIdentity can
+// be used to make API calls to any AWS service with the following exception:
 // you cannot call the STS service's GetFederationToken or GetSessionToken APIs.
 //
-// Optionally, you can pass an IAM access policy to this operation. If you
-// choose not to pass a policy, the temporary security credentials that are
-// returned by the operation have the permissions that are defined in the access
-// policy of the role that is being assumed. If you pass a policy to this operation,
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
 // the temporary security credentials that are returned by the operation have
 // the permissions that are allowed by both the access policy of the role that
-// is being assumed,  and  the policy that you pass. This gives you a way to
-// further restrict the permissions for the resulting temporary security credentials.
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
 // You cannot use the passed policy to grant permissions that are in excess
 // of those allowed by the access policy of the role that is being assumed.
 // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
@@ -360,32 +437,83 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 // the identity provider that is associated with the identity token. In other
 // words, the identity provider must be specified in the role's trust policy.
 //
-//  Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
+// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
 // logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
 // of the provided Web Identity Token. We recommend that you avoid using any
 // personally identifiable information (PII) in this field. For example, you
 // could instead use a GUID or a pairwise identifier, as suggested in the OIDC
 // specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
 //
-//  For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
+// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
 // API, see the following resources:
 //
-//    Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual)
-// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//    * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual)
+//    and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+//    * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+//    This interactive website lets you walk through the process of authenticating
+//    via Login with Amazon, Facebook, or Google, getting temporary security
+//    credentials, and then using those credentials to make a request to AWS.
+//
+//    * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android
+//    (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample
+//    apps that show how to invoke the identity providers, and then how to use
+//    the information from these providers to get and use temporary security
+//    credentials.
+//
+//    * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313).
+//    This article discusses web identity federation and shows an example of
+//    how to use web identity federation to get access to content in Amazon
+//    S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
 //
-//     Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
-// This interactive website lets you walk through the process of authenticating
-// via Login with Amazon, Facebook, or Google, getting temporary security credentials,
-// and then using those credentials to make a request to AWS.
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithWebIdentity for usage and error information.
 //
-//    AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android
-// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample apps
-// that show how to invoke the identity providers, and then how to use the information
-// from these providers to get and use temporary security credentials.
+// Returned Error Codes:
+//   * MalformedPolicyDocument
+//   The request was rejected because the policy document was malformed. The error
+//   message describes the specific error.
+//
+//   * PackedPolicyTooLarge
+//   The request was rejected because the policy document was too large. The error
+//   message describes how big the policy document is, in packed form, as a percentage
+//   of what the API allows.
+//
+//   * IDPRejectedClaim
+//   The identity provider (IdP) reported that authentication failed. This might
+//   be because the claim is invalid.
+//
+//   If this error is returned for the AssumeRoleWithWebIdentity operation, it
+//   can also mean that the claim has expired or has been explicitly revoked.
+//
+//   * IDPCommunicationError
+//   The request could not be fulfilled because the non-AWS identity provider
+//   (IDP) that was asked to verify the incoming identity token could not be reached.
+//   This is often a transient error caused by network conditions. Retry the request
+//   a limited number of times so that you don't exceed the request rate. If the
+//   error persists, the non-AWS identity provider might be down or not responding.
+//
+//   * InvalidIdentityToken
+//   The web identity token that was passed could not be validated by AWS. Get
+//   a new identity token from the identity provider and then retry the request.
+//
+//   * ExpiredTokenException
+//   The web identity token that was passed is expired or is not valid. Get a
+//   new identity token from the identity provider and then retry the request.
+//
+//   * RegionDisabledException
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
 //
-//    Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313).
-// This article discusses web identity federation and shows an example of how
-// to use web identity federation to get access to content in Amazon S3.
 func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
 	req, out := c.AssumeRoleWithWebIdentityRequest(input)
 	err := req.Send()
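A minimal sketch of acting on the error codes listed above via the awserr.Error type assertion; the role ARN, session name, and identity token are placeholders, and the error-code strings are taken from the documentation block itself:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/demo"), // placeholder
		RoleSessionName:  aws.String("web-identity-demo"),
		WebIdentityToken: aws.String("token-from-idp"), // placeholder
	})
	if err != nil {
		// Runtime type assertion to inspect the service error code.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case "ExpiredTokenException", "InvalidIdentityToken":
				fmt.Println("fetch a new token from the identity provider:", aerr.Message())
			case "IDPCommunicationError":
				fmt.Println("transient IdP error; retry a limited number of times")
			default:
				fmt.Println(aerr.Code(), aerr.Message())
			}
		}
		return
	}
	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
}
```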
@@ -399,6 +527,8 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See DecodeAuthorizationMessage for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -433,6 +563,8 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
 	return
 }
 
+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
 // Decodes additional information about the authorization status of a request
 // from an encoded message returned in response to an AWS request.
 //
@@ -441,30 +573,44 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
 // (an HTTP 403 response). Some AWS actions additionally return an encoded message
 // that can provide details about this authorization failure.
 //
-//  Only certain AWS actions return an encoded authorization message. The documentation
+// Only certain AWS actions return an encoded authorization message. The documentation
 // for an individual action indicates whether that action returns an encoded
 // message in addition to returning an HTTP code.
 //
-//  The message is encoded because the details of the authorization status
-// can constitute privileged information that the user who requested the action
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the action
 // should not see. To decode an authorization status message, a user must be
 // granted permissions via an IAM policy to request the DecodeAuthorizationMessage
 // (sts:DecodeAuthorizationMessage) action.
 //
 // The decoded message includes the following type of information:
 //
-//   Whether the request was denied due to an explicit deny or due to the absence
-// of an explicit allow. For more information, see Determining Whether a Request
-// is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
-// in the IAM User Guide.
+//    * Whether the request was denied due to an explicit deny or due to the
+//    absence of an explicit allow. For more information, see Determining Whether
+//    a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+//    in the IAM User Guide.
+//
+//    * The principal who made the request.
+//
+//    * The requested action.
+//
+//    * The requested resource.
 //
-//   The principal who made the request.
+//    * The values of condition keys in the context of the user's request.
 //
-//   The requested action.
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
 //
-//   The requested resource.
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+//   * InvalidAuthorizationMessageException
+//   The error returned if the message passed to DecodeAuthorizationMessage was
+//   invalid. This can happen if the token contains invalid characters, such as
+//   linebreaks.
 //
-//   The values of condition keys in the context of the user's request.
 func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
 	req, out := c.DecodeAuthorizationMessageRequest(input)
 	err := req.Send()
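A short sketch of calling DecodeAuthorizationMessage, assuming the caller has been granted sts:DecodeAuthorizationMessage and has captured an encoded message from a denied request (the message string below is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// encodedMsg would be taken from the error returned by a denied AWS request.
	encodedMsg := "<encoded authorization message>" // placeholder

	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encodedMsg),
	})
	if err != nil {
		log.Fatal(err) // e.g. InvalidAuthorizationMessageException
	}
	// The decoded message describes why the request was denied (principal,
	// action, resource, condition keys).
	fmt.Println(aws.StringValue(out.DecodedMessage))
}
```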
@@ -478,6 +624,8 @@ const opGetCallerIdentity = "GetCallerIdentity"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetCallerIdentity for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -512,8 +660,17 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ
 	return
 }
 
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
 // Returns details about the IAM identity whose credentials are used to call
 // the API.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
 func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
 	req, out := c.GetCallerIdentityRequest(input)
 	err := req.Send()
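A minimal usage sketch for GetCallerIdentity, which takes no input fields and only needs valid credentials on the session:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("account:", aws.StringValue(out.Account))
	fmt.Println("arn:    ", aws.StringValue(out.Arn))
	fmt.Println("user id:", aws.StringValue(out.UserId))
}
```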
@@ -527,6 +684,8 @@ const opGetFederationToken = "GetFederationToken"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetFederationToken for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -561,6 +720,8 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
 	return
 }
 
+// GetFederationToken API operation for AWS Security Token Service.
+//
 // Returns a set of temporary security credentials (consisting of an access
 // key ID, a secret access key, and a security token) for a federated user.
 // A typical use is in a proxy application that gets temporary security credentials
@@ -573,20 +734,20 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
 // and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
 // in the IAM User Guide.
 //
-//   If you are creating a mobile-based or browser-based app that can authenticate
+// If you are creating a mobile-based or browser-based app that can authenticate
 // users using a web identity provider like Login with Amazon, Facebook, Google,
 // or an OpenID Connect-compatible identity provider, we recommend that you
 // use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
 // For more information, see Federation Through a Web-based Identity Provider
 // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
 //
-//  The GetFederationToken action must be called by using the long-term AWS
-// security credentials of an IAM user. You can also call GetFederationToken
-// using the security credentials of an AWS root account, but we do not recommended
-// it. Instead, we recommend that you create an IAM user for the purpose of
-// the proxy application and then attach a policy to the IAM user that limits
-// federated users to only the actions and resources that they need access to.
-// For more information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// The GetFederationToken action must be called by using the long-term AWS security
+// credentials of an IAM user. You can also call GetFederationToken using the
+// security credentials of an AWS root account, but we do not recommend it.
+// Instead, we recommend that you create an IAM user for the purpose of the
+// proxy application and then attach a policy to the IAM user that limits federated
+// users to only the actions and resources that they need access to. For more
+// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
 // in the IAM User Guide.
 //
 // The temporary security credentials that are obtained by using the long-term
@@ -595,30 +756,30 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
 // is 43200 seconds (12 hours). Temporary credentials that are obtained by using
 // AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
 //
-// The temporary security credentials created by GetFederationToken can be
-// used to make API calls to any AWS service with the following exceptions:
+// The temporary security credentials created by GetFederationToken can be used
+// to make API calls to any AWS service with the following exceptions:
 //
-//   You cannot use these credentials to call any IAM APIs.
+//    * You cannot use these credentials to call any IAM APIs.
 //
-//   You cannot call any STS APIs.
+//    * You cannot call any STS APIs.
 //
-//    Permissions
+// Permissions
 //
 // The permissions for the temporary security credentials returned by GetFederationToken
 // are determined by a combination of the following:
 //
-//   The policy or policies that are attached to the IAM user whose credentials
-// are used to call GetFederationToken.
+//    * The policy or policies that are attached to the IAM user whose credentials
+//    are used to call GetFederationToken.
 //
-//   The policy that is passed as a parameter in the call.
+//    * The policy that is passed as a parameter in the call.
 //
-//   The passed policy is attached to the temporary security credentials that
+// The passed policy is attached to the temporary security credentials that
 // result from the GetFederationToken API call--that is, to the federated user.
 // When the federated user makes an AWS request, AWS evaluates the policy attached
 // to the federated user in combination with the policy or policies attached
 // to the IAM user whose credentials were used to call GetFederationToken. AWS
-// allows the federated user's request only when both the federated user  and
-//  the IAM user are explicitly allowed to perform the requested action. The
+// allows the federated user's request only when both the federated user and
+// the IAM user are explicitly allowed to perform the requested action. The
 // passed policy cannot grant more permissions than those that are defined in
 // the IAM user policy.
 //
@@ -639,6 +800,31 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
 // For information about using GetFederationToken to create temporary security
 // credentials, see GetFederationToken—Federation Through a Custom Identity
 // Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetFederationToken for usage and error information.
+//
+// Returned Error Codes:
+//   * MalformedPolicyDocument
+//   The request was rejected because the policy document was malformed. The error
+//   message describes the specific error.
+//
+//   * PackedPolicyTooLarge
+//   The request was rejected because the policy document was too large. The error
+//   message describes how big the policy document is, in packed form, as a percentage
+//   of what the API allows.
+//
+//   * RegionDisabledException
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
+//
 func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
 	req, out := c.GetFederationTokenRequest(input)
 	err := req.Send()
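A sketch of the proxy-application pattern described above: call GetFederationToken with long-term IAM user credentials and pass a scoping policy, so the federated user's effective permissions are the intersection of the IAM user's policy and the passed policy. The federated user name and policy document are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Must be called with the long-term credentials of an IAM user.
	svc := sts.New(session.Must(session.NewSession()))

	// Placeholder policy; the passed policy cannot grant more than the IAM
	// user's own policy allows.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`

	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("federated-user"), // placeholder
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int64(3600), // 900 to 129600 seconds for IAM user credentials
	})
	if err != nil {
		log.Fatal(err) // e.g. MalformedPolicyDocument, PackedPolicyTooLarge
	}
	fmt.Println("federated access key:", aws.StringValue(out.Credentials.AccessKeyId))
}
```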
@@ -652,6 +838,8 @@ const opGetSessionToken = "GetSessionToken"
 // value can be used to capture response data after the request's "Send" method
 // is called.
 //
+// See GetSessionToken for usage and error information.
+//
 // Creating a request object using this method should be used when you want to inject
 // custom logic into the request's lifecycle using a custom handler, or if you want to
 // access properties on the request object before or after sending the request. If
@@ -686,6 +874,8 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
 	return
 }
 
+// GetSessionToken API operation for AWS Security Token Service.
+//
 // Returns a set of temporary credentials for an AWS account or IAM user. The
 // credentials consist of an access key ID, a secret access key, and a security
 // token. Typically, you use GetSessionToken if you want to use MFA to protect
@@ -711,17 +901,17 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
 // The temporary security credentials created by GetSessionToken can be used
 // to make API calls to any AWS service with the following exceptions:
 //
-//   You cannot call any IAM APIs unless MFA authentication information is
-// included in the request.
+//    * You cannot call any IAM APIs unless MFA authentication information is
+//    included in the request.
 //
-//   You cannot call any STS API except AssumeRole.
+//    * You cannot call any STS API except AssumeRole.
 //
-//    We recommend that you do not call GetSessionToken with root account credentials.
+// We recommend that you do not call GetSessionToken with root account credentials.
 // Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
 // by creating one or more IAM users, giving them the necessary permissions,
 // and using IAM users for everyday interaction with AWS.
 //
-//  The permissions associated with the temporary security credentials returned
+// The permissions associated with the temporary security credentials returned
 // by GetSessionToken are based on the permissions associated with the account or
 // IAM user whose credentials are used to call the action. If GetSessionToken
 // is called using root account credentials, the temporary credentials have
@@ -732,6 +922,22 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
 // For more information about using GetSessionToken to create temporary credentials,
 // go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
 // in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+//   * RegionDisabledException
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
+//
 func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
 	req, out := c.GetSessionTokenRequest(input)
 	err := req.Send()
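A sketch of the MFA-protected GetSessionToken call described above, run with IAM user credentials rather than root account credentials; the MFA device serial number and token code are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(3600),
		// Optional MFA parameters; both values are placeholders.
		SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/user"),
		TokenCode:    aws.String("123456"),
	})
	if err != nil {
		log.Fatal(err) // e.g. RegionDisabledException
	}
	fmt.Println("session token:", aws.StringValue(out.Credentials.SessionToken))
}
```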
@@ -745,9 +951,9 @@ type AssumeRoleInput struct {
 	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
 	// to 3600 seconds.
 	//
-	//  This is separate from the duration of a console session that you might
-	// request using the returned credentials. The request to the federation endpoint
-	// for a console sign-in token takes a SessionDuration parameter that specifies
+	// This is separate from the duration of a console session that you might request
+	// using the returned credentials. The request to the federation endpoint for
+	// a console sign-in token takes a SessionDuration parameter that specifies
 	// the maximum length of the console session, separately from the DurationSeconds
 	// parameter on this API. For more information, see Creating a URL that Enables
 	// Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
@@ -789,7 +995,7 @@ type AssumeRoleInput struct {
 	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
 	// and carriage return (\u000D) characters.
 	//
-	//  The policy plain text must be 2048 bytes or shorter. However, an internal
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
 	// conversion compresses it into a packed binary format with a separate limit.
 	// The PackedPolicySize response element indicates by percentage how close to
 	// the upper size limit the policy is, with 100% equaling the maximum allowed
@@ -903,10 +1109,10 @@ type AssumeRoleOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security (or session) token.
 	//
-	//  Note: The size of the security token that STS APIs return is not fixed.
-	// We strongly recommend that you make no assumptions about the maximum size.
-	// As of this writing, the typical size is less than 4096 bytes, but that can
-	// vary. Also, future updates to AWS might require larger sizes.
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
 	Credentials *Credentials `type:"structure"`
 
 	// A percentage value that indicates the size of the policy in packed form.
@@ -934,9 +1140,9 @@ type AssumeRoleWithSAMLInput struct {
 	// response's SessionNotOnOrAfter value. The actual expiration time is whichever
 	// value is shorter.
 	//
-	//  This is separate from the duration of a console session that you might
-	// request using the returned credentials. The request to the federation endpoint
-	// for a console sign-in token takes a SessionDuration parameter that specifies
+	// This is separate from the duration of a console session that you might request
+	// using the returned credentials. The request to the federation endpoint for
+	// a console sign-in token takes a SessionDuration parameter that specifies
 	// the maximum length of the console session, separately from the DurationSeconds
 	// parameter on this API. For more information, see Enabling SAML 2.0 Federated
 	// Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html)
@@ -948,8 +1154,8 @@ type AssumeRoleWithSAMLInput struct {
 	// The policy parameter is optional. If you pass a policy, the temporary security
 	// credentials that are returned by the operation have the permissions that
 	// are allowed by both the access policy of the role that is being assumed,
-	//  and  the policy that you pass. This gives you a way to further restrict
-	// the permissions for the resulting temporary security credentials. You cannot
+	// and the policy that you pass. This gives you a way to further restrict the
+	// permissions for the resulting temporary security credentials. You cannot
 	// use the passed policy to grant permissions that are in excess of those allowed
 	// by the access policy of the role that is being assumed. For more information,
 	// Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
@@ -962,7 +1168,7 @@ type AssumeRoleWithSAMLInput struct {
 	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
 	// and carriage return (\u000D) characters.
 	//
-	//  The policy plain text must be 2048 bytes or shorter. However, an internal
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
 	// conversion compresses it into a packed binary format with a separate limit.
 	// The PackedPolicySize response element indicates by percentage how close to
 	// the upper size limit the policy is, with 100% equaling the maximum allowed
@@ -982,8 +1188,7 @@ type AssumeRoleWithSAMLInput struct {
 
 	// The base-64 encoded SAML authentication response provided by the IdP.
 	//
-	// For more information, see Configuring a Relying Party and Adding Claims
-	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+	// For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
 	// in the Using IAM guide.
 	//
 	// SAMLAssertion is a required field
@@ -1050,10 +1255,10 @@ type AssumeRoleWithSAMLOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security (or session) token.
 	//
-	//  Note: The size of the security token that STS APIs return is not fixed.
-	// We strongly recommend that you make no assumptions about the maximum size.
-	// As of this writing, the typical size is less than 4096 bytes, but that can
-	// vary. Also, future updates to AWS might require larger sizes.
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
 	Credentials *Credentials `type:"structure"`
 
 	// The value of the Issuer element of the SAML assertion.
@@ -1066,7 +1271,7 @@ type AssumeRoleWithSAMLOutput struct {
 	//
 	// The following pseudocode shows how the hash value is calculated:
 	//
-	//  BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
+	// BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
 	// ) )
 	NameQualifier *string `type:"string"`
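A tiny sketch of the NameQualifier pseudocode above, using the example values from the comment; this is only an illustration of the hash construction, not part of the SDK:

```go
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) )
	sum := sha1.Sum([]byte("https://example.com/saml" + "123456789012" + "/MySAMLIdP"))
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
```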
 
@@ -1082,7 +1287,7 @@ type AssumeRoleWithSAMLOutput struct {
 	// element of the SAML assertion. Typical examples of the format are transient
 	// or persistent.
 	//
-	//  If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
+	// If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
 	// that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient
 	// is returned as transient. If the format includes any other prefix, the format
 	// is returned with no modifications.
@@ -1106,9 +1311,9 @@ type AssumeRoleWithWebIdentityInput struct {
 	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
 	// to 3600 seconds.
 	//
-	//  This is separate from the duration of a console session that you might
-	// request using the returned credentials. The request to the federation endpoint
-	// for a console sign-in token takes a SessionDuration parameter that specifies
+	// This is separate from the duration of a console session that you might request
+	// using the returned credentials. The request to the federation endpoint for
+	// a console sign-in token takes a SessionDuration parameter that specifies
 	// the maximum length of the console session, separately from the DurationSeconds
 	// parameter on this API. For more information, see Creating a URL that Enables
 	// Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
@@ -1120,8 +1325,8 @@ type AssumeRoleWithWebIdentityInput struct {
 	// The policy parameter is optional. If you pass a policy, the temporary security
 	// credentials that are returned by the operation have the permissions that
 	// are allowed by both the access policy of the role that is being assumed,
-	//  and  the policy that you pass. This gives you a way to further restrict
-	// the permissions for the resulting temporary security credentials. You cannot
+	// and the policy that you pass. This gives you a way to further restrict the
+	// permissions for the resulting temporary security credentials. You cannot
 	// use the passed policy to grant permissions that are in excess of those allowed
 	// by the access policy of the role that is being assumed. For more information,
 	// see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
@@ -1133,7 +1338,7 @@ type AssumeRoleWithWebIdentityInput struct {
 	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
 	// and carriage return (\u000D) characters.
 	//
-	//  The policy plain text must be 2048 bytes or shorter. However, an internal
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
 	// conversion compresses it into a packed binary format with a separate limit.
 	// The PackedPolicySize response element indicates by percentage how close to
 	// the upper size limit the policy is, with 100% equaling the maximum allowed
@@ -1244,10 +1449,10 @@ type AssumeRoleWithWebIdentityOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security token.
 	//
-	//  Note: The size of the security token that STS APIs return is not fixed.
-	// We strongly recommend that you make no assumptions about the maximum size.
-	// As of this writing, the typical size is less than 4096 bytes, but that can
-	// vary. Also, future updates to AWS might require larger sizes.
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
 	Credentials *Credentials `type:"structure"`
 
 	// A percentage value that indicates the size of the policy in packed form.
@@ -1519,13 +1724,13 @@ type GetFederationTokenInput struct {
 	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
 	// and carriage return (\u000D) characters.
 	//
-	//  The policy plain text must be 2048 bytes or shorter. However, an internal
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
 	// conversion compresses it into a packed binary format with a separate limit.
 	// The PackedPolicySize response element indicates by percentage how close to
 	// the upper size limit the policy is, with 100% equaling the maximum allowed
 	// size.
 	//
-	//  For more information about how permissions work, see Permissions for GetFederationToken
+	// For more information about how permissions work, see Permissions for GetFederationToken
 	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
 	Policy *string `min:"1" type:"string"`
 }
@@ -1570,10 +1775,10 @@ type GetFederationTokenOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security (or session) token.
 	//
-	//  Note: The size of the security token that STS APIs return is not fixed.
-	// We strongly recommend that you make no assumptions about the maximum size.
-	// As of this writing, the typical size is less than 4096 bytes, but that can
-	// vary. Also, future updates to AWS might require larger sizes.
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
 	Credentials *Credentials `type:"structure"`
 
 	// Identifiers for the federated user associated with the credentials (such
@@ -1671,10 +1876,10 @@ type GetSessionTokenOutput struct {
 	// The temporary security credentials, which include an access key ID, a secret
 	// access key, and a security (or session) token.
 	//
-	//  Note: The size of the security token that STS APIs return is not fixed.
-	// We strongly recommend that you make no assumptions about the maximum size.
-	// As of this writing, the typical size is less than 4096 bytes, but that can
-	// vary. Also, future updates to AWS might require larger sizes.
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
 	Credentials *Credentials `type:"structure"`
 }
 
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
index c938e6ca1142884861b82f706991b352b7a7bec2..a9b9b3255ce5e6cfcd109fe52371efed635f9872 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -17,7 +17,7 @@ import (
 // This guide provides descriptions of the STS API. For more detailed information
 // about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
 //
-//   As an alternative to using the API, you can use one of the AWS SDKs, which
+// As an alternative to using the API, you can use one of the AWS SDKs, which
 // consist of libraries and sample code for various programming languages and
 // platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
 // way to create programmatic access to STS. For example, the SDKs take care
@@ -25,7 +25,7 @@ import (
 // automatically. For information about the AWS SDKs, including how to download
 // and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
 //
-//  For information about setting up signatures and authorization through the
+// For information about setting up signatures and authorization through the
 // API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
 // in the AWS General Reference. For general information about the Query API,
 // go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
@@ -37,7 +37,7 @@ import (
 // AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
 // (http://aws.amazon.com/documentation/).
 //
-//  Endpoints
+// Endpoints
 //
 // The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
 // that maps to the US East (N. Virginia) region. Additional regions are available
@@ -48,7 +48,7 @@ import (
 // For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
 // in the AWS General Reference.
 //
-//  Recording API requests
+// Recording API requests
 //
 // STS supports AWS CloudTrail, which is a service that records AWS calls for
 // your AWS account and delivers log files to an Amazon S3 bucket. By using
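A sketch of pointing the client at a regional STS endpoint instead of the default global one, as the Endpoints section describes; the region and endpoint URL are illustrative and set explicitly rather than relying on any particular endpoint resolver behavior:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Override the default https://sts.amazonaws.com endpoint with a regional one.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:   aws.String("eu-west-1"),                              // illustrative
		Endpoint: aws.String("https://sts.eu-west-1.amazonaws.com"),    // illustrative
	}))
	svc := sts.New(sess)

	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Arn))
}
```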
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
index bb67332310b9709d35e92ebcd75f36d7e9c45684..c836416192da3691281691dfb0d16e177dfd068b 100644
--- a/vendor/github.com/davecgh/go-spew/LICENSE
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -1,6 +1,6 @@
 ISC License
 
-Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
 
 Permission to use, copy, modify, and distribute this software for any
 purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
index d42a0bc4afcca5d5113e4fd6e5f33110b920da61..8a4a6589a2d42ca8efa4a2dc8477f602d8dff328 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypass.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2015 Dave Collins <dave@davec.name>
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
 //
 // Permission to use, copy, modify, and distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
index e47a4e795138b537ec06b8bdb068157b6a6a6514..1fe3cf3d5d10ef9e2d4145186c691ccce698195c 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2015 Dave Collins <dave@davec.name>
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
 //
 // Permission to use, copy, modify, and distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
index 14f02dc15b7dd70886b25b055fe9db99bc0941ff..7c519ff47ac3cb8f39fa775e031dd9c5f27db58b 100644
--- a/vendor/github.com/davecgh/go-spew/spew/common.go
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
index 5552827238cc11ae2cb1c7040c46e0315fad6bca..2e3d22f312026ff2c863bbffcbc88b7f6fb942f5 100644
--- a/vendor/github.com/davecgh/go-spew/spew/config.go
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -67,6 +67,15 @@ type ConfigState struct {
 	// Google App Engine or with the "safe" build tag specified.
 	DisablePointerMethods bool
 
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
 	// ContinueOnMethod specifies whether or not recursion should continue once
 	// a custom error or Stringer interface is invoked.  The default, false,
 	// means it will print the results of invoking the custom error or Stringer
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
index 5be0c4060908e349051ee5c61194db883938b157..aacaac6f1e1e936ee0022c00e139756c9bdc2b3e 100644
--- a/vendor/github.com/davecgh/go-spew/spew/doc.go
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -91,6 +91,15 @@ The following configuration options are available:
 		which only accept pointer receivers from non-pointer variables.
 		Pointer method invocation is enabled by default.
 
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
 	* ContinueOnMethod
 		Enables recursion into types after invoking error and Stringer interface
 		methods. Recursion after method invocation is disabled by default.
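A small sketch of the two new options in use, configured on a ConfigState so dumps are stable enough to diff in tests (the example type is made up):

```go
package main

import (
	"github.com/davecgh/go-spew/spew"
)

func main() {
	type node struct {
		Name string
		Next *node
	}
	v := &node{Name: "a", Next: &node{Name: "b"}}

	// Pointer addresses and slice/map/channel capacities vary between runs,
	// so disable them when the dump is compared against a golden value.
	cs := spew.ConfigState{
		Indent:                  " ",
		DisablePointerAddresses: true,
		DisableCapacities:       true,
	}
	cs.Dump(v)
}
```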
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
index a0ff95e27e524b488f1b5d9f51ecfef5d64b7981..df1d582a728aec65edfe02b828f75d8a7def892b 100644
--- a/vendor/github.com/davecgh/go-spew/spew/dump.go
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
 	d.w.Write(closeParenBytes)
 
 	// Display pointer information.
-	if len(pointerChain) > 0 {
+	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
 		d.w.Write(openParenBytes)
 		for i, addr := range pointerChain {
 			if i > 0 {
@@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) {
 	case reflect.Map, reflect.String:
 		valueLen = v.Len()
 	}
-	if valueLen != 0 || valueCap != 0 {
+	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
 		d.w.Write(openParenBytes)
 		if valueLen != 0 {
 			d.w.Write(lenEqualsBytes)
 			printInt(d.w, int64(valueLen), 10)
 		}
-		if valueCap != 0 {
+		if !d.cs.DisableCapacities && valueCap != 0 {
 			if valueLen != 0 {
 				d.w.Write(spaceBytes)
 			}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
index ecf3b80e24bc054808efe0d5b5f14cf7d3502bbb..c49875bacbb88a2252d942c1ee9b84381305fa29 100644
--- a/vendor/github.com/davecgh/go-spew/spew/format.go
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
index d8233f542e126f9a76a6fad0826e48c9b8bd8d86..32c0e338825308f6b9b4d0407aa5682a23e2dc9c 100644
--- a/vendor/github.com/davecgh/go-spew/spew/spew.go
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
index 7fe879472536ed1069739de8449f3f462dbe5f87..8c732c1d85ca4e8b6b9e3ac719c84592ad9abbc2 100644
--- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -1,5 +1,9 @@
 # Changelog
 
+## v1.4.2 / 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
 ## v1.4.1 / 2016-10-04
 
 * Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
@@ -10,7 +14,7 @@
 
 ## v1.3.1 / 2016-06-28
 
-* windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
 
 ## v1.3.0 / 2016-04-19
 
@@ -75,7 +79,7 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
 
 ## v1.0.2 / 2014-08-17
 
-* [Fix] Missing create events on OS X. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
 * [Fix] Make ./path and path equivalent. (thanks @zhsso)
 
 ## v1.0.0 / 2014-08-15
@@ -138,7 +142,7 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
 
 ## v0.9.2 / 2014-08-17
 
-* [Backport] Fix missing create events on OS X. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
 
 ## v0.9.1 / 2014-06-12
 
@@ -157,7 +161,7 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
 ## v0.8.11 / 2013-11-02
 
 * [Doc] Add Changelog [#72][] (thanks @nathany)
-* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
 
 ## v0.8.10 / 2013-10-19
 
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
index 6a81ba48909262c52f04d81e2638b5e600730a77..828a60b24ba265b9dd88b15b0d942ed9e780ed90 100644
--- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -17,7 +17,7 @@ Please indicate that you have signed the CLA in your pull request.
 ### How fsnotify is Developed
 
 * Development is done on feature branches.
-* Tests are run on BSD, Linux, OS X and Windows.
+* Tests are run on BSD, Linux, macOS and Windows.
 * Pull requests are reviewed and [applied to master][am] using [hub][].
   * Maintainers may modify or squash commits rather than asking contributors to.
 * To issue a new release, the maintainers will:
@@ -44,7 +44,7 @@ This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/
 
 ### Testing
 
-fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows.
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
 
 Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
 
@@ -58,7 +58,7 @@ To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
 
 Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
 
-Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
 
 ### Maintainers
 
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
index 3c891e349bfd64fb200ee674cc688a5c998bfb9b..25180c6d683c1bddfe5cf41bf635260a7e5b4755 100644
--- a/vendor/github.com/fsnotify/fsnotify/README.md
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -8,14 +8,14 @@ fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather
 go get -u golang.org/x/sys/...
 ```
 
-Cross platform: Windows, Linux, BSD and OS X.
+Cross platform: Windows, Linux, BSD and macOS.
 
 |Adapter   |OS        |Status    |
 |----------|----------|----------|
 |inotify   |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
-|kqueue    |BSD, OS X, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|kqueue    |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
 |ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
-|FSEvents  |OS X          |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
+|FSEvents  |macOS         |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
 |FEN       |Solaris 11    |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
 |fanotify  |Linux 2.6.37+ | |
 |USN Journals |Windows    |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
index e7f55fee7a145226a927fbab4ff197524c5e348f..190bf0de575629521ae129b755007ca5cc874845 100644
--- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -9,6 +9,7 @@ package fsnotify
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 )
 
@@ -60,3 +61,6 @@ func (op Op) String() string {
 func (e Event) String() string {
 	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
 }
+
+// Common errors that can be reported by a watcher
+var ErrEventOverflow = errors.New("fsnotify queue overflow")
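A sketch of consuming the Errors channel and treating the new ErrEventOverflow value as a signal that events were dropped; the watched path is illustrative:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil { // illustrative path
		log.Fatal(err)
	}

	for {
		select {
		case ev := <-w.Events:
			log.Println("event:", ev)
		case err := <-w.Errors:
			if err == fsnotify.ErrEventOverflow {
				// The inotify queue overflowed; some events were dropped,
				// so the watched paths should be rescanned.
				log.Println("overflow: rescan watched paths")
				continue
			}
			log.Println("error:", err)
		}
	}
}
```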
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
index 9700df55e9ace34ba12b3ab941b31b8c530a7c44..bfa9dbc3c7c6528b0ceda7fa7e64a60ba8df0cb9 100644
--- a/vendor/github.com/fsnotify/fsnotify/inotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -36,7 +36,7 @@ type Watcher struct {
 // NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
 func NewWatcher() (*Watcher, error) {
 	// Create inotify fd
-	fd, errno := unix.InotifyInit()
+	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
 	if fd == -1 {
 		return nil, errno
 	}
@@ -245,6 +245,15 @@ func (w *Watcher) readEvents() {
 
 			mask := uint32(raw.Mask)
 			nameLen := uint32(raw.Len)
+
+			if mask&unix.IN_Q_OVERFLOW != 0 {
+				select {
+				case w.Errors <- ErrEventOverflow:
+				case <-w.done:
+					return
+				}
+			}
+
 			// If the event happened to the watched directory or the watched file, the kernel
 			// doesn't append the filename to the event, but we would like to always fill the
 			// the "Name" field with a valid filename. We retrieve the path of the watch from
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
index 3774919d7a3bd0b5e57cb649d1ad347474682d03..466ac86abcd8147c1a613df08756a7f4f3dbcdf4 100644
--- a/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -31,6 +31,7 @@ Julien Lefevre <julien.lefevr at gmail.com>
 Julien Schmidt <go-sql-driver at julienschmidt.com>
 Kamil Dziedzic <kamil at klecza.pl>
 Kevin Malachowski <kevin at chowski.com>
+Lennart Rudolph <lrudolph at hmc.edu>
 Leonardo YongUk Kim <dalinaum at gmail.com>
 Luca Looz <luca.looz92 at gmail.com>
 Lucas Liu <extrafliu at gmail.com>
@@ -44,6 +45,7 @@ Stan Putrya <root.vagner at gmail.com>
 Stanley Gunawan <gunawan.stanley at gmail.com>
 Xiaobing Jiang <s7v7nislands at gmail.com>
 Xiuming Chen <cc at cxm.cc>
+Zhenye Xie <xiezhenye at gmail.com>
 
 # Organizations
 
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
index 381d91825d29ea28376262dfa23751e5da46e1ed..617ad80fc5740a57562ff87c0e733d22bb208444 100644
--- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -3,16 +3,30 @@
 Changes:
 
  - Go 1.1 is no longer supported
- - Use decimals field from MySQL to format time types (#249)
+ - Use decimals fields in MySQL to format time types (#249)
  - Buffer optimizations (#269)
  - TLS ServerName defaults to the host (#283)
+ - Refactoring (#400, #410, #437)
+ - Adjusted documentation for second generation CloudSQL (#485)
 
-Bugfixes:
+New Features:
 
  - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
+ - Support for returning table alias on Columns() (#289, #359, #382)
+ - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Support for uint64 parameters with high bit set (#332, #345)
+ - Cleartext authentication plugin support (#327)
+ - Exported ParseDSN function and the Config struct (#403, #419, #429)
+ - Read / Write timeouts (#401)
+ - Support for JSON field type (#414)
+ - Support for multi-statements and multi-results (#411, #431)
+ - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
+
+Bugfixes:
+
  - Fixed handling of queries without columns and rows (#255)
  - Fixed a panic when SetKeepAlive() failed (#298)
- - Support receiving ERR packet while reading rows (#321)
+ - Handle ERR packets while reading rows (#321)
  - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
  - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
  - Actually zero out bytes in handshake response (#378)
@@ -20,13 +34,10 @@ Bugfixes:
  - Fixed tests with MySQL 5.7.9+ (#380)
  - QueryUnescape TLS config names (#397)
  - Fixed "broken pipe" error by writing to closed socket (#390)
-
-New Features:
- - Support for returning table alias on Columns() (#289, #359, #382)
- - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318)
- - Support for uint64 parameters with high bit set (#332, #345)
- - Cleartext authentication plugin support (#327)
-
+ - Fixed LOAD LOCAL DATA INFILE buffering (#424)
+ - Fixed parsing of floats into float64 when placeholders are used (#434)
+ - Fixed DSN tests with Go 1.7+ (#459)
+ - Handle ERR packets while waiting for EOF (#473)
 
 
 ## Version 1.2 (2014-06-03)
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
index c64aae264c8929e4dd02213b8e8df127071c836f..a110cf1d31dfb7a06c5d8099ffe835b369829979 100644
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -221,6 +221,14 @@ Note that this sets the location for time.Time values but does not change MySQL'
 
 Please keep in mind that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
 
+##### `maxAllowedPacket`
+```
+Type:          decimal number
+Default:       0
+```
+
+Max packet size allowed in bytes. Use `maxAllowedPacket=0` to automatically fetch the `max_allowed_packet` variable from the server.
+
 ##### `multiStatements`
 
 ```
@@ -233,7 +241,6 @@ Allow multiple statements in one query. While this allows batch queries, it also
 
 When `multiStatements` is used, `?` parameters must only be used in the first statement.
 
-
 ##### `parseTime`
 
 ```
@@ -254,7 +261,6 @@ Default:        0
 
 I/O read timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
 
-
 ##### `strict`
 
 ```
@@ -263,10 +269,11 @@ Valid Values:   true, false
 Default:        false
 ```
 
-`strict=true` enables the strict mode in which MySQL warnings are treated as errors.
+`strict=true` enables a driver-side strict mode in which MySQL warnings are treated as errors. This mode should not be used in production as it may lead to data corruption in certain situations.
 
-By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for an DSN example.
+A server-side strict mode, which is safe for production use, can be set via the [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html) system variable.
 
+By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes.
 
 ##### `timeout`
 
@@ -277,7 +284,6 @@ Default:        OS default
 
 *Driver* side connection timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
 
-
 ##### `tls`
 
 ```
@@ -288,7 +294,6 @@ Default:        false
 
 `tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
 
-
 ##### `writeTimeout`
 
 ```
@@ -322,9 +327,9 @@ root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
 user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
 ```
 
-Use the [strict mode](#strict) but ignore notes:
+Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
 ```
-user:password@/dbname?strict=true&sql_notes=false
+user:password@/dbname?sql_mode=TRADITIONAL
 ```
 
 TCP via IPv6:
@@ -337,11 +342,16 @@ TCP on a remote host, e.g. Amazon RDS:
 id:password@tcp(your-amazonaws-uri.com:3306)/dbname
 ```
 
-Google Cloud SQL on App Engine:
+Google Cloud SQL on App Engine (First Generation MySQL Server):
 ```
 user@cloudsql(project-id:instance-name)/dbname
 ```
 
+Google Cloud SQL on App Engine (Second Generation MySQL Server):
+```
+user@cloudsql(project-id:regionname:instance-name)/dbname
+```
+
 TCP using default port (3306) on localhost:
 ```
 user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
index c3899de0eb1219e07a3e0c2446b2de981fe2a4e2..d82c728f3b86b0eda1adb01e36293615eb2016fe 100644
--- a/vendor/github.com/go-sql-driver/mysql/connection.go
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -22,7 +22,7 @@ type mysqlConn struct {
 	affectedRows     uint64
 	insertId         uint64
 	cfg              *Config
-	maxPacketAllowed int
+	maxAllowedPacket int
 	maxWriteSize     int
 	writeTimeout     time.Duration
 	flags            clientFlag
@@ -135,6 +135,11 @@ func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
 }
 
 func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
+	// The number of ? placeholders must match len(args)
+	if strings.Count(query, "?") != len(args) {
+		return "", driver.ErrSkip
+	}
+
 	buf := mc.buf.takeCompleteBuffer()
 	if buf == nil {
 		// can not take the buffer. Something must be wrong with the connection
@@ -241,7 +246,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
 			return "", driver.ErrSkip
 		}
 
-		if len(buf)+4 > mc.maxPacketAllowed {
+		if len(buf)+4 > mc.maxAllowedPacket {
 			return "", driver.ErrSkip
 		}
 	}
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
index 899f955fbb33dd324c07b13add7cb0cd98a0de6d..562ddeffbe8e1f0a8962653dc12951af83427e39 100644
--- a/vendor/github.com/go-sql-driver/mysql/driver.go
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -50,7 +50,7 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
 
 	// New mysqlConn
 	mc := &mysqlConn{
-		maxPacketAllowed: maxPacketSize,
+		maxAllowedPacket: maxPacketSize,
 		maxWriteSize:     maxPacketSize - 1,
 	}
 	mc.cfg, err = ParseDSN(dsn)
@@ -109,15 +109,19 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
 		return nil, err
 	}
 
-	// Get max allowed packet size
-	maxap, err := mc.getSystemVar("max_allowed_packet")
-	if err != nil {
-		mc.Close()
-		return nil, err
+	if mc.cfg.MaxAllowedPacket > 0 {
+		mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
+	} else {
+		// Get max allowed packet size
+		maxap, err := mc.getSystemVar("max_allowed_packet")
+		if err != nil {
+			mc.Close()
+			return nil, err
+		}
+		mc.maxAllowedPacket = stringToInt(maxap) - 1
 	}
-	mc.maxPacketAllowed = stringToInt(maxap) - 1
-	if mc.maxPacketAllowed < maxPacketSize {
-		mc.maxWriteSize = mc.maxPacketAllowed
+	if mc.maxAllowedPacket < maxPacketSize {
+		mc.maxWriteSize = mc.maxAllowedPacket
 	}
 
 	// Handle DSN Params
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
index 73138bc5732b6ec8cb7554ded9a0872e1c3494cc..896be9ef5332f62b33fafa7e38e36ff64ffb4369 100644
--- a/vendor/github.com/go-sql-driver/mysql/dsn.go
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -15,6 +15,7 @@ import (
 	"fmt"
 	"net"
 	"net/url"
+	"strconv"
 	"strings"
 	"time"
 )
@@ -28,19 +29,20 @@ var (
 
 // Config is a configuration parsed from a DSN string
 type Config struct {
-	User         string            // Username
-	Passwd       string            // Password (requires User)
-	Net          string            // Network type
-	Addr         string            // Network address (requires Net)
-	DBName       string            // Database name
-	Params       map[string]string // Connection parameters
-	Collation    string            // Connection collation
-	Loc          *time.Location    // Location for time.Time values
-	TLSConfig    string            // TLS configuration name
-	tls          *tls.Config       // TLS configuration
-	Timeout      time.Duration     // Dial timeout
-	ReadTimeout  time.Duration     // I/O read timeout
-	WriteTimeout time.Duration     // I/O write timeout
+	User             string            // Username
+	Passwd           string            // Password (requires User)
+	Net              string            // Network type
+	Addr             string            // Network address (requires Net)
+	DBName           string            // Database name
+	Params           map[string]string // Connection parameters
+	Collation        string            // Connection collation
+	Loc              *time.Location    // Location for time.Time values
+	MaxAllowedPacket int               // Max packet size allowed
+	TLSConfig        string            // TLS configuration name
+	tls              *tls.Config       // TLS configuration
+	Timeout          time.Duration     // Dial timeout
+	ReadTimeout      time.Duration     // I/O read timeout
+	WriteTimeout     time.Duration     // I/O write timeout
 
 	AllowAllFiles           bool // Allow all files to be used with LOAD DATA LOCAL INFILE
 	AllowCleartextPasswords bool // Allows the cleartext client side plugin
@@ -222,6 +224,17 @@ func (cfg *Config) FormatDSN() string {
 		buf.WriteString(cfg.WriteTimeout.String())
 	}
 
+	if cfg.MaxAllowedPacket > 0 {
+		if hasParam {
+			buf.WriteString("&maxAllowedPacket=")
+		} else {
+			hasParam = true
+			buf.WriteString("?maxAllowedPacket=")
+		}
+		buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
+
+	}
+
 	// other params
 	if cfg.Params != nil {
 		for param, value := range cfg.Params {
@@ -496,7 +509,11 @@ func parseDSNParams(cfg *Config, params string) (err error) {
 			if err != nil {
 				return
 			}
-
+		case "maxAllowedPacket":
+			cfg.MaxAllowedPacket, err = strconv.Atoi(value)
+			if err != nil {
+				return
+			}
 		default:
 			// lazy init
 			if cfg.Params == nil {
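
A short sketch of the new `maxAllowedPacket` DSN parameter; when it is set, the driver uses the value directly instead of querying the server's `max_allowed_packet` at connect time. The 16 MiB value is illustrative:

```go
import (
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	cfg, err := mysql.ParseDSN("user:password@tcp(localhost:3306)/dbname?maxAllowedPacket=16777216")
	if err != nil {
		log.Fatal(err)
	}
	log.Println(cfg.MaxAllowedPacket) // 16777216, used instead of a getSystemVar round trip
	log.Println(cfg.FormatDSN())      // round-trips the parameter back into the DSN
}
```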
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
index 60253994288c368d97fe3ef7742a776a34c83269..f06752b02e3a3bdce24d39d074072ca1ac0166b6 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -80,7 +80,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
 func (mc *mysqlConn) writePacket(data []byte) error {
 	pktLen := len(data) - 4
 
-	if pktLen > mc.maxPacketAllowed {
+	if pktLen > mc.maxAllowedPacket {
 		return ErrPktTooLarge
 	}
 
@@ -786,7 +786,7 @@ func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
 
 // http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
 func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
-	maxLen := stmt.mc.maxPacketAllowed - 1
+	maxLen := stmt.mc.maxAllowedPacket - 1
 	pktLen := maxLen
 
 	// After the header (bytes 0-3) follows before the data:
@@ -977,7 +977,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
 					paramTypes[i+i] = fieldTypeString
 					paramTypes[i+i+1] = 0x00
 
-					if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 {
+					if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
 						paramValues = appendLengthEncodedInteger(paramValues,
 							uint64(len(v)),
 						)
@@ -999,7 +999,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
 				paramTypes[i+i] = fieldTypeString
 				paramTypes[i+i+1] = 0x00
 
-				if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 {
+				if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
 					paramValues = appendLengthEncodedInteger(paramValues,
 						uint64(len(v)),
 					)
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
index 04dcb881305e23c0d6fbbf1aae481c152f57cb0f..aa207298f997665117f3ba88e65646f95c83f08a 100644
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
 // int32, int64, uint32, uint64, bool, and enum
 // protocol buffer types.
 func DecodeVarint(buf []byte) (x uint64, n int) {
-	// x, n already 0
 	for shift := uint(0); shift < 64; shift += 7 {
 		if n >= len(buf) {
 			return 0, 0
@@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
 	return 0, 0
 }
 
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
-	// x, err already 0
-
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
 	i := p.index
 	l := len(p.buf)
 
@@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
 	return
 }
 
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+	// x -= 0x80 << 63 // Always zero.
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
+
 // DecodeFixed64 reads a 64-bit integer from the Buffer.
 // This is the format for the
 // fixed64, sfixed64, and double protocol buffer types.
@@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error {
 // Buffer and places the decoded result in pb.  If the struct
 // underlying pb does not match the data in the buffer, the results can be
 // unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
 func (p *Buffer) Unmarshal(pb Message) error {
 	// If the object can unmarshal itself, let it.
 	if u, ok := pb.(Unmarshaler); ok {
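
The rewritten DecodeVarint above keeps a fast path: single-byte values return immediately, buffers with at least 10 bytes remaining go through the unrolled loop, and everything else falls back to decodeVarintSlow. A small round-trip sanity-check sketch using the package's exported helpers:

```go
import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	buf := proto.EncodeVarint(300) // 300 encodes as 0xAC 0x02
	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2
}
```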
diff --git a/vendor/github.com/google/go-github/github/event_types.go b/vendor/github.com/google/go-github/github/event_types.go
index da18f7105e04625cbe4fff236e874fdde9ac42c1..16f89b04bdc92cb430f05cfb72a139c1dca1d227 100644
--- a/vendor/github.com/google/go-github/github/event_types.go
+++ b/vendor/github.com/google/go-github/github/event_types.go
@@ -172,14 +172,6 @@ type IntegrationInstallationRepositoriesEvent struct {
 	Sender              *User         `json:"sender,omitempty"`
 }
 
-// Installation represents a GitHub integration installation.
-type Installation struct {
-	ID              *int    `json:"id,omitempty"`
-	Account         *User   `json:"account,omitempty"`
-	AccessTokensURL *string `json:"access_tokens_url,omitempty"`
-	RepositoriesURL *string `json:"repositories_url,omitempty"`
-}
-
 // IssueCommentEvent is triggered when an issue comment is created on an issue
 // or pull request.
 // The Webhook event name is "issue_comment".
@@ -459,14 +451,14 @@ type StatusEvent struct {
 	Branches    []*Branch `json:"branches,omitempty"`
 
 	// The following fields are only populated by Webhook events.
-	ID        *int             `json:"id,omitempty"`
-	Name      *string          `json:"name,omitempty"`
-	Context   *string          `json:"context,omitempty"`
-	Commit    *PushEventCommit `json:"commit,omitempty"`
-	CreatedAt *Timestamp       `json:"created_at,omitempty"`
-	UpdatedAt *Timestamp       `json:"updated_at,omitempty"`
-	Repo      *Repository      `json:"repository,omitempty"`
-	Sender    *User            `json:"sender,omitempty"`
+	ID        *int              `json:"id,omitempty"`
+	Name      *string           `json:"name,omitempty"`
+	Context   *string           `json:"context,omitempty"`
+	Commit    *RepositoryCommit `json:"commit,omitempty"`
+	CreatedAt *Timestamp        `json:"created_at,omitempty"`
+	UpdatedAt *Timestamp        `json:"updated_at,omitempty"`
+	Repo      *Repository       `json:"repository,omitempty"`
+	Sender    *User             `json:"sender,omitempty"`
 }
 
 // TeamAddEvent is triggered when a repository is added to a team.
diff --git a/vendor/github.com/google/go-github/github/github.go b/vendor/github.com/google/go-github/github/github.go
index f04a011cf68abada75a68096e42a0e644f978e65..465574b9f2e05d4e7f79c363ece9ebbdac88e2ee 100644
--- a/vendor/github.com/google/go-github/github/github.go
+++ b/vendor/github.com/google/go-github/github/github.go
@@ -90,6 +90,9 @@ const (
 
 	// https://developer.github.com/changes/2016-09-14-projects-api/
 	mediaTypeProjectsPreview = "application/vnd.github.inertia-preview+json"
+
+	// https://developer.github.com/changes/2016-09-14-Integrations-Early-Access/
+	mediaTypeIntegrationPreview = "application/vnd.github.machine-man-preview+json"
 )
 
 // A Client manages communication with the GitHub API.
@@ -120,6 +123,7 @@ type Client struct {
 	Gists          *GistsService
 	Git            *GitService
 	Gitignores     *GitignoresService
+	Integrations   *IntegrationsService
 	Issues         *IssuesService
 	Organizations  *OrganizationsService
 	PullRequests   *PullRequestsService
@@ -190,6 +194,7 @@ func NewClient(httpClient *http.Client) *Client {
 	c.Gists = (*GistsService)(&c.common)
 	c.Git = (*GitService)(&c.common)
 	c.Gitignores = (*GitignoresService)(&c.common)
+	c.Integrations = (*IntegrationsService)(&c.common)
 	c.Issues = (*IssuesService)(&c.common)
 	c.Licenses = (*LicensesService)(&c.common)
 	c.Migrations = (*MigrationService)(&c.common)
@@ -494,6 +499,24 @@ func (r *RateLimitError) Error() string {
 		r.Response.StatusCode, r.Message, r.Rate.Reset.Time.Sub(time.Now()))
 }
 
+// AbuseRateLimitError occurs when GitHub returns 403 Forbidden response with the
+// "documentation_url" field value equal to "https://developer.github.com/v3#abuse-rate-limits".
+type AbuseRateLimitError struct {
+	Response *http.Response // HTTP response that caused this error
+	Message  string         `json:"message"` // error message
+
+	// RetryAfter is provided with some abuse rate limit errors. If present,
+	// it is the amount of time that the client should wait before retrying.
+	// Otherwise, the client should try again later (after an unspecified amount of time).
+	RetryAfter *time.Duration
+}
+
+func (r *AbuseRateLimitError) Error() string {
+	return fmt.Sprintf("%v %v: %d %v",
+		r.Response.Request.Method, sanitizeURL(r.Response.Request.URL),
+		r.Response.StatusCode, r.Message)
+}
+
 // sanitizeURL redacts the client_secret parameter from the URL which may be
 // exposed to the user, specifically in the ErrorResponse error message.
 func sanitizeURL(uri *url.URL) *url.URL {
@@ -564,6 +587,20 @@ func CheckResponse(r *http.Response) error {
 			Response: errorResponse.Response,
 			Message:  errorResponse.Message,
 		}
+	case r.StatusCode == http.StatusForbidden && errorResponse.DocumentationURL == "https://developer.github.com/v3#abuse-rate-limits":
+		abuseRateLimitError := &AbuseRateLimitError{
+			Response: errorResponse.Response,
+			Message:  errorResponse.Message,
+		}
+		if v := r.Header["Retry-After"]; len(v) > 0 {
+			// According to GitHub support, the "Retry-After" header value will be
+			// an integer which represents the number of seconds that one should
+			// wait before resuming making requests.
+			retryAfterSeconds, _ := strconv.ParseInt(v[0], 10, 64) // Error handling is noop.
+			retryAfter := time.Duration(retryAfterSeconds) * time.Second
+			abuseRateLimitError.RetryAfter = &retryAfter
+		}
+		return abuseRateLimitError
 	default:
 		return errorResponse
 	}
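
A hedged sketch of handling the new error type at a call site, written against the vendored API of this snapshot and assuming an already-configured *github.Client named client:

```go
import (
	"log"
	"time"

	"github.com/google/go-github/github"
)

func listWithBackoff(client *github.Client) {
	repos, _, err := client.Repositories.List("octocat", nil)
	if arlErr, ok := err.(*github.AbuseRateLimitError); ok {
		// RetryAfter is only set when GitHub sends a Retry-After header.
		if arlErr.RetryAfter != nil {
			time.Sleep(*arlErr.RetryAfter)
		}
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d repositories", len(repos))
}
```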
diff --git a/vendor/github.com/google/go-github/github/integration.go b/vendor/github.com/google/go-github/github/integration.go
new file mode 100644
index 0000000000000000000000000000000000000000..b8d77ca4f85325a61bd3d6ea592569bc8f27bb5b
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/integration.go
@@ -0,0 +1,38 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+// IntegrationsService provides access to the installation related functions
+// in the GitHub API.
+//
+// GitHub API docs: https://developer.github.com/v3/integrations/
+type IntegrationsService service
+
+// ListInstallations lists the installations that the current integration has.
+//
+// GitHub API docs: https://developer.github.com/v3/integrations/#find-installations
+func (s *IntegrationsService) ListInstallations(opt *ListOptions) ([]*Installation, *Response, error) {
+	u, err := addOptions("integration/installations", opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// TODO: remove custom Accept header when this API fully launches.
+	req.Header.Set("Accept", mediaTypeIntegrationPreview)
+
+	i := new([]*Installation)
+	resp, err := s.client.Do(req, &i)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return *i, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/integration_installation.go b/vendor/github.com/google/go-github/github/integration_installation.go
new file mode 100644
index 0000000000000000000000000000000000000000..aa59bfe4b512f4873198c73273052458e851b8e8
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/integration_installation.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+// Installation represents a GitHub integration installation.
+type Installation struct {
+	ID              *int    `json:"id,omitempty"`
+	Account         *User   `json:"account,omitempty"`
+	AccessTokensURL *string `json:"access_tokens_url,omitempty"`
+	RepositoriesURL *string `json:"repositories_url,omitempty"`
+}
+
+func (i Installation) String() string {
+	return Stringify(i)
+}
+
+// ListRepos lists the repositories that the current installation has access to.
+//
+// GitHub API docs: https://developer.github.com/v3/integrations/installations/#list-repositories
+func (s *IntegrationsService) ListRepos(opt *ListOptions) ([]*Repository, *Response, error) {
+	u, err := addOptions("installation/repositories", opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// TODO: remove custom Accept header when this API fully launches.
+	req.Header.Set("Accept", mediaTypeIntegrationPreview)
+
+	var r struct {
+		Repositories []*Repository `json:"repositories"`
+	}
+	resp, err := s.client.Do(req, &r)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return r.Repositories, resp, err
+}
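
A short usage sketch for the two new calls, assuming a client authenticated as an integration (installation-scoped for ListRepos); both methods set the machine-man preview Accept header themselves:

```go
import (
	"log"

	"github.com/google/go-github/github"
)

func dumpInstallations(client *github.Client) {
	installations, _, err := client.Integrations.ListInstallations(&github.ListOptions{PerPage: 50})
	if err != nil {
		log.Fatal(err)
	}
	for _, inst := range installations {
		log.Println(inst.String())
	}

	// With an installation-scoped token, list the repositories the installation can see.
	repos, _, err := client.Integrations.ListRepos(nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d repositories accessible", len(repos))
}
```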
diff --git a/vendor/github.com/google/go-github/github/pulls.go b/vendor/github.com/google/go-github/github/pulls.go
index a823c4333e3565e91f0e40585830a9b8f09c87a5..3c883658db4dafd2feca5e43c3fbf4cbc80ad690 100644
--- a/vendor/github.com/google/go-github/github/pulls.go
+++ b/vendor/github.com/google/go-github/github/pulls.go
@@ -18,33 +18,35 @@ type PullRequestsService service
 
 // PullRequest represents a GitHub pull request on a repository.
 type PullRequest struct {
-	ID           *int       `json:"id,omitempty"`
-	Number       *int       `json:"number,omitempty"`
-	State        *string    `json:"state,omitempty"`
-	Title        *string    `json:"title,omitempty"`
-	Body         *string    `json:"body,omitempty"`
-	CreatedAt    *time.Time `json:"created_at,omitempty"`
-	UpdatedAt    *time.Time `json:"updated_at,omitempty"`
-	ClosedAt     *time.Time `json:"closed_at,omitempty"`
-	MergedAt     *time.Time `json:"merged_at,omitempty"`
-	User         *User      `json:"user,omitempty"`
-	Merged       *bool      `json:"merged,omitempty"`
-	Mergeable    *bool      `json:"mergeable,omitempty"`
-	MergedBy     *User      `json:"merged_by,omitempty"`
-	Comments     *int       `json:"comments,omitempty"`
-	Commits      *int       `json:"commits,omitempty"`
-	Additions    *int       `json:"additions,omitempty"`
-	Deletions    *int       `json:"deletions,omitempty"`
-	ChangedFiles *int       `json:"changed_files,omitempty"`
-	URL          *string    `json:"url,omitempty"`
-	HTMLURL      *string    `json:"html_url,omitempty"`
-	IssueURL     *string    `json:"issue_url,omitempty"`
-	StatusesURL  *string    `json:"statuses_url,omitempty"`
-	DiffURL      *string    `json:"diff_url,omitempty"`
-	PatchURL     *string    `json:"patch_url,omitempty"`
-	Assignee     *User      `json:"assignee,omitempty"`
-	Assignees    []*User    `json:"assignees,omitempty"`
-	Milestone    *Milestone `json:"milestone,omitempty"`
+	ID                *int       `json:"id,omitempty"`
+	Number            *int       `json:"number,omitempty"`
+	State             *string    `json:"state,omitempty"`
+	Title             *string    `json:"title,omitempty"`
+	Body              *string    `json:"body,omitempty"`
+	CreatedAt         *time.Time `json:"created_at,omitempty"`
+	UpdatedAt         *time.Time `json:"updated_at,omitempty"`
+	ClosedAt          *time.Time `json:"closed_at,omitempty"`
+	MergedAt          *time.Time `json:"merged_at,omitempty"`
+	User              *User      `json:"user,omitempty"`
+	Merged            *bool      `json:"merged,omitempty"`
+	Mergeable         *bool      `json:"mergeable,omitempty"`
+	MergedBy          *User      `json:"merged_by,omitempty"`
+	Comments          *int       `json:"comments,omitempty"`
+	Commits           *int       `json:"commits,omitempty"`
+	Additions         *int       `json:"additions,omitempty"`
+	Deletions         *int       `json:"deletions,omitempty"`
+	ChangedFiles      *int       `json:"changed_files,omitempty"`
+	URL               *string    `json:"url,omitempty"`
+	HTMLURL           *string    `json:"html_url,omitempty"`
+	IssueURL          *string    `json:"issue_url,omitempty"`
+	StatusesURL       *string    `json:"statuses_url,omitempty"`
+	DiffURL           *string    `json:"diff_url,omitempty"`
+	PatchURL          *string    `json:"patch_url,omitempty"`
+	ReviewCommentsURL *string    `json:"review_comments_url,omitempty"`
+	ReviewCommentURL  *string    `json:"review_comment_url,omitempty"`
+	Assignee          *User      `json:"assignee,omitempty"`
+	Assignees         []*User    `json:"assignees,omitempty"`
+	Milestone         *Milestone `json:"milestone,omitempty"`
 
 	Head *PullRequestBranch `json:"head,omitempty"`
 	Base *PullRequestBranch `json:"base,omitempty"`
diff --git a/vendor/github.com/google/go-github/github/repos.go b/vendor/github.com/google/go-github/github/repos.go
index 2bcaacc3f7ee31c47bbb612fe3bb820239cee594..98e4ac5ac11ef8f6d4892da0fe509ef0f9a07f1b 100644
--- a/vendor/github.com/google/go-github/github/repos.go
+++ b/vendor/github.com/google/go-github/github/repos.go
@@ -51,11 +51,14 @@ type Repository struct {
 	License *License `json:"license,omitempty"`
 
 	// Additional mutable fields when creating and editing a repository
-	Private      *bool `json:"private"`
-	HasIssues    *bool `json:"has_issues"`
-	HasWiki      *bool `json:"has_wiki"`
-	HasPages     *bool `json:"has_pages"`
-	HasDownloads *bool `json:"has_downloads"`
+	Private           *bool   `json:"private"`
+	HasIssues         *bool   `json:"has_issues"`
+	HasWiki           *bool   `json:"has_wiki"`
+	HasPages          *bool   `json:"has_pages"`
+	HasDownloads      *bool   `json:"has_downloads"`
+	LicenseTemplate   *string `json:"license_template,omitempty"`
+	GitignoreTemplate *string `json:"gitignore_template,omitempty"`
+
 	// Creating an organization repository. Required for non-owners.
 	TeamID *int `json:"team_id"`
 
diff --git a/vendor/github.com/google/go-github/github/repos_commits.go b/vendor/github.com/google/go-github/github/repos_commits.go
index b5e6856bdcdc033417e64b65d953040f290c115f..c1359f44999b48a63b38415ca29df1614f4e229f 100644
--- a/vendor/github.com/google/go-github/github/repos_commits.go
+++ b/vendor/github.com/google/go-github/github/repos_commits.go
@@ -15,13 +15,15 @@ import (
 // Note that it's wrapping a Commit, so author/committer information is in two places,
 // but they contain different details about them: in RepositoryCommit "github details", in Commit - "git details".
 type RepositoryCommit struct {
-	SHA       *string  `json:"sha,omitempty"`
-	Commit    *Commit  `json:"commit,omitempty"`
-	Author    *User    `json:"author,omitempty"`
-	Committer *User    `json:"committer,omitempty"`
-	Parents   []Commit `json:"parents,omitempty"`
-	Message   *string  `json:"message,omitempty"`
-	HTMLURL   *string  `json:"html_url,omitempty"`
+	SHA         *string  `json:"sha,omitempty"`
+	Commit      *Commit  `json:"commit,omitempty"`
+	Author      *User    `json:"author,omitempty"`
+	Committer   *User    `json:"committer,omitempty"`
+	Parents     []Commit `json:"parents,omitempty"`
+	Message     *string  `json:"message,omitempty"`
+	HTMLURL     *string  `json:"html_url,omitempty"`
+	URL         *string  `json:"url,omitempty"`
+	CommentsURL *string  `json:"comments_url,omitempty"`
 
 	// Details about how many changes were made in this commit. Only filled in during GetCommit!
 	Stats *CommitStats `json:"stats,omitempty"`
diff --git a/vendor/github.com/google/go-github/github/repos_releases.go b/vendor/github.com/google/go-github/github/repos_releases.go
index e889b0d5927b790c1299b1b3affbfad4c1bd9e3e..331a4b7c0f2a8ef23d5cd93d71da08fc07a49b21 100644
--- a/vendor/github.com/google/go-github/github/repos_releases.go
+++ b/vendor/github.com/google/go-github/github/repos_releases.go
@@ -34,7 +34,7 @@ type RepositoryRelease struct {
 	UploadURL       *string        `json:"upload_url,omitempty"`
 	ZipballURL      *string        `json:"zipball_url,omitempty"`
 	TarballURL      *string        `json:"tarball_url,omitempty"`
-	Author          *CommitAuthor  `json:"author,omitempty"`
+	Author          *User          `json:"author,omitempty"`
 }
 
 func (r RepositoryRelease) String() string {
diff --git a/vendor/github.com/google/go-github/github/repos_traffic.go b/vendor/github.com/google/go-github/github/repos_traffic.go
index b6c8d83bcc12fd35dbbcb7368f7a7cd76976d105..9688b58be648d29a851a0db7eb83fb7c6c87a1ab 100644
--- a/vendor/github.com/google/go-github/github/repos_traffic.go
+++ b/vendor/github.com/google/go-github/github/repos_traffic.go
@@ -5,11 +5,7 @@
 
 package github
 
-import (
-	"fmt"
-	"strconv"
-	"time"
-)
+import "fmt"
 
 // TrafficReferrer represents information about traffic from a referrer.
 type TrafficReferrer struct {
@@ -26,30 +22,11 @@ type TrafficPath struct {
 	Uniques *int    `json:"uniques,omitempty"`
 }
 
-// TimestampMS represents a timestamp as used in datapoint.
-//
-// It's only used to parse the result given by the API which are unix timestamp in milliseonds.
-type TimestampMS struct {
-	time.Time
-}
-
-// UnmarshalJSON parse unix timestamp.
-func (t *TimestampMS) UnmarshalJSON(b []byte) error {
-	s := string(b)
-	i, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		return err
-	}
-	// We can drop the reaminder as returned values are days and it will always be 0
-	*t = TimestampMS{time.Unix(i/1000, 0)}
-	return nil
-}
-
 // TrafficData represents information about a specific timestamp in views or clones list.
 type TrafficData struct {
-	Timestamp *TimestampMS `json:"timestamp,omitempty"`
-	Count     *int         `json:"count,omitempty"`
-	Uniques   *int         `json:"uniques,omitempty"`
+	Timestamp *Timestamp `json:"timestamp,omitempty"`
+	Count     *int       `json:"count,omitempty"`
+	Uniques   *int       `json:"uniques,omitempty"`
 }
 
 // TrafficViews represents information about the number of views in the last 14 days.
diff --git a/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md b/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..2827b7d3fa277e2daab95ea3cfaff1c2bfc1389e
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md
@@ -0,0 +1,27 @@
+Want to contribute? Great! First, read this page (including the small print at the end).
+
+### Before you contribute
+Before we can use your code, you must sign the
+[Google Individual Contributor License Agreement]
+(https://cla.developers.google.com/about/google-individual)
+(CLA), which you can do online. The CLA is necessary mainly because you own the
+copyright to your changes, even after your contribution becomes part of our
+codebase, so we need your permission to use and distribute your code. We also
+need to be sure of various other things—for instance that you'll tell us if you
+know that your code infringes on other people's patents. You don't have to sign
+the CLA until after you've submitted your code for review and a member has
+approved it, but you must do it before we can put your code into our codebase.
+Before you start working on a larger contribution, you should get in touch with
+us first through the issue tracker with your idea so that we can help out and
+possibly guide you. Coordinating up front makes it much easier to avoid
+frustration later on.
+
+### Code reviews
+All submissions, including submissions by project members, require review. We
+use Github pull requests for this purpose.
+
+### The small print
+Contributions made by corporations are covered by a different agreement than
+the one above, the
+[Software Grant and Corporate Contributor License Agreement]
+(https://cla.developers.google.com/about/google-corporate).
diff --git a/vendor/github.com/googleapis/gax-go/LICENSE b/vendor/github.com/googleapis/gax-go/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..6d16b6578a2f0482d26c97ab98597e3912f4eaba
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2016, Google Inc.
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/googleapis/gax-go/README.md b/vendor/github.com/googleapis/gax-go/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..38ebdcf63de7e89d2415ee6f821ea3744e0b4f89
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/README.md
@@ -0,0 +1,11 @@
+Google API Extensions for Go
+============================
+
+[![Build Status](https://travis-ci.org/googleapis/gax-go.svg?branch=master)](https://travis-ci.org/googleapis/gax-go)
+[![Code Coverage](https://img.shields.io/codecov/c/github/googleapis/gax-go.svg)](https://codecov.io/github/googleapis/gax-go)
+
+Google API Extensions for Go (gax-go) is a set of modules which aids the
+development of APIs for clients and servers based on `gRPC` and Google API
+conventions.
+
+This project is currently experimental and not supported.
diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/call_option.go
new file mode 100644
index 0000000000000000000000000000000000000000..4ba1cdfe80cd0bfb1ee909a1f220646b1ad212af
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/call_option.go
@@ -0,0 +1,136 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"math/rand"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+// CallOption is an option used by Invoke to control behaviors of RPC calls.
+// CallOption works by modifying relevant fields of CallSettings.
+type CallOption interface {
+	// Resolve applies the option by modifying cs.
+	Resolve(cs *CallSettings)
+}
+
+// Retryer is used by Invoke to determine retry behavior.
+type Retryer interface {
+	// Retry reports whether a request should be retried and how long to pause before retrying
+	// if the previous attempt returned with err. Invoke never calls Retry with nil error.
+	Retry(err error) (pause time.Duration, shouldRetry bool)
+}
+
+type retryerOption func() Retryer
+
+func (o retryerOption) Resolve(s *CallSettings) {
+	s.Retry = o
+}
+
+// WithRetry sets CallSettings.Retry to fn.
+func WithRetry(fn func() Retryer) CallOption {
+	return retryerOption(fn)
+}
+
+// OnCodes returns a Retryer that retries if and only if
+// the previous attempt returns a GRPC error whose error code is stored in cc.
+// Pause times between retries are specified by bo.
+//
+// bo is only used for its parameters; each Retryer has its own copy.
+func OnCodes(cc []codes.Code, bo Backoff) Retryer {
+	return &boRetryer{
+		backoff: bo,
+		codes:   append([]codes.Code(nil), cc...),
+	}
+}
+
+type boRetryer struct {
+	backoff Backoff
+	codes   []codes.Code
+}
+
+func (r *boRetryer) Retry(err error) (time.Duration, bool) {
+	c := grpc.Code(err)
+	for _, rc := range r.codes {
+		if c == rc {
+			return r.backoff.Pause(), true
+		}
+	}
+	return 0, false
+}
+
+// Backoff implements exponential backoff.
+// The wait time between retries is a random value between 0 and the "retry envelope".
+// The envelope starts at Initial and increases by the factor of Multiplier every retry,
+// but is capped at Max.
+type Backoff struct {
+	// Initial is the initial value of the retry envelope, defaults to 1 second.
+	Initial time.Duration
+
+	// Max is the maximum value of the retry envelope, defaults to 30 seconds.
+	Max time.Duration
+
+	// Multiplier is the factor by which the retry envelope increases.
+	// It should be greater than 1 and defaults to 2.
+	Multiplier float64
+
+	// cur is the current retry envelope
+	cur time.Duration
+}
+
+func (bo *Backoff) Pause() time.Duration {
+	if bo.Initial == 0 {
+		bo.Initial = time.Second
+	}
+	if bo.cur == 0 {
+		bo.cur = bo.Initial
+	}
+	if bo.Max == 0 {
+		bo.Max = 30 * time.Second
+	}
+	if bo.Multiplier < 1 {
+		bo.Multiplier = 2
+	}
+	d := time.Duration(rand.Int63n(int64(bo.cur)))
+	bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
+	if bo.cur > bo.Max {
+		bo.cur = bo.Max
+	}
+	return d
+}
+
+type CallSettings struct {
+	// Retry returns a Retryer to be used to control retry logic of a method call.
+	// If Retry is nil or the returned Retryer is nil, the call will not be retried.
+	Retry func() Retryer
+}
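
A minimal sketch of assembling a retry policy from the pieces above; the retried codes and backoff values are illustrative:

```go
import (
	"time"

	"github.com/googleapis/gax-go"
	"google.golang.org/grpc/codes"
)

var withRetry = gax.WithRetry(func() gax.Retryer {
	return gax.OnCodes([]codes.Code{
		codes.Unavailable,
		codes.DeadlineExceeded,
	}, gax.Backoff{
		Initial:    100 * time.Millisecond,
		Max:        5 * time.Second,
		Multiplier: 2,
	})
})
```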
diff --git a/vendor/github.com/googleapis/gax-go/gax.go b/vendor/github.com/googleapis/gax-go/gax.go
new file mode 100644
index 0000000000000000000000000000000000000000..c7e4ce91bb69237fafb477f67d1d9e7f6c142fca
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/gax.go
@@ -0,0 +1,32 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+const Version = "0.1.0"
diff --git a/vendor/github.com/googleapis/gax-go/invoke.go b/vendor/github.com/googleapis/gax-go/invoke.go
new file mode 100644
index 0000000000000000000000000000000000000000..d2134e1df1209aed84d1e74534be9daf0d51ddae
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/invoke.go
@@ -0,0 +1,90 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// A user defined call stub.
+type APICall func(context.Context) error
+
+// Invoke calls the given APICall,
+// performing retries as specified by opts, if any.
+func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
+	var settings CallSettings
+	for _, opt := range opts {
+		opt.Resolve(&settings)
+	}
+	return invoke(ctx, call, settings, Sleep)
+}
+
+// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
+// If interrupted, Sleep returns ctx.Err().
+func Sleep(ctx context.Context, d time.Duration) error {
+	t := time.NewTimer(d)
+	select {
+	case <-ctx.Done():
+		t.Stop()
+		return ctx.Err()
+	case <-t.C:
+		return nil
+	}
+}
+
+type sleeper func(ctx context.Context, d time.Duration) error
+
+// invoke implements Invoke, taking an additional sleeper argument for testing.
+func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
+	var retryer Retryer
+	for {
+		err := call(ctx)
+		if err == nil {
+			return nil
+		}
+		if settings.Retry == nil {
+			return err
+		}
+		if retryer == nil {
+			if r := settings.Retry(); r != nil {
+				retryer = r
+			} else {
+				return err
+			}
+		}
+		if d, ok := retryer.Retry(err); !ok {
+			return err
+		} else if err = sp(ctx, d); err != nil {
+			return err
+		}
+	}
+}
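
And a hedged sketch of wiring such options into Invoke; someRPC stands in for any idempotent gRPC call and is not part of the package:

```go
import (
	"github.com/googleapis/gax-go"
	"golang.org/x/net/context"
)

func callWithRetry(ctx context.Context, someRPC func(context.Context) error, opts ...gax.CallOption) error {
	return gax.Invoke(ctx, func(ctx context.Context) error {
		// Retried according to the Retryer produced by the supplied options.
		return someRPC(ctx)
	}, opts...)
}
```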
diff --git a/vendor/github.com/googleapis/gax-go/path_template.go b/vendor/github.com/googleapis/gax-go/path_template.go
new file mode 100644
index 0000000000000000000000000000000000000000..41bda94cb6a85e67aef9b3bdad62b77d3d0bf0e6
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/path_template.go
@@ -0,0 +1,176 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+type matcher interface {
+	match([]string) (int, error)
+	String() string
+}
+
+type segment struct {
+	matcher
+	name string
+}
+
+type labelMatcher string
+
+func (ls labelMatcher) match(segments []string) (int, error) {
+	if len(segments) == 0 {
+		return 0, fmt.Errorf("expected %s but no more segments found", ls)
+	}
+	if segments[0] != string(ls) {
+		return 0, fmt.Errorf("expected %s but got %s", ls, segments[0])
+	}
+	return 1, nil
+}
+
+func (ls labelMatcher) String() string {
+	return string(ls)
+}
+
+type wildcardMatcher int
+
+func (wm wildcardMatcher) match(segments []string) (int, error) {
+	if len(segments) == 0 {
+		return 0, errors.New("no more segments found")
+	}
+	return 1, nil
+}
+
+func (wm wildcardMatcher) String() string {
+	return "*"
+}
+
+type pathWildcardMatcher int
+
+func (pwm pathWildcardMatcher) match(segments []string) (int, error) {
+	length := len(segments) - int(pwm)
+	if length <= 0 {
+		return 0, errors.New("not sufficient segments are supplied for path wildcard")
+	}
+	return length, nil
+}
+
+func (pwm pathWildcardMatcher) String() string {
+	return "**"
+}
+
+type ParseError struct {
+	Pos      int
+	Template string
+	Message  string
+}
+
+func (pe ParseError) Error() string {
+	return fmt.Sprintf("at %d of template '%s', %s", pe.Pos, pe.Template, pe.Message)
+}
+
+// PathTemplate manages the template to build and match with paths used
+// by API services. It holds a template and variable names in it, and
+// it can extract matched patterns from a path string or build a path
+// string from a binding.
+//
+// See http.proto in github.com/googleapis/googleapis/ for the details of
+// the template syntax.
+type PathTemplate struct {
+	segments []segment
+}
+
+// NewPathTemplate parses a path template, and returns a PathTemplate
+// instance if successful.
+func NewPathTemplate(template string) (*PathTemplate, error) {
+	return parsePathTemplate(template)
+}
+
+// MustCompilePathTemplate is like NewPathTemplate but panics if the
+// expression cannot be parsed. It simplifies safe initialization of
+// global variables holding compiled path templates.
+func MustCompilePathTemplate(template string) *PathTemplate {
+	pt, err := NewPathTemplate(template)
+	if err != nil {
+		panic(err)
+	}
+	return pt
+}
+
+// Match attempts to match the given path with the template, and returns
+// the mapping of the variable name to the matched pattern string.
+func (pt *PathTemplate) Match(path string) (map[string]string, error) {
+	paths := strings.Split(path, "/")
+	values := map[string]string{}
+	for _, segment := range pt.segments {
+		length, err := segment.match(paths)
+		if err != nil {
+			return nil, err
+		}
+		if segment.name != "" {
+			value := strings.Join(paths[:length], "/")
+			if oldValue, ok := values[segment.name]; ok {
+				values[segment.name] = oldValue + "/" + value
+			} else {
+				values[segment.name] = value
+			}
+		}
+		paths = paths[length:]
+	}
+	if len(paths) != 0 {
+		return nil, fmt.Errorf("Trailing path %s remains after the matching", strings.Join(paths, "/"))
+	}
+	return values, nil
+}
+
+// Render creates a path string from its template and the binding from
+// the variable name to the value.
+func (pt *PathTemplate) Render(binding map[string]string) (string, error) {
+	result := make([]string, 0, len(pt.segments))
+	var lastVariableName string
+	for _, segment := range pt.segments {
+		name := segment.name
+		if lastVariableName != "" && name == lastVariableName {
+			continue
+		}
+		lastVariableName = name
+		if name == "" {
+			result = append(result, segment.String())
+		} else if value, ok := binding[name]; ok {
+			result = append(result, value)
+		} else {
+			return "", fmt.Errorf("%s is not found", name)
+		}
+	}
+	built := strings.Join(result, "/")
+	return built, nil
+}
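
A short sketch of both directions, using an illustrative Pub/Sub-style template:

```go
import (
	"fmt"
	"log"

	"github.com/googleapis/gax-go"
)

var topicTemplate = gax.MustCompilePathTemplate("projects/{project}/topics/{topic}")

func main() {
	vals, err := topicTemplate.Match("projects/my-project/topics/updates")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(vals["project"], vals["topic"]) // my-project updates

	path, err := topicTemplate.Render(map[string]string{"project": "my-project", "topic": "updates"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(path) // projects/my-project/topics/updates
}
```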
diff --git a/vendor/github.com/googleapis/gax-go/path_template_parser.go b/vendor/github.com/googleapis/gax-go/path_template_parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..79c8e759c9469d23ac8ba948e3663e76a2c57264
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/path_template_parser.go
@@ -0,0 +1,227 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"fmt"
+	"io"
+	"strings"
+)
+
+// This parser follows the syntax of path templates, from
+// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto.
+// The differences are that there is no custom verb, we allow the initial slash
+// to be absent, and that we are not as strict as
+// https://tools.ietf.org/html/rfc6570 about the characters in identifiers and
+// literals.
+
+type pathTemplateParser struct {
+	r                *strings.Reader
+	runeCount        int             // the number of the current rune in the original string
+	nextVar          int             // the number to use for the next unnamed variable
+	seenName         map[string]bool // names we've seen already
+	seenPathWildcard bool            // have we seen "**" already?
+}
+
+func parsePathTemplate(template string) (pt *PathTemplate, err error) {
+	p := &pathTemplateParser{
+		r:        strings.NewReader(template),
+		seenName: map[string]bool{},
+	}
+
+	// Handle panics with strings like errors.
+	// See pathTemplateParser.error, below.
+	defer func() {
+		if x := recover(); x != nil {
+			errmsg, ok := x.(errString)
+			if !ok {
+				panic(x)
+			}
+			pt = nil
+			err = ParseError{p.runeCount, template, string(errmsg)}
+		}
+	}()
+
+	segs := p.template()
+	// If there is a path wildcard, set its length. We can't do this
+	// until we know how many segments we've got all together.
+	for i, seg := range segs {
+		if _, ok := seg.matcher.(pathWildcardMatcher); ok {
+			segs[i].matcher = pathWildcardMatcher(len(segs) - i - 1)
+			break
+		}
+	}
+	return &PathTemplate{segments: segs}, nil
+
+}
+
+// Used to indicate errors "thrown" by this parser. We don't use string because
+// many parts of the standard library panic with strings.
+type errString string
+
+// Terminates parsing immediately with an error.
+func (p *pathTemplateParser) error(msg string) {
+	panic(errString(msg))
+}
+
+// Template = [ "/" ] Segments
+func (p *pathTemplateParser) template() []segment {
+	var segs []segment
+	if p.consume('/') {
+		// Initial '/' needs an initial empty matcher.
+		segs = append(segs, segment{matcher: labelMatcher("")})
+	}
+	return append(segs, p.segments("")...)
+}
+
+// Segments = Segment { "/" Segment }
+func (p *pathTemplateParser) segments(name string) []segment {
+	var segs []segment
+	for {
+		subsegs := p.segment(name)
+		segs = append(segs, subsegs...)
+		if !p.consume('/') {
+			break
+		}
+	}
+	return segs
+}
+
+// Segment  = "*" | "**" | LITERAL | Variable
+func (p *pathTemplateParser) segment(name string) []segment {
+	if p.consume('*') {
+		if name == "" {
+			name = fmt.Sprintf("$%d", p.nextVar)
+			p.nextVar++
+		}
+		if p.consume('*') {
+			if p.seenPathWildcard {
+				p.error("multiple '**' disallowed")
+			}
+			p.seenPathWildcard = true
+			// We'll change 0 to the right number at the end.
+			return []segment{{name: name, matcher: pathWildcardMatcher(0)}}
+		}
+		return []segment{{name: name, matcher: wildcardMatcher(0)}}
+	}
+	if p.consume('{') {
+		if name != "" {
+			p.error("recursive named bindings are not allowed")
+		}
+		return p.variable()
+	}
+	return []segment{{name: name, matcher: labelMatcher(p.literal())}}
+}
+
+// Variable = "{" FieldPath [ "=" Segments ] "}"
+// "{" is already consumed.
+func (p *pathTemplateParser) variable() []segment {
+	// Simplification: treat FieldPath as LITERAL, instead of IDENT { '.' IDENT }
+	name := p.literal()
+	if p.seenName[name] {
+		p.error(name + " appears multiple times")
+	}
+	p.seenName[name] = true
+	var segs []segment
+	if p.consume('=') {
+		segs = p.segments(name)
+	} else {
+		// "{var}" is equivalent to "{var=*}"
+		segs = []segment{{name: name, matcher: wildcardMatcher(0)}}
+	}
+	if !p.consume('}') {
+		p.error("expected '}'")
+	}
+	return segs
+}
+
+// A literal is any sequence of characters other than a few special ones.
+// The list of stop characters is not quite the same as in the template RFC.
+func (p *pathTemplateParser) literal() string {
+	lit := p.consumeUntil("/*}{=")
+	if lit == "" {
+		p.error("empty literal")
+	}
+	return lit
+}
+
+// Read runes until EOF or one of the runes in stopRunes is encountered.
+// If the latter, unread the stop rune. Return the accumulated runes as a string.
+func (p *pathTemplateParser) consumeUntil(stopRunes string) string {
+	var runes []rune
+	for {
+		r, ok := p.readRune()
+		if !ok {
+			break
+		}
+		if strings.IndexRune(stopRunes, r) >= 0 {
+			p.unreadRune()
+			break
+		}
+		runes = append(runes, r)
+	}
+	return string(runes)
+}
+
+// If the next rune is r, consume it and return true.
+// Otherwise, leave the input unchanged and return false.
+func (p *pathTemplateParser) consume(r rune) bool {
+	rr, ok := p.readRune()
+	if !ok {
+		return false
+	}
+	if r == rr {
+		return true
+	}
+	p.unreadRune()
+	return false
+}
+
+// Read the next rune from the input. Return it.
+// The second return value is false at EOF.
+func (p *pathTemplateParser) readRune() (rune, bool) {
+	r, _, err := p.r.ReadRune()
+	if err == io.EOF {
+		return r, false
+	}
+	if err != nil {
+		p.error(err.Error())
+	}
+	p.runeCount++
+	return r, true
+}
+
+// Put the last rune that was read back on the input.
+func (p *pathTemplateParser) unreadRune() {
+	if err := p.r.UnreadRune(); err != nil {
+		p.error(err.Error())
+	}
+	p.runeCount--
+}
diff --git a/vendor/github.com/gorilla/csrf/README.md b/vendor/github.com/gorilla/csrf/README.md
index 9bcf3f87bf2fa4af00ba23286e62e93cc1123b5e..daa3c8778cac09311b96cda597fe0177a60e3555 100644
--- a/vendor/github.com/gorilla/csrf/README.md
+++ b/vendor/github.com/gorilla/csrf/README.md
@@ -94,7 +94,7 @@ func ShowSignupForm(w http.ResponseWriter, r *http.Request) {
     })
     // We could also retrieve the token directly from csrf.Token(r) and
     // set it in the request header - w.Header.Set("X-CSRF-Token", token)
-    // This is useful if your sending JSON to clients or a front-end JavaScript
+    // This is useful if you're sending JSON to clients or a front-end JavaScript
     // framework.
 }
 
diff --git a/vendor/github.com/gorilla/csrf/csrf.go b/vendor/github.com/gorilla/csrf/csrf.go
index 58ffd5b4d21d1ff64afcd66f268acc15ed9a8275..926be23c8ea08b5ef208c5910d57220d5138a956 100644
--- a/vendor/github.com/gorilla/csrf/csrf.go
+++ b/vendor/github.com/gorilla/csrf/csrf.go
@@ -115,7 +115,7 @@ type options struct {
 //		})
 //		// We could also retrieve the token directly from csrf.Token(r) and
 //		// set it in the request header - w.Header.Set("X-CSRF-Token", token)
-//		// This is useful if your sending JSON to clients or a front-end JavaScript
+//		// This is useful if you're sending JSON to clients or a front-end JavaScript
 //		// framework.
 //	}
 //
diff --git a/vendor/github.com/gorilla/csrf/options.go b/vendor/github.com/gorilla/csrf/options.go
index c644d492d4dba5dcc12f1cd1a7aa1eafaf03c4df..b50ebd4eb4f87d95e3e9f8cc5e2a0f06b5ae49ae 100644
--- a/vendor/github.com/gorilla/csrf/options.go
+++ b/vendor/github.com/gorilla/csrf/options.go
@@ -63,7 +63,7 @@ func HttpOnly(h bool) Option {
 // provide a handler that returns a static HTML file with a HTTP 403 status. By
 // default a HTTP 403 status and a plain text CSRF failure reason are served.
 //
-// Note that a custom error handler can also access the csrf.Failure(r)
+// Note that a custom error handler can also access the csrf.FailureReason(r)
 // function to retrieve the CSRF validation reason from the request context.
 func ErrorHandler(h http.Handler) Option {
 	return func(cs *csrf) {
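
A hedged sketch of a custom error handler that surfaces the corrected csrf.FailureReason helper; the auth key is a placeholder:

```go
import (
	"fmt"
	"net/http"

	"github.com/gorilla/csrf"
)

func protect(mux http.Handler) http.Handler {
	failure := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// FailureReason reports why CSRF validation rejected the request.
		http.Error(w, fmt.Sprintf("CSRF failure: %v", csrf.FailureReason(r)), http.StatusForbidden)
	})
	return csrf.Protect([]byte("32-byte-long-auth-key"), csrf.ErrorHandler(failure))(mux)
}
```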
diff --git a/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/gorilla/handlers/compress.go
index 5e140c503086dc8f49d835b0b455c4601004f9ac..e8345d792a37dbb12837a6430892b982d1dd870f 100644
--- a/vendor/github.com/gorilla/handlers/compress.go
+++ b/vendor/github.com/gorilla/handlers/compress.go
@@ -56,6 +56,9 @@ func (w *compressResponseWriter) Flush() {
 
 // CompressHandler gzip compresses HTTP responses for clients that support it
 // via the 'Accept-Encoding' header.
+//
+// Compressing TLS traffic may leak the page contents to an attacker if the
+// page contains user input: http://security.stackexchange.com/a/102015/12208
 func CompressHandler(h http.Handler) http.Handler {
 	return CompressHandlerLevel(h, gzip.DefaultCompression)
 }
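
Usage stays a one-line wrap; a minimal sketch (mind the TLS compression caveat in the new comment when responses echo user input):

```go
import (
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})
	// Gzip-compresses responses for clients that send Accept-Encoding: gzip.
	http.ListenAndServe(":8000", handlers.CompressHandler(mux))
}
```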
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
index 0aa080f9206affb122f93156b8af621df07f52c1..54a6493fb47948894128ef60d64c5e1f6d55853e 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -50,7 +50,7 @@ func (p *Parser) Parse() (*ast.File, error) {
 		scerr = &PosError{Pos: pos, Err: errors.New(msg)}
 	}
 
-	f.Node, err = p.objectList()
+	f.Node, err = p.objectList(false)
 	if scerr != nil {
 		return nil, scerr
 	}
@@ -62,11 +62,23 @@ func (p *Parser) Parse() (*ast.File, error) {
 	return f, nil
 }
 
-func (p *Parser) objectList() (*ast.ObjectList, error) {
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter "obj" tells us whether we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
 	defer un(trace(p, "ParseObjectList"))
 	node := &ast.ObjectList{}
 
 	for {
+		if obj {
+			tok := p.scan()
+			p.unscan()
+			if tok.Type == token.RBRACE {
+				break
+			}
+		}
+
 		n, err := p.objectItem()
 		if err == errEofToken {
 			break // we are finished
@@ -288,7 +300,7 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
 		Lbrace: p.tok.Pos,
 	}
 
-	l, err := p.objectList()
+	l, err := p.objectList(true)
 
 	// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
 	// not an RBRACE, it's a syntax error and we just return it.
@@ -296,9 +308,9 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
 		return nil, err
 	}
 
-	// If there is no error, we should be at a RBRACE to end the object
-	if p.tok.Type != token.RBRACE {
-		return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type)
+	// No error, scan and expect the ending to be a brace
+	if tok := p.scan(); tok.Type != token.RBRACE {
+		return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
 	}
 
 	o.List = l
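
A hedged sketch of what the objectList(obj bool) change makes explicit: nested (and empty) brace blocks now terminate on RBRACE. The input below is invented for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	src := `
service "web" {
  ports = [80, 443]
  empty {}
}
`
	var out map[string]interface{}
	// Decode parses the HCL source with the updated parser and fills the map.
	if err := hcl.Decode(&out, src); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%#v\n", out)
}
```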
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
index 6eb14a253928fe618d1e5c1dd38874ee63d97974..f652d6fe78e4e9aa9c2ae54de693e8d40a8b2780 100644
--- a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
+++ b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
@@ -48,6 +48,12 @@ func flattenListType(
 	item *ast.ObjectItem,
 	items []*ast.ObjectItem,
 	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+	// If the list is empty, keep the original list
+	if len(ot.List) == 0 {
+		items = append(items, item)
+		return items, frontier
+	}
+
 	// All the elements of this object must also be objects!
 	for _, subitem := range ot.List {
 		if _, ok := subitem.(*ast.ObjectType); !ok {
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
index acf95941f08d1b44bb45b3c8793d97682dfac8c6..6f46085300624f0330bbef220dc37c875a01ed97 100644
--- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -86,6 +86,7 @@ func (p *Parser) objectList() (*ast.ObjectList, error) {
 			break
 		}
 	}
+
 	return node, nil
 }
 
diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go
index f1cea7ddf7d511f14ff1f022ca951005556e5475..9753e9668f41f7b239cca53e3467aa4e93fe8a51 100644
--- a/vendor/github.com/hashicorp/vault/api/logical.go
+++ b/vendor/github.com/hashicorp/vault/api/logical.go
@@ -119,7 +119,7 @@ func (c *Logical) Delete(path string) (*Secret, error) {
 
 func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
 	var data map[string]interface{}
-	if wrappingToken != "" {
+	if wrappingToken != "" && wrappingToken != c.c.Token() {
 		data = map[string]interface{}{
 			"token": wrappingToken,
 		}
@@ -146,7 +146,7 @@ func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
 		return nil, nil
 	}
 
-	if wrappingToken == "" {
+	if wrappingToken != "" {
 		origToken := c.c.Token()
 		defer c.c.SetToken(origToken)
 		c.c.SetToken(wrappingToken)
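
Usage of the adjusted Unwrap logic is unchanged from the caller's point of view; a hedged sketch (token values are placeholders):

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("client-token") // placeholder

	// If the wrapping token differs from the client token it is sent in the
	// request body and temporarily used as the client token; if it is the
	// same token, the existing client token is used directly.
	secret, err := client.Logical().Unwrap("wrapping-token") // placeholder
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data)
}
```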
diff --git a/vendor/github.com/mattn/go-sqlite3/backup.go b/vendor/github.com/mattn/go-sqlite3/backup.go
index 4c1e38c878b849f98a56d266acd2ab2b7de9ecb2..05f803871fde41627e4a6102e9ce51b21bb5502c 100644
--- a/vendor/github.com/mattn/go-sqlite3/backup.go
+++ b/vendor/github.com/mattn/go-sqlite3/backup.go
@@ -65,10 +65,15 @@ func (b *SQLiteBackup) Finish() error {
 
 func (b *SQLiteBackup) Close() error {
 	ret := C.sqlite3_backup_finish(b.b)
+
+	// sqlite3_backup_finish() never fails, it just returns the
+	// error code from previous operations, so clean up before
+	// checking and returning an error
+	b.b = nil
+	runtime.SetFinalizer(b, nil)
+
 	if ret != 0 {
 		return Error{Code: ErrNo(ret)}
 	}
-	b.b = nil
-	runtime.SetFinalizer(b, nil)
 	return nil
 }
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
index 1f085b01cb61705e25740da10097c7e63dd5288f..a8790dece8b36ab6280afe9fb4069259b9eeca1f 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
@@ -17,6 +17,7 @@
 ** language. The code for the "sqlite3" command-line shell is also in a
 ** separate file. This file contains only code for the core SQLite library.
 */
+#ifndef USE_LIBSQLITE3
 #define SQLITE_CORE 1
 #define SQLITE_AMALGAMATION 1
 #ifndef SQLITE_PRIVATE
@@ -197846,5 +197847,9 @@ static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){
 
     
 #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) */
+#else // USE_LIBSQLITE3
+// If users really want to link against the system sqlite3 we
+// need to make this file a noop.
+#endif
 
 /************** End of fts5.c ************************************************/
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
index 4e2df5ebfab676c4866f386f0da15b1914062581..430ffeee410a805e6e323f9b4183273c34ce5f75 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
@@ -30,6 +30,7 @@
 ** the version number) and changes its name to "sqlite3.h" as
 ** part of the build process.
 */
+#ifndef USE_LIBSQLITE3
 #ifndef SQLITE3_H
 #define SQLITE3_H
 #include <stdarg.h>     /* Needed for the definition of va_list */
@@ -10338,5 +10339,9 @@ struct fts5_api {
 #endif
 
 #endif /* _FTS5_H */
+#else // USE_LIBSQLITE3
+// If users really want to link against the system sqlite3 we
+// need to make this file a noop.
+#endif
 
 /******** End of fts5.h *********/
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go
index af4c68d1d2d4393127157907f6caeb354d8c6a85..de9d7d27ec610b0a99c0d77fa10f46540bb2071a 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go
@@ -10,7 +10,7 @@ package sqlite3
 #cgo CFLAGS: -DSQLITE_ENABLE_RTREE -DSQLITE_THREADSAFE
 #cgo CFLAGS: -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_FTS4_UNICODE61
 #cgo CFLAGS: -DSQLITE_TRACE_SIZE_LIMIT=15
-#cgo CFLAGS: -Wno-deprecated-declarations -Wno-c99-extensions
+#cgo CFLAGS: -Wno-deprecated-declarations
 #ifndef USE_LIBSQLITE3
 #include <sqlite3-binding.h>
 #else
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h
index ce87e74690b2ff837c44dd3d0f0a3fa39e6918f9..0c28610f573cacea8cacb7db2c02280b513d4310 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h
@@ -1,3 +1,4 @@
+#ifndef USE_LIBSQLITE3
 /*
 ** 2006 June 7
 **
@@ -12,7 +13,7 @@
 ** This header file defines the SQLite interface for use by
 ** shared libraries that want to be imported as extensions into
 ** an SQLite instance.  Shared libraries that intend to be loaded
-** as extensions by SQLite should #include this file instead of 
+** as extensions by SQLite should #include this file instead of
 ** sqlite3.h.
 */
 #ifndef SQLITE3EXT_H
@@ -543,14 +544,14 @@ typedef int (*sqlite3_loadext_entry)(
 #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
 
 #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
-  /* This case when the file really is being compiled as a loadable 
+  /* This case when the file really is being compiled as a loadable
   ** extension */
 # define SQLITE_EXTENSION_INIT1     const sqlite3_api_routines *sqlite3_api=0;
 # define SQLITE_EXTENSION_INIT2(v)  sqlite3_api=v;
 # define SQLITE_EXTENSION_INIT3     \
     extern const sqlite3_api_routines *sqlite3_api;
 #else
-  /* This case when the file is being statically linked into the 
+  /* This case when the file is being statically linked into the
   ** application */
 # define SQLITE_EXTENSION_INIT1     /*no-op*/
 # define SQLITE_EXTENSION_INIT2(v)  (void)v; /* unused parameter */
@@ -558,3 +559,7 @@ typedef int (*sqlite3_loadext_entry)(
 #endif
 
 #endif /* SQLITE3EXT_H */
+#else // USE_LIBSQLITE3
+ // If users really want to link against the system sqlite3 we
+// need to make this file a noop.
+ #endif
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
index d1cb607a56887b557c0c4e7340ceef3236f78d10..b0ab89b6310f79ca5d69809b8ea31c87c1496ee1 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -651,14 +651,6 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
 			fieldType := structType.Field(i)
 			fieldKind := fieldType.Type.Kind()
 
-			if fieldType.Anonymous {
-				if fieldKind != reflect.Struct {
-					errors = appendErrors(errors,
-						fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind))
-					continue
-				}
-			}
-
 			// If "squash" is specified in the tag, we squash the field down.
 			squash := false
 			tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
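
With the anonymous-field restriction removed above, embedded non-struct fields decode like ordinary fields. A hedged sketch (types and keys invented for illustration):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Tags []string

type Item struct {
	Name string
	Tags `mapstructure:"tags"` // embedded non-struct field, previously rejected
}

func main() {
	input := map[string]interface{}{"name": "demo", "tags": []string{"a", "b"}}
	var out Item
	if err := mapstructure.Decode(input, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out) // {Name:demo Tags:[a b]}
}
```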
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
index 4258f456bf0c4650c0f86e5e279f2519205de069..fa815642ed02b77452ac55ac16171dcc03645d3d 100644
--- a/vendor/github.com/spf13/pflag/flag.go
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -514,7 +514,7 @@ func (f *FlagSet) FlagUsages() string {
 		if len(flag.NoOptDefVal) > 0 {
 			switch flag.Value.Type() {
 			case "string":
-				line += fmt.Sprintf("[=%q]", flag.NoOptDefVal)
+				line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
 			case "bool":
 				if flag.NoOptDefVal != "true" {
 					line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
@@ -534,7 +534,7 @@ func (f *FlagSet) FlagUsages() string {
 		line += usage
 		if !flag.defaultIsZeroValue() {
 			if flag.Value.Type() == "string" {
-				line += fmt.Sprintf(" (default %q)", flag.DefValue)
+				line += fmt.Sprintf(" (default \"%s\")", flag.DefValue)
 			} else {
 				line += fmt.Sprintf(" (default %s)", flag.DefValue)
 			}
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
index f320f2ece369d524a49886e78d2a9849207427aa..93b4e43290a04ed8043405bfa739727fb83641a3 100644
--- a/vendor/github.com/spf13/pflag/string_array.go
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -2,7 +2,6 @@ package pflag
 
 import (
 	"fmt"
-	"strings"
 )
 
 var _ = fmt.Fprint
@@ -40,7 +39,7 @@ func (s *stringArrayValue) String() string {
 }
 
 func stringArrayConv(sval string) (interface{}, error) {
-	sval = strings.Trim(sval, "[]")
+	sval = sval[1 : len(sval)-1]
 	// An empty string would cause an array with one (empty) string
 	if len(sval) == 0 {
 		return []string{}, nil
diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md
index f4e72f871d6cd85dce96786a3a74a2cd12adbe97..25181dff16bedc9f670cea1ba2f53469730ca113 100644
--- a/vendor/github.com/spf13/viper/README.md
+++ b/vendor/github.com/spf13/viper/README.md
@@ -12,7 +12,7 @@ Many Go projects are built using Viper including:
 * [BloomApi](https://www.bloomapi.com/)
 * [doctl](https://github.com/digitalocean/doctl)
 
- [![Build Status](https://travis-ci.org/spf13/viper.svg)](https://travis-ci.org/spf13/viper) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![Build Status](https://travis-ci.org/spf13/viper.svg)](https://travis-ci.org/spf13/viper) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![GoDoc](https://godoc.org/github.com/spf13/viper?status.svg)](https://godoc.org/github.com/spf13/viper)
 
 
 ## What is Viper?
@@ -277,10 +277,10 @@ Viper provides two Go interfaces to bind other flag systems if you don't use `Pf
 
 ```go
 type myFlag struct {}
-func (f myFlag) IsChanged() { return false }
-func (f myFlag) Name() { return "my-flag-name" }
-func (f myFlag) ValueString() { return "my-flag-value" }
-func (f myFlag) ValueType() { return "string" }
+func (f myFlag) HasChanged() bool { return false }
+func (f myFlag) Name() string { return "my-flag-name" }
+func (f myFlag) ValueString() string { return "my-flag-value" }
+func (f myFlag) ValueType() string { return "string" }
 ```
 
 Once your flag implements this interface, you can simply tell Viper to bind it:
diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go
index b0903fb07453ac019c7a868247b59ead6a67236d..3ebada91abdd667b6dd2d0b2fa1659ee272b10b9 100644
--- a/vendor/github.com/spf13/viper/util.go
+++ b/vendor/github.com/spf13/viper/util.go
@@ -39,17 +39,58 @@ func (pe ConfigParseError) Error() string {
 	return fmt.Sprintf("While parsing config: %s", pe.err.Error())
 }
 
+// toCaseInsensitiveValue checks if the value is a map;
+// if so, it creates a copy and lower-cases the keys recursively.
+func toCaseInsensitiveValue(value interface{}) interface{} {
+	switch v := value.(type) {
+	case map[interface{}]interface{}:
+		value = copyAndInsensitiviseMap(cast.ToStringMap(v))
+	case map[string]interface{}:
+		value = copyAndInsensitiviseMap(v)
+	}
+
+	return value
+}
+
+// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of
+// any map it makes case insensitive.
+func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} {
+	nm := make(map[string]interface{})
+
+	for key, val := range m {
+		lkey := strings.ToLower(key)
+		switch v := val.(type) {
+		case map[interface{}]interface{}:
+			nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v))
+		case map[string]interface{}:
+			nm[lkey] = copyAndInsensitiviseMap(v)
+		default:
+			nm[lkey] = v
+		}
+	}
+
+	return nm
+}
+
 func insensitiviseMap(m map[string]interface{}) {
 	for key, val := range m {
+		switch val.(type) {
+		case map[interface{}]interface{}:
+			// nested map: cast and recursively insensitivise
+			val = cast.ToStringMap(val)
+			insensitiviseMap(val.(map[string]interface{}))
+		case map[string]interface{}:
+			// nested map: recursively insensitivise
+			insensitiviseMap(val.(map[string]interface{}))
+		}
+
 		lower := strings.ToLower(key)
 		if key != lower {
+			// remove old key (not lower-cased)
 			delete(m, key)
-			m[lower] = val
-			if m2, ok := val.(map[string]interface{}); ok {
-				// nested map: recursively insensitivise
-				insensitiviseMap(m2)
-			}
 		}
+		// update map
+		m[lower] = val
 	}
 }
 
diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go
index 8f2784944f5bb5c7861e7c70be3f1bc799fb4f45..4ed2d4039d039157980cf6b5c6fc672c946914f9 100644
--- a/vendor/github.com/spf13/viper/viper.go
+++ b/vendor/github.com/spf13/viper/viper.go
@@ -241,7 +241,13 @@ func (v *Viper) WatchConfig() {
 		defer watcher.Close()
 
 		// we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way
-		configFile := filepath.Clean(v.getConfigFile())
+		filename, err := v.getConfigFile()
+		if err != nil {
+			log.Println("error:", err)
+			return
+		}
+
+		configFile := filepath.Clean(filename)
 		configDir, _ := filepath.Split(configFile)
 
 		done := make(chan bool)
@@ -401,22 +407,20 @@ func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool {
 
 // searchMap recursively searches for a value for path in source map.
 // Returns nil if not found.
+// Note: This assumes that the path entries and map keys are lower cased.
 func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} {
 	if len(path) == 0 {
 		return source
 	}
 
-	var ok bool
-	var next interface{}
-	for k, v := range source {
-		if strings.ToLower(k) == strings.ToLower(path[0]) {
-			ok = true
-			next = v
-			break
+	next, ok := source[path[0]]
+	if ok {
+		// Fast path
+		if len(path) == 1 {
+			return next
 		}
-	}
 
-	if ok {
+		// Nested case
 		switch next.(type) {
 		case map[interface{}]interface{}:
 			return v.searchMap(cast.ToStringMap(next), path[1:])
@@ -425,9 +429,6 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac
 			// if the type of `next` is the same as the type being asserted
 			return v.searchMap(next.(map[string]interface{}), path[1:])
 		default:
-			if len(path) == 1 {
-				return next
-			}
 			// got a value but nested key expected, return "nil" for not found
 			return nil
 		}
@@ -444,6 +445,8 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac
 //
 // This should be useful only at config level (other maps may not contain dots
 // in their keys).
+//
+// Note: This assumes that the path entries and map keys are lower cased.
 func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} {
 	if len(path) == 0 {
 		return source
@@ -453,17 +456,14 @@ func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []
 	for i := len(path); i > 0; i-- {
 		prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim))
 
-		var ok bool
-		var next interface{}
-		for k, v := range source {
-			if strings.ToLower(k) == prefixKey {
-				ok = true
-				next = v
-				break
+		next, ok := source[prefixKey]
+		if ok {
+			// Fast path
+			if i == len(path) {
+				return next
 			}
-		}
 
-		if ok {
+			// Nested case
 			var val interface{}
 			switch next.(type) {
 			case map[interface{}]interface{}:
@@ -473,9 +473,6 @@ func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []
 				// if the type of `next` is the same as the type being asserted
 				val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:])
 			default:
-				if len(path) == i {
-					val = next
-				}
 				// got a value but nested key expected, do nothing and look for next prefix
 			}
 			if val != nil {
@@ -579,6 +576,7 @@ func GetViper() *Viper {
 }
 
 // Get can retrieve any value given the key to use.
+// Get is case-insensitive for a key.
 // Get has the behavior of returning the value associated with the first
 // place from where it is set. Viper will check in the following order:
 // override, flag, env, config file, key/value store, default
@@ -594,6 +592,7 @@ func (v *Viper) Get(key string) interface{} {
 
 	valType := val
 	if v.typeByDefValue {
+		// TODO(bep) this branch isn't covered by a single test.
 		path := strings.Split(lcaseKey, v.keyDelim)
 		defVal := v.searchMap(v.defaults, path)
 		if defVal != nil {
@@ -621,6 +620,7 @@ func (v *Viper) Get(key string) interface{} {
 }
 
 // Sub returns a new Viper instance representing a subtree of this instance.
+// Sub is case-insensitive for a key.
 func Sub(key string) *Viper { return v.Sub(key) }
 func (v *Viper) Sub(key string) *Viper {
 	subv := New()
@@ -841,32 +841,37 @@ func (v *Viper) BindEnv(input ...string) error {
 // Viper will check in the following order:
 // flag, env, config file, key/value store, default.
 // Viper will check to see if an alias exists first.
-func (v *Viper) find(key string) interface{} {
-	var val interface{}
-	var exists bool
+// Note: this assumes the given key is lower-cased.
+func (v *Viper) find(lcaseKey string) interface{} {
+
+	var (
+		val    interface{}
+		exists bool
+		path   = strings.Split(lcaseKey, v.keyDelim)
+		nested = len(path) > 1
+	)
 
 	// compute the path through the nested maps to the nested value
-	path := strings.Split(key, v.keyDelim)
-	if shadow := v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)); shadow != "" {
+	if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" {
 		return nil
 	}
 
 	// if the requested key is an alias, then return the proper key
-	key = v.realKey(key)
-	// re-compute the path
-	path = strings.Split(key, v.keyDelim)
+	lcaseKey = v.realKey(lcaseKey)
+	path = strings.Split(lcaseKey, v.keyDelim)
+	nested = len(path) > 1
 
 	// Set() override first
 	val = v.searchMap(v.override, path)
 	if val != nil {
 		return val
 	}
-	if shadow := v.isPathShadowedInDeepMap(path, v.override); shadow != "" {
+	if nested && v.isPathShadowedInDeepMap(path, v.override) != "" {
 		return nil
 	}
 
 	// PFlag override next
-	flag, exists := v.pflags[key]
+	flag, exists := v.pflags[lcaseKey]
 	if exists && flag.HasChanged() {
 		switch flag.ValueType() {
 		case "int", "int8", "int16", "int32", "int64":
@@ -880,7 +885,7 @@ func (v *Viper) find(key string) interface{} {
 			return flag.ValueString()
 		}
 	}
-	if shadow := v.isPathShadowedInFlatMap(path, v.pflags); shadow != "" {
+	if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" {
 		return nil
 	}
 
@@ -888,20 +893,20 @@ func (v *Viper) find(key string) interface{} {
 	if v.automaticEnvApplied {
 		// even if it hasn't been registered, if automaticEnv is used,
 		// check any Get request
-		if val = v.getEnv(v.mergeWithEnvPrefix(key)); val != "" {
+		if val = v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); val != "" {
 			return val
 		}
-		if shadow := v.isPathShadowedInAutoEnv(path); shadow != "" {
+		if nested && v.isPathShadowedInAutoEnv(path) != "" {
 			return nil
 		}
 	}
-	envkey, exists := v.env[key]
+	envkey, exists := v.env[lcaseKey]
 	if exists {
 		if val = v.getEnv(envkey); val != "" {
 			return val
 		}
 	}
-	if shadow := v.isPathShadowedInFlatMap(path, v.env); shadow != "" {
+	if nested && v.isPathShadowedInFlatMap(path, v.env) != "" {
 		return nil
 	}
 
@@ -910,7 +915,7 @@ func (v *Viper) find(key string) interface{} {
 	if val != nil {
 		return val
 	}
-	if shadow := v.isPathShadowedInDeepMap(path, v.config); shadow != "" {
+	if nested && v.isPathShadowedInDeepMap(path, v.config) != "" {
 		return nil
 	}
 
@@ -919,7 +924,7 @@ func (v *Viper) find(key string) interface{} {
 	if val != nil {
 		return val
 	}
-	if shadow := v.isPathShadowedInDeepMap(path, v.kvstore); shadow != "" {
+	if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" {
 		return nil
 	}
 
@@ -928,13 +933,13 @@ func (v *Viper) find(key string) interface{} {
 	if val != nil {
 		return val
 	}
-	if shadow := v.isPathShadowedInDeepMap(path, v.defaults); shadow != "" {
+	if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" {
 		return nil
 	}
 
 	// last chance: if no other value is returned and a flag does exist for the value,
 	// get the flag's value even if the flag's value has not changed
-	if flag, exists := v.pflags[key]; exists {
+	if flag, exists := v.pflags[lcaseKey]; exists {
 		switch flag.ValueType() {
 		case "int", "int8", "int16", "int32", "int64":
 			return cast.ToInt(flag.ValueString())
@@ -953,6 +958,7 @@ func (v *Viper) find(key string) interface{} {
 }
 
 // IsSet checks to see if the key has been set in any of the data locations.
+// IsSet is case-insensitive for a key.
 func IsSet(key string) bool { return v.IsSet(key) }
 func (v *Viper) IsSet(key string) bool {
 	lcaseKey := strings.ToLower(key)
@@ -1034,11 +1040,13 @@ func (v *Viper) InConfig(key string) bool {
 }
 
 // SetDefault sets the default value for this key.
+// SetDefault is case-insensitive for a key.
 // Default is only used when no value is provided by the user via flag, config or ENV.
 func SetDefault(key string, value interface{}) { v.SetDefault(key, value) }
 func (v *Viper) SetDefault(key string, value interface{}) {
 	// If alias passed in, then set the proper default
 	key = v.realKey(strings.ToLower(key))
+	value = toCaseInsensitiveValue(value)
 
 	path := strings.Split(key, v.keyDelim)
 	lastKey := strings.ToLower(path[len(path)-1])
@@ -1049,12 +1057,14 @@ func (v *Viper) SetDefault(key string, value interface{}) {
 }
 
 // Set sets the value for the key in the override register.
+// Set is case-insensitive for a key.
 // Will be used instead of values obtained via
 // flags, config file, ENV, default, or key/value store.
 func Set(key string, value interface{}) { v.Set(key, value) }
 func (v *Viper) Set(key string, value interface{}) {
 	// If alias passed in, then set the proper override
 	key = v.realKey(strings.ToLower(key))
+	value = toCaseInsensitiveValue(value)
 
 	path := strings.Split(key, v.keyDelim)
 	lastKey := strings.ToLower(path[len(path)-1])
@@ -1069,11 +1079,16 @@ func (v *Viper) Set(key string, value interface{}) {
 func ReadInConfig() error { return v.ReadInConfig() }
 func (v *Viper) ReadInConfig() error {
 	jww.INFO.Println("Attempting to read in config file")
+	filename, err := v.getConfigFile()
+	if err != nil {
+		return err
+	}
+
 	if !stringInSlice(v.getConfigType(), SupportedExts) {
 		return UnsupportedConfigError(v.getConfigType())
 	}
 
-	file, err := afero.ReadFile(v.fs, v.getConfigFile())
+	file, err := afero.ReadFile(v.fs, filename)
 	if err != nil {
 		return err
 	}
@@ -1091,7 +1106,12 @@ func (v *Viper) MergeInConfig() error {
 		return UnsupportedConfigError(v.getConfigType())
 	}
 
-	file, err := afero.ReadFile(v.fs, v.getConfigFile())
+	filename, err := v.getConfigFile()
+	if err != nil {
+		return err
+	}
+
+	file, err := afero.ReadFile(v.fs, filename)
 	if err != nil {
 		return err
 	}
@@ -1149,6 +1169,14 @@ func castMapStringToMapInterface(src map[string]string) map[string]interface{} {
 	return tgt
 }
 
+func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} {
+	tgt := map[string]interface{}{}
+	for k, v := range src {
+		tgt[k] = v
+	}
+	return tgt
+}
+
 // mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's
 // insistence on parsing nested structures as `map[interface{}]interface{}`
 // instead of using a `string` as the key for nested structures beyond one level
@@ -1294,8 +1322,8 @@ func (v *Viper) AllKeys() []string {
 	// add all paths, by order of descending priority to ensure correct shadowing
 	m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "")
 	m = v.flattenAndMergeMap(m, v.override, "")
-	m = v.mergeFlatMap(m, v.pflags)
-	m = v.mergeFlatMap(m, v.env)
+	m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags))
+	m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env))
 	m = v.flattenAndMergeMap(m, v.config, "")
 	m = v.flattenAndMergeMap(m, v.kvstore, "")
 	m = v.flattenAndMergeMap(m, v.defaults, "")
@@ -1347,16 +1375,7 @@ func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interfac
 
 // mergeFlatMap merges the given maps, excluding values of the second map
 // shadowed by values from the first map.
-func (v *Viper) mergeFlatMap(shadow map[string]bool, mi interface{}) map[string]bool {
-	// unify input map
-	var m map[string]interface{}
-	switch mi.(type) {
-	case map[string]string, map[string]FlagValue:
-		m = cast.ToStringMap(mi)
-	default:
-		return shadow
-	}
-
+func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool {
 	// scan keys
 outer:
 	for k, _ := range m {
@@ -1427,7 +1446,11 @@ func (v *Viper) getConfigType() string {
 		return v.configType
 	}
 
-	cf := v.getConfigFile()
+	cf, err := v.getConfigFile()
+	if err != nil {
+		return ""
+	}
+
 	ext := filepath.Ext(cf)
 
 	if len(ext) > 1 {
@@ -1437,15 +1460,15 @@ func (v *Viper) getConfigType() string {
 	return ""
 }
 
-func (v *Viper) getConfigFile() string {
+func (v *Viper) getConfigFile() (string, error) {
 	// if explicitly set, then use it
 	if v.configFile != "" {
-		return v.configFile
+		return v.configFile, nil
 	}
 
 	cf, err := v.findConfigFile()
 	if err != nil {
-		return ""
+		return "", err
 	}
 
 	v.configFile = cf
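
getConfigFile now reports an error instead of silently returning an empty path, and ReadInConfig/MergeInConfig propagate it. A hedged sketch of the caller-side effect (config name and path are made up):

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	viper.SetConfigName("app")      // placeholder config name
	viper.AddConfigPath("/etc/app") // placeholder search path

	if err := viper.ReadInConfig(); err != nil {
		// Previously a missing file could surface later as an empty read.
		fmt.Println("config not loaded:", err)
		return
	}
	fmt.Println("using config:", viper.ConfigFileUsed())
}
```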
diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
index 37599fac043bed54773dd3fede8057f84de1a3b0..932800b8d1b1dd144dde90d4fd15c0243318eef3 100644
--- a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
+++ b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
@@ -8,22 +8,9 @@
 // +build amd64,!gccgo,!appengine
 
 // func freeze(inout *[5]uint64)
-TEXT ·freeze(SB),7,$96-8
+TEXT ·freeze(SB),7,$0-8
 	MOVQ inout+0(FP), DI
 
-	MOVQ SP,R11
-	MOVQ $31,CX
-	NOTQ CX
-	ANDQ CX,SP
-	ADDQ $32,SP
-
-	MOVQ R11,0(SP)
-	MOVQ R12,8(SP)
-	MOVQ R13,16(SP)
-	MOVQ R14,24(SP)
-	MOVQ R15,32(SP)
-	MOVQ BX,40(SP)
-	MOVQ BP,48(SP)
 	MOVQ 0(DI),SI
 	MOVQ 8(DI),DX
 	MOVQ 16(DI),CX
@@ -81,14 +68,4 @@ REDUCELOOP:
 	MOVQ CX,16(DI)
 	MOVQ R8,24(DI)
 	MOVQ R9,32(DI)
-	MOVQ 0(SP),R11
-	MOVQ 8(SP),R12
-	MOVQ 16(SP),R13
-	MOVQ 24(SP),R14
-	MOVQ 32(SP),R15
-	MOVQ 40(SP),BX
-	MOVQ 48(SP),BP
-	MOVQ R11,SP
-	MOVQ DI,AX
-	MOVQ SI,DX
 	RET
diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
index 3949f9cfaf4d075035ca1502e6e4c27d0ad079d7..ee7b36c36844c6530c1c3d6ca27593bd04198f00 100644
--- a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
+++ b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
@@ -8,22 +8,9 @@
 // +build amd64,!gccgo,!appengine
 
 // func ladderstep(inout *[5][5]uint64)
-TEXT ·ladderstep(SB),0,$384-8
+TEXT ·ladderstep(SB),0,$296-8
 	MOVQ inout+0(FP),DI
 
-	MOVQ SP,R11
-	MOVQ $31,CX
-	NOTQ CX
-	ANDQ CX,SP
-	ADDQ $32,SP
-
-	MOVQ R11,0(SP)
-	MOVQ R12,8(SP)
-	MOVQ R13,16(SP)
-	MOVQ R14,24(SP)
-	MOVQ R15,32(SP)
-	MOVQ BX,40(SP)
-	MOVQ BP,48(SP)
 	MOVQ 40(DI),SI
 	MOVQ 48(DI),DX
 	MOVQ 56(DI),CX
@@ -49,86 +36,86 @@ TEXT ·ladderstep(SB),0,$384-8
 	SUBQ 96(DI),R11
 	SUBQ 104(DI),R12
 	SUBQ 112(DI),R13
-	MOVQ SI,56(SP)
-	MOVQ DX,64(SP)
-	MOVQ CX,72(SP)
-	MOVQ R8,80(SP)
-	MOVQ R9,88(SP)
-	MOVQ AX,96(SP)
-	MOVQ R10,104(SP)
-	MOVQ R11,112(SP)
-	MOVQ R12,120(SP)
-	MOVQ R13,128(SP)
-	MOVQ 96(SP),AX
-	MULQ 96(SP)
+	MOVQ SI,0(SP)
+	MOVQ DX,8(SP)
+	MOVQ CX,16(SP)
+	MOVQ R8,24(SP)
+	MOVQ R9,32(SP)
+	MOVQ AX,40(SP)
+	MOVQ R10,48(SP)
+	MOVQ R11,56(SP)
+	MOVQ R12,64(SP)
+	MOVQ R13,72(SP)
+	MOVQ 40(SP),AX
+	MULQ 40(SP)
 	MOVQ AX,SI
 	MOVQ DX,CX
-	MOVQ 96(SP),AX
+	MOVQ 40(SP),AX
 	SHLQ $1,AX
-	MULQ 104(SP)
+	MULQ 48(SP)
 	MOVQ AX,R8
 	MOVQ DX,R9
-	MOVQ 96(SP),AX
+	MOVQ 40(SP),AX
 	SHLQ $1,AX
-	MULQ 112(SP)
+	MULQ 56(SP)
 	MOVQ AX,R10
 	MOVQ DX,R11
-	MOVQ 96(SP),AX
+	MOVQ 40(SP),AX
 	SHLQ $1,AX
-	MULQ 120(SP)
+	MULQ 64(SP)
 	MOVQ AX,R12
 	MOVQ DX,R13
-	MOVQ 96(SP),AX
+	MOVQ 40(SP),AX
 	SHLQ $1,AX
-	MULQ 128(SP)
+	MULQ 72(SP)
 	MOVQ AX,R14
 	MOVQ DX,R15
-	MOVQ 104(SP),AX
-	MULQ 104(SP)
+	MOVQ 48(SP),AX
+	MULQ 48(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 104(SP),AX
+	MOVQ 48(SP),AX
 	SHLQ $1,AX
-	MULQ 112(SP)
+	MULQ 56(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 104(SP),AX
+	MOVQ 48(SP),AX
 	SHLQ $1,AX
-	MULQ 120(SP)
+	MULQ 64(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 104(SP),DX
+	MOVQ 48(SP),DX
 	IMUL3Q $38,DX,AX
-	MULQ 128(SP)
+	MULQ 72(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 112(SP),AX
-	MULQ 112(SP)
+	MOVQ 56(SP),AX
+	MULQ 56(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 112(SP),DX
+	MOVQ 56(SP),DX
 	IMUL3Q $38,DX,AX
-	MULQ 120(SP)
+	MULQ 64(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 112(SP),DX
+	MOVQ 56(SP),DX
 	IMUL3Q $38,DX,AX
-	MULQ 128(SP)
+	MULQ 72(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 120(SP),DX
+	MOVQ 64(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 120(SP)
+	MULQ 64(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 120(SP),DX
+	MOVQ 64(SP),DX
 	IMUL3Q $38,DX,AX
-	MULQ 128(SP)
+	MULQ 72(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 128(SP),DX
+	MOVQ 72(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 128(SP)
+	MULQ 72(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
 	MOVQ ·REDMASK51(SB),DX
@@ -169,81 +156,81 @@ TEXT ·ladderstep(SB),0,$384-8
 	IMUL3Q $19,CX,CX
 	ADDQ CX,SI
 	ANDQ DX,R10
-	MOVQ SI,136(SP)
-	MOVQ R8,144(SP)
-	MOVQ R9,152(SP)
-	MOVQ AX,160(SP)
-	MOVQ R10,168(SP)
-	MOVQ 56(SP),AX
-	MULQ 56(SP)
+	MOVQ SI,80(SP)
+	MOVQ R8,88(SP)
+	MOVQ R9,96(SP)
+	MOVQ AX,104(SP)
+	MOVQ R10,112(SP)
+	MOVQ 0(SP),AX
+	MULQ 0(SP)
 	MOVQ AX,SI
 	MOVQ DX,CX
-	MOVQ 56(SP),AX
+	MOVQ 0(SP),AX
 	SHLQ $1,AX
-	MULQ 64(SP)
+	MULQ 8(SP)
 	MOVQ AX,R8
 	MOVQ DX,R9
-	MOVQ 56(SP),AX
+	MOVQ 0(SP),AX
 	SHLQ $1,AX
-	MULQ 72(SP)
+	MULQ 16(SP)
 	MOVQ AX,R10
 	MOVQ DX,R11
-	MOVQ 56(SP),AX
+	MOVQ 0(SP),AX
 	SHLQ $1,AX
-	MULQ 80(SP)
+	MULQ 24(SP)
 	MOVQ AX,R12
 	MOVQ DX,R13
-	MOVQ 56(SP),AX
+	MOVQ 0(SP),AX
 	SHLQ $1,AX
-	MULQ 88(SP)
+	MULQ 32(SP)
 	MOVQ AX,R14
 	MOVQ DX,R15
-	MOVQ 64(SP),AX
-	MULQ 64(SP)
+	MOVQ 8(SP),AX
+	MULQ 8(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 64(SP),AX
+	MOVQ 8(SP),AX
 	SHLQ $1,AX
-	MULQ 72(SP)
+	MULQ 16(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 64(SP),AX
+	MOVQ 8(SP),AX
 	SHLQ $1,AX
-	MULQ 80(SP)
+	MULQ 24(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 64(SP),DX
+	MOVQ 8(SP),DX
 	IMUL3Q $38,DX,AX
-	MULQ 88(SP)
+	MULQ 32(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 72(SP),AX
-	MULQ 72(SP)
+	MOVQ 16(SP),AX
+	MULQ 16(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 72(SP),DX
+	MOVQ 16(SP),DX
 	IMUL3Q $38,DX,AX
-	MULQ 80(SP)
+	MULQ 24(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 72(SP),DX
+	MOVQ 16(SP),DX
 	IMUL3Q $38,DX,AX
-	MULQ 88(SP)
+	MULQ 32(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 80(SP),DX
+	MOVQ 24(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 80(SP)
+	MULQ 24(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 80(SP),DX
+	MOVQ 24(SP),DX
 	IMUL3Q $38,DX,AX
-	MULQ 88(SP)
+	MULQ 32(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 88(SP),DX
+	MOVQ 32(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 88(SP)
+	MULQ 32(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
 	MOVQ ·REDMASK51(SB),DX
@@ -284,11 +271,11 @@ TEXT ·ladderstep(SB),0,$384-8
 	IMUL3Q $19,CX,CX
 	ADDQ CX,SI
 	ANDQ DX,R10
-	MOVQ SI,176(SP)
-	MOVQ R8,184(SP)
-	MOVQ R9,192(SP)
-	MOVQ AX,200(SP)
-	MOVQ R10,208(SP)
+	MOVQ SI,120(SP)
+	MOVQ R8,128(SP)
+	MOVQ R9,136(SP)
+	MOVQ AX,144(SP)
+	MOVQ R10,152(SP)
 	MOVQ SI,SI
 	MOVQ R8,DX
 	MOVQ R9,CX
@@ -299,16 +286,16 @@ TEXT ·ladderstep(SB),0,$384-8
 	ADDQ ·_2P1234(SB),CX
 	ADDQ ·_2P1234(SB),R8
 	ADDQ ·_2P1234(SB),R9
-	SUBQ 136(SP),SI
-	SUBQ 144(SP),DX
-	SUBQ 152(SP),CX
-	SUBQ 160(SP),R8
-	SUBQ 168(SP),R9
-	MOVQ SI,216(SP)
-	MOVQ DX,224(SP)
-	MOVQ CX,232(SP)
-	MOVQ R8,240(SP)
-	MOVQ R9,248(SP)
+	SUBQ 80(SP),SI
+	SUBQ 88(SP),DX
+	SUBQ 96(SP),CX
+	SUBQ 104(SP),R8
+	SUBQ 112(SP),R9
+	MOVQ SI,160(SP)
+	MOVQ DX,168(SP)
+	MOVQ CX,176(SP)
+	MOVQ R8,184(SP)
+	MOVQ R9,192(SP)
 	MOVQ 120(DI),SI
 	MOVQ 128(DI),DX
 	MOVQ 136(DI),CX
@@ -334,121 +321,121 @@ TEXT ·ladderstep(SB),0,$384-8
 	SUBQ 176(DI),R11
 	SUBQ 184(DI),R12
 	SUBQ 192(DI),R13
-	MOVQ SI,256(SP)
-	MOVQ DX,264(SP)
-	MOVQ CX,272(SP)
-	MOVQ R8,280(SP)
-	MOVQ R9,288(SP)
-	MOVQ AX,296(SP)
-	MOVQ R10,304(SP)
-	MOVQ R11,312(SP)
-	MOVQ R12,320(SP)
-	MOVQ R13,328(SP)
-	MOVQ 280(SP),SI
+	MOVQ SI,200(SP)
+	MOVQ DX,208(SP)
+	MOVQ CX,216(SP)
+	MOVQ R8,224(SP)
+	MOVQ R9,232(SP)
+	MOVQ AX,240(SP)
+	MOVQ R10,248(SP)
+	MOVQ R11,256(SP)
+	MOVQ R12,264(SP)
+	MOVQ R13,272(SP)
+	MOVQ 224(SP),SI
 	IMUL3Q $19,SI,AX
-	MOVQ AX,336(SP)
-	MULQ 112(SP)
+	MOVQ AX,280(SP)
+	MULQ 56(SP)
 	MOVQ AX,SI
 	MOVQ DX,CX
-	MOVQ 288(SP),DX
+	MOVQ 232(SP),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,344(SP)
-	MULQ 104(SP)
+	MOVQ AX,288(SP)
+	MULQ 48(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 256(SP),AX
-	MULQ 96(SP)
+	MOVQ 200(SP),AX
+	MULQ 40(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 256(SP),AX
-	MULQ 104(SP)
+	MOVQ 200(SP),AX
+	MULQ 48(SP)
 	MOVQ AX,R8
 	MOVQ DX,R9
-	MOVQ 256(SP),AX
-	MULQ 112(SP)
+	MOVQ 200(SP),AX
+	MULQ 56(SP)
 	MOVQ AX,R10
 	MOVQ DX,R11
-	MOVQ 256(SP),AX
-	MULQ 120(SP)
+	MOVQ 200(SP),AX
+	MULQ 64(SP)
 	MOVQ AX,R12
 	MOVQ DX,R13
-	MOVQ 256(SP),AX
-	MULQ 128(SP)
+	MOVQ 200(SP),AX
+	MULQ 72(SP)
 	MOVQ AX,R14
 	MOVQ DX,R15
-	MOVQ 264(SP),AX
-	MULQ 96(SP)
+	MOVQ 208(SP),AX
+	MULQ 40(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 264(SP),AX
-	MULQ 104(SP)
+	MOVQ 208(SP),AX
+	MULQ 48(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 264(SP),AX
-	MULQ 112(SP)
+	MOVQ 208(SP),AX
+	MULQ 56(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 264(SP),AX
-	MULQ 120(SP)
+	MOVQ 208(SP),AX
+	MULQ 64(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 264(SP),DX
+	MOVQ 208(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 128(SP)
+	MULQ 72(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 272(SP),AX
-	MULQ 96(SP)
+	MOVQ 216(SP),AX
+	MULQ 40(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 272(SP),AX
-	MULQ 104(SP)
+	MOVQ 216(SP),AX
+	MULQ 48(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 272(SP),AX
-	MULQ 112(SP)
+	MOVQ 216(SP),AX
+	MULQ 56(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 272(SP),DX
+	MOVQ 216(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 120(SP)
+	MULQ 64(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 272(SP),DX
+	MOVQ 216(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 128(SP)
+	MULQ 72(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 280(SP),AX
-	MULQ 96(SP)
+	MOVQ 224(SP),AX
+	MULQ 40(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 280(SP),AX
-	MULQ 104(SP)
+	MOVQ 224(SP),AX
+	MULQ 48(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 336(SP),AX
-	MULQ 120(SP)
+	MOVQ 280(SP),AX
+	MULQ 64(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 336(SP),AX
-	MULQ 128(SP)
+	MOVQ 280(SP),AX
+	MULQ 72(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 288(SP),AX
-	MULQ 96(SP)
+	MOVQ 232(SP),AX
+	MULQ 40(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 344(SP),AX
-	MULQ 112(SP)
+	MOVQ 288(SP),AX
+	MULQ 56(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 344(SP),AX
-	MULQ 120(SP)
+	MOVQ 288(SP),AX
+	MULQ 64(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 344(SP),AX
-	MULQ 128(SP)
+	MOVQ 288(SP),AX
+	MULQ 72(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
 	MOVQ ·REDMASK51(SB),DX
@@ -489,116 +476,116 @@ TEXT ·ladderstep(SB),0,$384-8
 	IMUL3Q $19,CX,CX
 	ADDQ CX,SI
 	ANDQ DX,R10
-	MOVQ SI,96(SP)
-	MOVQ R8,104(SP)
-	MOVQ R9,112(SP)
-	MOVQ AX,120(SP)
-	MOVQ R10,128(SP)
-	MOVQ 320(SP),SI
+	MOVQ SI,40(SP)
+	MOVQ R8,48(SP)
+	MOVQ R9,56(SP)
+	MOVQ AX,64(SP)
+	MOVQ R10,72(SP)
+	MOVQ 264(SP),SI
 	IMUL3Q $19,SI,AX
-	MOVQ AX,256(SP)
-	MULQ 72(SP)
+	MOVQ AX,200(SP)
+	MULQ 16(SP)
 	MOVQ AX,SI
 	MOVQ DX,CX
-	MOVQ 328(SP),DX
+	MOVQ 272(SP),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,264(SP)
-	MULQ 64(SP)
+	MOVQ AX,208(SP)
+	MULQ 8(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 296(SP),AX
-	MULQ 56(SP)
+	MOVQ 240(SP),AX
+	MULQ 0(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 296(SP),AX
-	MULQ 64(SP)
+	MOVQ 240(SP),AX
+	MULQ 8(SP)
 	MOVQ AX,R8
 	MOVQ DX,R9
-	MOVQ 296(SP),AX
-	MULQ 72(SP)
+	MOVQ 240(SP),AX
+	MULQ 16(SP)
 	MOVQ AX,R10
 	MOVQ DX,R11
-	MOVQ 296(SP),AX
-	MULQ 80(SP)
+	MOVQ 240(SP),AX
+	MULQ 24(SP)
 	MOVQ AX,R12
 	MOVQ DX,R13
-	MOVQ 296(SP),AX
-	MULQ 88(SP)
+	MOVQ 240(SP),AX
+	MULQ 32(SP)
 	MOVQ AX,R14
 	MOVQ DX,R15
-	MOVQ 304(SP),AX
-	MULQ 56(SP)
+	MOVQ 248(SP),AX
+	MULQ 0(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 304(SP),AX
-	MULQ 64(SP)
+	MOVQ 248(SP),AX
+	MULQ 8(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 304(SP),AX
-	MULQ 72(SP)
+	MOVQ 248(SP),AX
+	MULQ 16(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 304(SP),AX
-	MULQ 80(SP)
+	MOVQ 248(SP),AX
+	MULQ 24(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 304(SP),DX
+	MOVQ 248(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 88(SP)
+	MULQ 32(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 312(SP),AX
-	MULQ 56(SP)
+	MOVQ 256(SP),AX
+	MULQ 0(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 312(SP),AX
-	MULQ 64(SP)
+	MOVQ 256(SP),AX
+	MULQ 8(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 312(SP),AX
-	MULQ 72(SP)
+	MOVQ 256(SP),AX
+	MULQ 16(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 312(SP),DX
+	MOVQ 256(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 80(SP)
+	MULQ 24(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 312(SP),DX
+	MOVQ 256(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 88(SP)
+	MULQ 32(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 320(SP),AX
-	MULQ 56(SP)
+	MOVQ 264(SP),AX
+	MULQ 0(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 320(SP),AX
-	MULQ 64(SP)
+	MOVQ 264(SP),AX
+	MULQ 8(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 256(SP),AX
-	MULQ 80(SP)
+	MOVQ 200(SP),AX
+	MULQ 24(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 256(SP),AX
-	MULQ 88(SP)
+	MOVQ 200(SP),AX
+	MULQ 32(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 328(SP),AX
-	MULQ 56(SP)
+	MOVQ 272(SP),AX
+	MULQ 0(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 264(SP),AX
-	MULQ 72(SP)
+	MOVQ 208(SP),AX
+	MULQ 16(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 264(SP),AX
-	MULQ 80(SP)
+	MOVQ 208(SP),AX
+	MULQ 24(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 264(SP),AX
-	MULQ 88(SP)
+	MOVQ 208(SP),AX
+	MULQ 32(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
 	MOVQ ·REDMASK51(SB),DX
@@ -649,16 +636,16 @@ TEXT ·ladderstep(SB),0,$384-8
 	ADDQ ·_2P1234(SB),R11
 	ADDQ ·_2P1234(SB),R12
 	ADDQ ·_2P1234(SB),R13
-	ADDQ 96(SP),SI
-	ADDQ 104(SP),R8
-	ADDQ 112(SP),R9
-	ADDQ 120(SP),AX
-	ADDQ 128(SP),R10
-	SUBQ 96(SP),DX
-	SUBQ 104(SP),CX
-	SUBQ 112(SP),R11
-	SUBQ 120(SP),R12
-	SUBQ 128(SP),R13
+	ADDQ 40(SP),SI
+	ADDQ 48(SP),R8
+	ADDQ 56(SP),R9
+	ADDQ 64(SP),AX
+	ADDQ 72(SP),R10
+	SUBQ 40(SP),DX
+	SUBQ 48(SP),CX
+	SUBQ 56(SP),R11
+	SUBQ 64(SP),R12
+	SUBQ 72(SP),R13
 	MOVQ SI,120(DI)
 	MOVQ R8,128(DI)
 	MOVQ R9,136(DI)
@@ -901,13 +888,13 @@ TEXT ·ladderstep(SB),0,$384-8
 	MOVQ R10,192(DI)
 	MOVQ 184(DI),SI
 	IMUL3Q $19,SI,AX
-	MOVQ AX,56(SP)
+	MOVQ AX,0(SP)
 	MULQ 16(DI)
 	MOVQ AX,SI
 	MOVQ DX,CX
 	MOVQ 192(DI),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,64(SP)
+	MOVQ AX,8(SP)
 	MULQ 8(DI)
 	ADDQ AX,SI
 	ADCQ DX,CX
@@ -982,11 +969,11 @@ TEXT ·ladderstep(SB),0,$384-8
 	MULQ 8(DI)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 56(SP),AX
+	MOVQ 0(SP),AX
 	MULQ 24(DI)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 56(SP),AX
+	MOVQ 0(SP),AX
 	MULQ 32(DI)
 	ADDQ AX,R10
 	ADCQ DX,R11
@@ -994,15 +981,15 @@ TEXT ·ladderstep(SB),0,$384-8
 	MULQ 0(DI)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 64(SP),AX
+	MOVQ 8(SP),AX
 	MULQ 16(DI)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 64(SP),AX
+	MOVQ 8(SP),AX
 	MULQ 24(DI)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 64(SP),AX
+	MOVQ 8(SP),AX
 	MULQ 32(DI)
 	ADDQ AX,R12
 	ADCQ DX,R13
@@ -1049,111 +1036,111 @@ TEXT ·ladderstep(SB),0,$384-8
 	MOVQ R9,176(DI)
 	MOVQ AX,184(DI)
 	MOVQ R10,192(DI)
-	MOVQ 200(SP),SI
+	MOVQ 144(SP),SI
 	IMUL3Q $19,SI,AX
-	MOVQ AX,56(SP)
-	MULQ 152(SP)
+	MOVQ AX,0(SP)
+	MULQ 96(SP)
 	MOVQ AX,SI
 	MOVQ DX,CX
-	MOVQ 208(SP),DX
+	MOVQ 152(SP),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,64(SP)
-	MULQ 144(SP)
+	MOVQ AX,8(SP)
+	MULQ 88(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 176(SP),AX
-	MULQ 136(SP)
+	MOVQ 120(SP),AX
+	MULQ 80(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 176(SP),AX
-	MULQ 144(SP)
+	MOVQ 120(SP),AX
+	MULQ 88(SP)
 	MOVQ AX,R8
 	MOVQ DX,R9
-	MOVQ 176(SP),AX
-	MULQ 152(SP)
+	MOVQ 120(SP),AX
+	MULQ 96(SP)
 	MOVQ AX,R10
 	MOVQ DX,R11
-	MOVQ 176(SP),AX
-	MULQ 160(SP)
+	MOVQ 120(SP),AX
+	MULQ 104(SP)
 	MOVQ AX,R12
 	MOVQ DX,R13
-	MOVQ 176(SP),AX
-	MULQ 168(SP)
+	MOVQ 120(SP),AX
+	MULQ 112(SP)
 	MOVQ AX,R14
 	MOVQ DX,R15
-	MOVQ 184(SP),AX
-	MULQ 136(SP)
+	MOVQ 128(SP),AX
+	MULQ 80(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 184(SP),AX
-	MULQ 144(SP)
+	MOVQ 128(SP),AX
+	MULQ 88(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 184(SP),AX
-	MULQ 152(SP)
+	MOVQ 128(SP),AX
+	MULQ 96(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 184(SP),AX
-	MULQ 160(SP)
+	MOVQ 128(SP),AX
+	MULQ 104(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 184(SP),DX
+	MOVQ 128(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 168(SP)
+	MULQ 112(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 192(SP),AX
-	MULQ 136(SP)
+	MOVQ 136(SP),AX
+	MULQ 80(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 192(SP),AX
-	MULQ 144(SP)
+	MOVQ 136(SP),AX
+	MULQ 88(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 192(SP),AX
-	MULQ 152(SP)
+	MOVQ 136(SP),AX
+	MULQ 96(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 192(SP),DX
+	MOVQ 136(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 160(SP)
+	MULQ 104(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 192(SP),DX
+	MOVQ 136(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 168(SP)
+	MULQ 112(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 200(SP),AX
-	MULQ 136(SP)
+	MOVQ 144(SP),AX
+	MULQ 80(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 200(SP),AX
-	MULQ 144(SP)
+	MOVQ 144(SP),AX
+	MULQ 88(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 56(SP),AX
-	MULQ 160(SP)
+	MOVQ 0(SP),AX
+	MULQ 104(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 56(SP),AX
-	MULQ 168(SP)
+	MOVQ 0(SP),AX
+	MULQ 112(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 208(SP),AX
-	MULQ 136(SP)
+	MOVQ 152(SP),AX
+	MULQ 80(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 64(SP),AX
-	MULQ 152(SP)
+	MOVQ 8(SP),AX
+	MULQ 96(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 64(SP),AX
-	MULQ 160(SP)
+	MOVQ 8(SP),AX
+	MULQ 104(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 64(SP),AX
-	MULQ 168(SP)
+	MOVQ 8(SP),AX
+	MULQ 112(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
 	MOVQ ·REDMASK51(SB),DX
@@ -1199,37 +1186,37 @@ TEXT ·ladderstep(SB),0,$384-8
 	MOVQ R9,56(DI)
 	MOVQ AX,64(DI)
 	MOVQ R10,72(DI)
-	MOVQ 216(SP),AX
+	MOVQ 160(SP),AX
 	MULQ ·_121666_213(SB)
 	SHRQ $13,AX
 	MOVQ AX,SI
 	MOVQ DX,CX
-	MOVQ 224(SP),AX
+	MOVQ 168(SP),AX
 	MULQ ·_121666_213(SB)
 	SHRQ $13,AX
 	ADDQ AX,CX
 	MOVQ DX,R8
-	MOVQ 232(SP),AX
+	MOVQ 176(SP),AX
 	MULQ ·_121666_213(SB)
 	SHRQ $13,AX
 	ADDQ AX,R8
 	MOVQ DX,R9
-	MOVQ 240(SP),AX
+	MOVQ 184(SP),AX
 	MULQ ·_121666_213(SB)
 	SHRQ $13,AX
 	ADDQ AX,R9
 	MOVQ DX,R10
-	MOVQ 248(SP),AX
+	MOVQ 192(SP),AX
 	MULQ ·_121666_213(SB)
 	SHRQ $13,AX
 	ADDQ AX,R10
 	IMUL3Q $19,DX,DX
 	ADDQ DX,SI
-	ADDQ 136(SP),SI
-	ADDQ 144(SP),CX
-	ADDQ 152(SP),R8
-	ADDQ 160(SP),R9
-	ADDQ 168(SP),R10
+	ADDQ 80(SP),SI
+	ADDQ 88(SP),CX
+	ADDQ 96(SP),R8
+	ADDQ 104(SP),R9
+	ADDQ 112(SP),R10
 	MOVQ SI,80(DI)
 	MOVQ CX,88(DI)
 	MOVQ R8,96(DI)
@@ -1237,109 +1224,109 @@ TEXT ·ladderstep(SB),0,$384-8
 	MOVQ R10,112(DI)
 	MOVQ 104(DI),SI
 	IMUL3Q $19,SI,AX
-	MOVQ AX,56(SP)
-	MULQ 232(SP)
+	MOVQ AX,0(SP)
+	MULQ 176(SP)
 	MOVQ AX,SI
 	MOVQ DX,CX
 	MOVQ 112(DI),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,64(SP)
-	MULQ 224(SP)
+	MOVQ AX,8(SP)
+	MULQ 168(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
 	MOVQ 80(DI),AX
-	MULQ 216(SP)
+	MULQ 160(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
 	MOVQ 80(DI),AX
-	MULQ 224(SP)
+	MULQ 168(SP)
 	MOVQ AX,R8
 	MOVQ DX,R9
 	MOVQ 80(DI),AX
-	MULQ 232(SP)
+	MULQ 176(SP)
 	MOVQ AX,R10
 	MOVQ DX,R11
 	MOVQ 80(DI),AX
-	MULQ 240(SP)
+	MULQ 184(SP)
 	MOVQ AX,R12
 	MOVQ DX,R13
 	MOVQ 80(DI),AX
-	MULQ 248(SP)
+	MULQ 192(SP)
 	MOVQ AX,R14
 	MOVQ DX,R15
 	MOVQ 88(DI),AX
-	MULQ 216(SP)
+	MULQ 160(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
 	MOVQ 88(DI),AX
-	MULQ 224(SP)
+	MULQ 168(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
 	MOVQ 88(DI),AX
-	MULQ 232(SP)
+	MULQ 176(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
 	MOVQ 88(DI),AX
-	MULQ 240(SP)
+	MULQ 184(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
 	MOVQ 88(DI),DX
 	IMUL3Q $19,DX,AX
-	MULQ 248(SP)
+	MULQ 192(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
 	MOVQ 96(DI),AX
-	MULQ 216(SP)
+	MULQ 160(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
 	MOVQ 96(DI),AX
-	MULQ 224(SP)
+	MULQ 168(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
 	MOVQ 96(DI),AX
-	MULQ 232(SP)
+	MULQ 176(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
 	MOVQ 96(DI),DX
 	IMUL3Q $19,DX,AX
-	MULQ 240(SP)
+	MULQ 184(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
 	MOVQ 96(DI),DX
 	IMUL3Q $19,DX,AX
-	MULQ 248(SP)
+	MULQ 192(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
 	MOVQ 104(DI),AX
-	MULQ 216(SP)
+	MULQ 160(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
 	MOVQ 104(DI),AX
-	MULQ 224(SP)
+	MULQ 168(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 56(SP),AX
-	MULQ 240(SP)
+	MOVQ 0(SP),AX
+	MULQ 184(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 56(SP),AX
-	MULQ 248(SP)
+	MOVQ 0(SP),AX
+	MULQ 192(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
 	MOVQ 112(DI),AX
-	MULQ 216(SP)
+	MULQ 160(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 64(SP),AX
-	MULQ 232(SP)
+	MOVQ 8(SP),AX
+	MULQ 176(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 64(SP),AX
-	MULQ 240(SP)
+	MOVQ 8(SP),AX
+	MULQ 184(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 64(SP),AX
-	MULQ 248(SP)
+	MOVQ 8(SP),AX
+	MULQ 192(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
 	MOVQ ·REDMASK51(SB),DX
@@ -1385,14 +1372,4 @@ TEXT ·ladderstep(SB),0,$384-8
 	MOVQ R9,96(DI)
 	MOVQ AX,104(DI)
 	MOVQ R10,112(DI)
-	MOVQ 0(SP),R11
-	MOVQ 8(SP),R12
-	MOVQ 16(SP),R13
-	MOVQ 24(SP),R14
-	MOVQ 32(SP),R15
-	MOVQ 40(SP),BX
-	MOVQ 48(SP),BP
-	MOVQ R11,SP
-	MOVQ DI,AX
-	MOVQ SI,DX
 	RET
diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
index e48d183ee567411901728d1f92272cf9a9692a8a..33ce57dcded44a9a7c7e457a5e6b2693d802c98e 100644
--- a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
+++ b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
@@ -8,35 +8,21 @@
 // +build amd64,!gccgo,!appengine
 
 // func mul(dest, a, b *[5]uint64)
-TEXT ·mul(SB),0,$128-24
+TEXT ·mul(SB),0,$16-24
 	MOVQ dest+0(FP), DI
 	MOVQ a+8(FP), SI
 	MOVQ b+16(FP), DX
 
-	MOVQ SP,R11
-	MOVQ $31,CX
-	NOTQ CX
-	ANDQ CX,SP
-	ADDQ $32,SP
-
-	MOVQ R11,0(SP)
-	MOVQ R12,8(SP)
-	MOVQ R13,16(SP)
-	MOVQ R14,24(SP)
-	MOVQ R15,32(SP)
-	MOVQ BX,40(SP)
-	MOVQ BP,48(SP)
-	MOVQ DI,56(SP)
 	MOVQ DX,CX
 	MOVQ 24(SI),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,64(SP)
+	MOVQ AX,0(SP)
 	MULQ 16(CX)
 	MOVQ AX,R8
 	MOVQ DX,R9
 	MOVQ 32(SI),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,72(SP)
+	MOVQ AX,8(SP)
 	MULQ 8(CX)
 	ADDQ AX,R8
 	ADCQ DX,R9
@@ -111,11 +97,11 @@ TEXT ·mul(SB),0,$128-24
 	MULQ 8(CX)
 	ADDQ AX,BX
 	ADCQ DX,BP
-	MOVQ 64(SP),AX
+	MOVQ 0(SP),AX
 	MULQ 24(CX)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 64(SP),AX
+	MOVQ 0(SP),AX
 	MULQ 32(CX)
 	ADDQ AX,R12
 	ADCQ DX,R13
@@ -123,15 +109,15 @@ TEXT ·mul(SB),0,$128-24
 	MULQ 0(CX)
 	ADDQ AX,BX
 	ADCQ DX,BP
-	MOVQ 72(SP),AX
+	MOVQ 8(SP),AX
 	MULQ 16(CX)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 72(SP),AX
+	MOVQ 8(SP),AX
 	MULQ 24(CX)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 72(SP),AX
+	MOVQ 8(SP),AX
 	MULQ 32(CX)
 	ADDQ AX,R14
 	ADCQ DX,R15
@@ -178,14 +164,4 @@ TEXT ·mul(SB),0,$128-24
 	MOVQ R9,16(DI)
 	MOVQ AX,24(DI)
 	MOVQ R10,32(DI)
-	MOVQ 0(SP),R11
-	MOVQ 8(SP),R12
-	MOVQ 16(SP),R13
-	MOVQ 24(SP),R14
-	MOVQ 32(SP),R15
-	MOVQ 40(SP),BX
-	MOVQ 48(SP),BP
-	MOVQ R11,SP
-	MOVQ DI,AX
-	MOVQ SI,DX
 	RET
diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s
index 78d1a50ddca139c0dfe98abb478c8434394176bc..3a92804ddf380df22d98317ae5fa8f0d92bdb176 100644
--- a/vendor/golang.org/x/crypto/curve25519/square_amd64.s
+++ b/vendor/golang.org/x/crypto/curve25519/square_amd64.s
@@ -8,23 +8,10 @@
 // +build amd64,!gccgo,!appengine
 
 // func square(out, in *[5]uint64)
-TEXT ·square(SB),7,$96-16
+TEXT ·square(SB),7,$0-16
 	MOVQ out+0(FP), DI
 	MOVQ in+8(FP), SI
 
-	MOVQ SP,R11
-	MOVQ $31,CX
-	NOTQ CX
-	ANDQ CX,SP
-	ADDQ $32, SP
-
-	MOVQ R11,0(SP)
-	MOVQ R12,8(SP)
-	MOVQ R13,16(SP)
-	MOVQ R14,24(SP)
-	MOVQ R15,32(SP)
-	MOVQ BX,40(SP)
-	MOVQ BP,48(SP)
 	MOVQ 0(SI),AX
 	MULQ 0(SI)
 	MOVQ AX,CX
@@ -140,14 +127,4 @@ TEXT ·square(SB),7,$96-16
 	MOVQ R9,16(DI)
 	MOVQ AX,24(DI)
 	MOVQ R10,32(DI)
-	MOVQ 0(SP),R11
-	MOVQ 8(SP),R12
-	MOVQ 16(SP),R13
-	MOVQ 24(SP),R14
-	MOVQ 32(SP),R15
-	MOVQ 40(SP),BX
-	MOVQ 48(SP),BP
-	MOVQ R11,SP
-	MOVQ DI,AX
-	MOVQ SI,DX
 	RET
diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go
index 9285ee31deec3a3efc8f96b3568fcc4f7d96a1d9..c87fbebfde88a2be08fdd9440241cb1a3c4c145a 100644
--- a/vendor/golang.org/x/crypto/ssh/kex.go
+++ b/vendor/golang.org/x/crypto/ssh/kex.go
@@ -77,11 +77,11 @@ type kexAlgorithm interface {
 
 // dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
 type dhGroup struct {
-	g, p *big.Int
+	g, p, pMinus1 *big.Int
 }
 
 func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
-	if theirPublic.Sign() <= 0 || theirPublic.Cmp(group.p) >= 0 {
+	if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 {
 		return nil, errors.New("ssh: DH parameter out of bounds")
 	}
 	return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil
@@ -90,10 +90,17 @@ func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int,
 func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
 	hashFunc := crypto.SHA1
 
-	x, err := rand.Int(randSource, group.p)
-	if err != nil {
-		return nil, err
+	var x *big.Int
+	for {
+		var err error
+		if x, err = rand.Int(randSource, group.pMinus1); err != nil {
+			return nil, err
+		}
+		if x.Sign() > 0 {
+			break
+		}
 	}
+
 	X := new(big.Int).Exp(group.g, x, group.p)
 	kexDHInit := kexDHInitMsg{
 		X: X,
@@ -146,9 +153,14 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha
 		return
 	}
 
-	y, err := rand.Int(randSource, group.p)
-	if err != nil {
-		return
+	var y *big.Int
+	for {
+		if y, err = rand.Int(randSource, group.pMinus1); err != nil {
+			return
+		}
+		if y.Sign() > 0 {
+			break
+		}
 	}
 
 	Y := new(big.Int).Exp(group.g, y, group.p)
@@ -373,6 +385,7 @@ func init() {
 	kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
 		g: new(big.Int).SetInt64(2),
 		p: p,
+		pMinus1: new(big.Int).Sub(p, bigOne),
 	}
 
 	// This is the group called diffie-hellman-group14-sha1 in RFC
@@ -382,6 +395,7 @@ func init() {
 	kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
 		g: new(big.Int).SetInt64(2),
 		p: p,
+		pMinus1: new(big.Int).Sub(p, bigOne),
 	}
 
 	kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
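
A standalone restatement of the tightened Diffie-Hellman check above: peer public values must satisfy 1 < Y < p-1. This is a sketch, not the package's internal API:

```go
package main

import (
	"errors"
	"fmt"
	"math/big"
)

var bigOne = big.NewInt(1)

// checkDHPublic mirrors the bounds check added to dhGroup.diffieHellman.
func checkDHPublic(theirPublic, p *big.Int) error {
	pMinus1 := new(big.Int).Sub(p, bigOne)
	if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(pMinus1) >= 0 {
		return errors.New("ssh: DH parameter out of bounds")
	}
	return nil
}

func main() {
	p := big.NewInt(23)                           // toy modulus for illustration
	fmt.Println(checkDHPublic(big.NewInt(1), p))  // rejected: Y <= 1
	fmt.Println(checkDHPublic(big.NewInt(22), p)) // rejected: Y >= p-1
	fmt.Println(checkDHPublic(big.NewInt(5), p))  // accepted: <nil>
}
```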
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
index e13cf9ce151b07f2b5008c1867e3b59d5351f770..f2fc9b6c99d88d06c3991f355d4e3bd205a64bd7 100644
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -722,8 +722,8 @@ func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
 }
 
 // NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey,
-// ed25519.PublicKey, or any other crypto.Signer and returns a corresponding
-// Signer instance. ECDSA keys must use P-256, P-384 or P-521.
+// or ed25519.PublicKey and returns a corresponding PublicKey instance.
+// ECDSA keys must use P-256, P-384 or P-521.
 func NewPublicKey(key interface{}) (PublicKey, error) {
 	switch key := key.(type) {
 	case *rsa.PublicKey:
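
A hedged usage sketch matching the corrected NewPublicKey doc comment (ECDSA restricted to P-256, P-384 or P-521):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	pub, err := ssh.NewPublicKey(&priv.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pub.Type())                         // ecdsa-sha2-nistp256
	fmt.Printf("%s", ssh.MarshalAuthorizedKey(pub)) // authorized_keys format
}
```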
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
index 134654cf7e2bf2ba2aaf2206e2e6f04460f412c7..d1ed420ce07e4fcfdd304b15c3c7feacada0590e 100644
--- a/vendor/golang.org/x/net/context/context.go
+++ b/vendor/golang.org/x/net/context/context.go
@@ -36,103 +36,6 @@
 // Contexts.
 package context // import "golang.org/x/net/context"
 
-import "time"
-
-// A Context carries a deadline, a cancelation signal, and other values across
-// API boundaries.
-//
-// Context's methods may be called by multiple goroutines simultaneously.
-type Context interface {
-	// Deadline returns the time when work done on behalf of this context
-	// should be canceled.  Deadline returns ok==false when no deadline is
-	// set.  Successive calls to Deadline return the same results.
-	Deadline() (deadline time.Time, ok bool)
-
-	// Done returns a channel that's closed when work done on behalf of this
-	// context should be canceled.  Done may return nil if this context can
-	// never be canceled.  Successive calls to Done return the same value.
-	//
-	// WithCancel arranges for Done to be closed when cancel is called;
-	// WithDeadline arranges for Done to be closed when the deadline
-	// expires; WithTimeout arranges for Done to be closed when the timeout
-	// elapses.
-	//
-	// Done is provided for use in select statements:
-	//
-	//  // Stream generates values with DoSomething and sends them to out
-	//  // until DoSomething returns an error or ctx.Done is closed.
-	//  func Stream(ctx context.Context, out chan<- Value) error {
-	//  	for {
-	//  		v, err := DoSomething(ctx)
-	//  		if err != nil {
-	//  			return err
-	//  		}
-	//  		select {
-	//  		case <-ctx.Done():
-	//  			return ctx.Err()
-	//  		case out <- v:
-	//  		}
-	//  	}
-	//  }
-	//
-	// See http://blog.golang.org/pipelines for more examples of how to use
-	// a Done channel for cancelation.
-	Done() <-chan struct{}
-
-	// Err returns a non-nil error value after Done is closed.  Err returns
-	// Canceled if the context was canceled or DeadlineExceeded if the
-	// context's deadline passed.  No other values for Err are defined.
-	// After Done is closed, successive calls to Err return the same value.
-	Err() error
-
-	// Value returns the value associated with this context for key, or nil
-	// if no value is associated with key.  Successive calls to Value with
-	// the same key returns the same result.
-	//
-	// Use context values only for request-scoped data that transits
-	// processes and API boundaries, not for passing optional parameters to
-	// functions.
-	//
-	// A key identifies a specific value in a Context.  Functions that wish
-	// to store values in Context typically allocate a key in a global
-	// variable then use that key as the argument to context.WithValue and
-	// Context.Value.  A key can be any type that supports equality;
-	// packages should define keys as an unexported type to avoid
-	// collisions.
-	//
-	// Packages that define a Context key should provide type-safe accessors
-	// for the values stores using that key:
-	//
-	// 	// Package user defines a User type that's stored in Contexts.
-	// 	package user
-	//
-	// 	import "golang.org/x/net/context"
-	//
-	// 	// User is the type of value stored in the Contexts.
-	// 	type User struct {...}
-	//
-	// 	// key is an unexported type for keys defined in this package.
-	// 	// This prevents collisions with keys defined in other packages.
-	// 	type key int
-	//
-	// 	// userKey is the key for user.User values in Contexts.  It is
-	// 	// unexported; clients use user.NewContext and user.FromContext
-	// 	// instead of using this key directly.
-	// 	var userKey key = 0
-	//
-	// 	// NewContext returns a new Context that carries value u.
-	// 	func NewContext(ctx context.Context, u *User) context.Context {
-	// 		return context.WithValue(ctx, userKey, u)
-	// 	}
-	//
-	// 	// FromContext returns the User value stored in ctx, if any.
-	// 	func FromContext(ctx context.Context) (*User, bool) {
-	// 		u, ok := ctx.Value(userKey).(*User)
-	// 		return u, ok
-	// 	}
-	Value(key interface{}) interface{}
-}
-
 // Background returns a non-nil, empty Context. It is never canceled, has no
 // values, and has no deadline.  It is typically used by the main function,
 // initialization, and tests, and as the top-level Context for incoming
@@ -149,8 +52,3 @@ func Background() Context {
 func TODO() Context {
 	return todo
 }
-
-// A CancelFunc tells an operation to abandon its work.
-// A CancelFunc does not wait for the work to stop.
-// After the first call, subsequent calls to a CancelFunc do nothing.
-type CancelFunc func()
diff --git a/vendor/golang.org/x/net/context/go18.go b/vendor/golang.org/x/net/context/go18.go
new file mode 100644
index 0000000000000000000000000000000000000000..35360c50dbeafeda8e145b61abfa8ff4ec38bed3
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go18.go
@@ -0,0 +1,22 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package context
+
+import (
+	"context" // standard library's context, as of Go 1.7
+)
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context => context.Context
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc => context.CancelFunc
diff --git a/vendor/golang.org/x/net/context/pre_go18.go b/vendor/golang.org/x/net/context/pre_go18.go
new file mode 100644
index 0000000000000000000000000000000000000000..41bd8ba666b913a661145770e10ee79155c4caaf
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go18.go
@@ -0,0 +1,109 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package context
+
+import "time"
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled.  Deadline returns ok==false when no deadline is
+	// set.  Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled.  Done may return nil if this context can
+	// never be canceled.  Successive calls to Done return the same value.
+	//
+	// WithCancel arranges for Done to be closed when cancel is called;
+	// WithDeadline arranges for Done to be closed when the deadline
+	// expires; WithTimeout arranges for Done to be closed when the timeout
+	// elapses.
+	//
+	// Done is provided for use in select statements:
+	//
+	//  // Stream generates values with DoSomething and sends them to out
+	//  // until DoSomething returns an error or ctx.Done is closed.
+	//  func Stream(ctx context.Context, out chan<- Value) error {
+	//  	for {
+	//  		v, err := DoSomething(ctx)
+	//  		if err != nil {
+	//  			return err
+	//  		}
+	//  		select {
+	//  		case <-ctx.Done():
+	//  			return ctx.Err()
+	//  		case out <- v:
+	//  		}
+	//  	}
+	//  }
+	//
+	// See http://blog.golang.org/pipelines for more examples of how to use
+	// a Done channel for cancelation.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed.  Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed.  No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key.  Successive calls to Value with
+	// the same key return the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	//
+	// A key identifies a specific value in a Context.  Functions that wish
+	// to store values in Context typically allocate a key in a global
+	// variable then use that key as the argument to context.WithValue and
+	// Context.Value.  A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+	//
+	// 	// Package user defines a User type that's stored in Contexts.
+	// 	package user
+	//
+	// 	import "golang.org/x/net/context"
+	//
+	// 	// User is the type of value stored in the Contexts.
+	// 	type User struct {...}
+	//
+	// 	// key is an unexported type for keys defined in this package.
+	// 	// This prevents collisions with keys defined in other packages.
+	// 	type key int
+	//
+	// 	// userKey is the key for user.User values in Contexts.  It is
+	// 	// unexported; clients use user.NewContext and user.FromContext
+	// 	// instead of using this key directly.
+	// 	var userKey key = 0
+	//
+	// 	// NewContext returns a new Context that carries value u.
+	// 	func NewContext(ctx context.Context, u *User) context.Context {
+	// 		return context.WithValue(ctx, userKey, u)
+	// 	}
+	//
+	// 	// FromContext returns the User value stored in ctx, if any.
+	// 	func FromContext(ctx context.Context) (*User, bool) {
+	// 		u, ok := ctx.Value(userKey).(*User)
+	// 		return u, ok
+	// 	}
+	Value(key interface{}) interface{}
+}
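Whichever build file is selected, the package API is the same; a minimal usage sketch (hypothetical, not taken from the package) of the Done/Err contract described in the comments above:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	// Derive a context that is canceled automatically after 50ms.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	select {
	case <-time.After(time.Second): // stand-in for real work
		fmt.Println("work finished")
	case <-ctx.Done():
		fmt.Println("gave up:", ctx.Err()) // ctx.Err() == context.DeadlineExceeded
	}
}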
diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go
index c2ae167316cabf4c4e7ebfec1a7b97c5377b66c0..e0002036ff83a1e3f1da16ea71c119a345ba1b34 100644
--- a/vendor/golang.org/x/net/http2/go18.go
+++ b/vendor/golang.org/x/net/http2/go18.go
@@ -6,6 +6,32 @@
 
 package http2
 
-import "crypto/tls"
+import (
+	"crypto/tls"
+	"net/http"
+)
 
 func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }
+
+var _ http.Pusher = (*responseWriter)(nil)
+
+// Push implements http.Pusher.
+func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
+	internalOpts := pushOptions{}
+	if opts != nil {
+		internalOpts.Method = opts.Method
+		internalOpts.Header = opts.Header
+	}
+	return w.push(target, internalOpts)
+}
+
+func configureServer18(h1 *http.Server, h2 *Server) error {
+	if h2.IdleTimeout == 0 {
+		if h1.IdleTimeout != 0 {
+			h2.IdleTimeout = h1.IdleTimeout
+		} else {
+			h2.IdleTimeout = h1.ReadTimeout
+		}
+	}
+	return nil
+}
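A hedged sketch of how a handler consumes the Pusher support wired up above on Go 1.8 or later; the asset path and certificate filenames are placeholders, not values from this repository:

package main

import (
	"io"
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Over HTTP/2 on Go 1.8+, the ResponseWriter also implements http.Pusher.
	if p, ok := w.(http.Pusher); ok {
		if err := p.Push("/static/app.css", nil); err != nil {
			log.Printf("push failed: %v", err) // e.g. client disabled push; not fatal
		}
	}
	io.WriteString(w, `<html><head><link rel="stylesheet" href="/static/app.css"></head><body>hello</body></html>`)
}

func main() {
	http.HandleFunc("/", handler)
	// HTTP/2 requires TLS; cert.pem and key.pem are placeholder paths.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}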
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 2e27b093c58f9db1389bab159248b168ba69813d..b6b0f9ad153a857bf2d16b9b64c9665b321712e1 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -36,6 +36,7 @@ var (
 	VerboseLogs    bool
 	logFrameWrites bool
 	logFrameReads  bool
+	inTests        bool
 )
 
 func init() {
@@ -77,13 +78,23 @@ var (
 
 type streamState int
 
+// HTTP/2 stream states.
+//
+// See http://tools.ietf.org/html/rfc7540#section-5.1.
+//
+// For simplicity, the server code merges "reserved (local)" into
+// "half-closed (remote)". This is one less state transition to track.
+// The only downside is that we send PUSH_PROMISEs slightly less
+// liberally than allowable. More discussion here:
+// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
+//
+// "reserved (remote)" is omitted since the client code does not
+// support server push.
 const (
 	stateIdle streamState = iota
 	stateOpen
 	stateHalfClosedLocal
 	stateHalfClosedRemote
-	stateResvLocal
-	stateResvRemote
 	stateClosed
 )
 
@@ -92,8 +103,6 @@ var stateName = [...]string{
 	stateOpen:             "Open",
 	stateHalfClosedLocal:  "HalfClosedLocal",
 	stateHalfClosedRemote: "HalfClosedRemote",
-	stateResvLocal:        "ResvLocal",
-	stateResvRemote:       "ResvRemote",
 	stateClosed:           "Closed",
 }
 
@@ -253,14 +262,27 @@ func newBufferedWriter(w io.Writer) *bufferedWriter {
 	return &bufferedWriter{w: w}
 }
 
+// bufWriterPoolBufferSize is the size of bufio.Writer's
+// buffers created using bufWriterPool.
+//
+// TODO: pick a less arbitrary value? this is a bit under
+// (3 x typical 1500 byte MTU) at least. Other than that,
+// not much thought went into it.
+const bufWriterPoolBufferSize = 4 << 10
+
 var bufWriterPool = sync.Pool{
 	New: func() interface{} {
-		// TODO: pick something better? this is a bit under
-		// (3 x typical 1500 byte MTU) at least.
-		return bufio.NewWriterSize(nil, 4<<10)
+		return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
 	},
 }
 
+func (w *bufferedWriter) Available() int {
+	if w.bw == nil {
+		return bufWriterPoolBufferSize
+	}
+	return w.bw.Available()
+}
+
 func (w *bufferedWriter) Write(p []byte) (n int, err error) {
 	if w.bw == nil {
 		bw := bufWriterPool.Get().(*bufio.Writer)
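The new Available method lets the write path ask how much room remains before a flush; the pooled bufio.Writer pattern behind it looks roughly like this sketch (the names below are illustrative, not the package's):

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"sync"
)

// illustrativeBufSize mirrors bufWriterPoolBufferSize (4 KB).
const illustrativeBufSize = 4 << 10

var writerPool = sync.Pool{
	New: func() interface{} { return bufio.NewWriterSize(nil, illustrativeBufSize) },
}

// writeBuffered borrows a pooled bufio.Writer, points it at w, writes p,
// and returns the writer to the pool afterwards.
func writeBuffered(w io.Writer, p []byte) error {
	bw := writerPool.Get().(*bufio.Writer)
	bw.Reset(w)
	defer func() {
		bw.Reset(nil) // drop the reference to w before pooling
		writerPool.Put(bw)
	}()
	if _, err := bw.Write(p); err != nil {
		return err
	}
	return bw.Flush()
}

func main() {
	var buf bytes.Buffer
	if err := writeBuffered(&buf, []byte("hello")); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}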
diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go
new file mode 100644
index 0000000000000000000000000000000000000000..c1fa5910fed4da1ea88037cd722671fe220453bd
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go18.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package http2
+
+import "net/http"
+
+func configureServer18(h1 *http.Server, h2 *Server) error {
+	// No IdleTimeout to sync prior to Go 1.8.
+	return nil
+}
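The go18.go/not_go18.go pair is the usual build-tag split for version-gated behavior; a generic sketch of the same pattern, with hypothetical file and function names:

// feature_go18.go
// +build go1.8

package mypkg

import (
	"net/http"
	"time"
)

// serverIdleTimeout reads http.Server.IdleTimeout, which exists only since Go 1.8.
func serverIdleTimeout(s *http.Server) time.Duration { return s.IdleTimeout }

// feature_pre_go18.go
// +build !go1.8

package mypkg

import (
	"net/http"
	"time"
)

// serverIdleTimeout reports zero on older releases, which have no IdleTimeout field.
func serverIdleTimeout(s *http.Server) time.Duration { return 0 }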
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index c986bc1b5b4093a09ddc32a26d3a8627d4cec888..370e42e8310669564426ffd8dfccf3eb95e26b43 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -2,17 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// TODO: replace all <-sc.doneServing with reads from the stream's cw
-// instead, and make sure that on close we close all open
-// streams. then remove doneServing?
-
-// TODO: re-audit GOAWAY support. Consider each incoming frame type and
-// whether it should be ignored during graceful shutdown.
-
-// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
-// configurable?  or maximum number of idle clients and remove the
-// oldest?
-
 // TODO: turn off the serve goroutine when idle, so
 // an idle conn only has the readFrames goroutine active. (which could
 // also be optimized probably to pin less memory in crypto/tls). This
@@ -44,6 +33,7 @@ import (
 	"fmt"
 	"io"
 	"log"
+	"math"
 	"net"
 	"net/http"
 	"net/textproto"
@@ -114,6 +104,15 @@ type Server struct {
 	// PermitProhibitedCipherSuites, if true, permits the use of
 	// cipher suites prohibited by the HTTP/2 spec.
 	PermitProhibitedCipherSuites bool
+
+	// IdleTimeout specifies how long until idle clients should be
+	// closed with a GOAWAY frame. PING frames are not considered
+	// activity for the purposes of IdleTimeout.
+	IdleTimeout time.Duration
+
+	// NewWriteScheduler constructs a write scheduler for a connection.
+	// If nil, a default scheduler is chosen.
+	NewWriteScheduler func() WriteScheduler
 }
 
 func (s *Server) maxReadFrameSize() uint32 {
@@ -130,15 +129,25 @@ func (s *Server) maxConcurrentStreams() uint32 {
 	return defaultMaxStreams
 }
 
+// List of funcs for ConfigureServer to run. Both h1 and h2 are guaranteed
+// to be non-nil.
+var configServerFuncs []func(h1 *http.Server, h2 *Server) error
+
 // ConfigureServer adds HTTP/2 support to a net/http Server.
 //
 // The configuration conf may be nil.
 //
 // ConfigureServer must be called before s begins serving.
 func ConfigureServer(s *http.Server, conf *Server) error {
+	if s == nil {
+		panic("nil *http.Server")
+	}
 	if conf == nil {
 		conf = new(Server)
 	}
+	if err := configureServer18(s, conf); err != nil {
+		return err
+	}
 
 	if s.TLSConfig == nil {
 		s.TLSConfig = new(tls.Config)
@@ -204,6 +213,13 @@ func ConfigureServer(s *http.Server, conf *Server) error {
 	return nil
 }
 
+// h1ServerShutdownChan if non-nil provides a func to return a channel
+// that will be closed when the provided *http.Server wants to shut
+// down. This is initialized via an init func in net/http (via its
+// mangled name from x/tools/cmd/bundle). This is only used when http2
+// is bundled into std for now.
+var h1ServerShutdownChan func(*http.Server) <-chan struct{}
+
 // ServeConnOpts are options for the Server.ServeConn method.
 type ServeConnOpts struct {
 	// BaseConfig optionally sets the base configuration
@@ -254,29 +270,35 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 	defer cancel()
 
 	sc := &serverConn{
-		srv:              s,
-		hs:               opts.baseConfig(),
-		conn:             c,
-		baseCtx:          baseCtx,
-		remoteAddrStr:    c.RemoteAddr().String(),
-		bw:               newBufferedWriter(c),
-		handler:          opts.handler(),
-		streams:          make(map[uint32]*stream),
-		readFrameCh:      make(chan readFrameResult),
-		wantWriteFrameCh: make(chan frameWriteMsg, 8),
-		wroteFrameCh:     make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
-		bodyReadCh:       make(chan bodyReadMsg),         // buffering doesn't matter either way
-		doneServing:      make(chan struct{}),
-		advMaxStreams:    s.maxConcurrentStreams(),
-		writeSched: writeScheduler{
-			maxFrameSize: initialMaxFrameSize,
-		},
+		srv:               s,
+		hs:                opts.baseConfig(),
+		conn:              c,
+		baseCtx:           baseCtx,
+		remoteAddrStr:     c.RemoteAddr().String(),
+		bw:                newBufferedWriter(c),
+		handler:           opts.handler(),
+		streams:           make(map[uint32]*stream),
+		readFrameCh:       make(chan readFrameResult),
+		wantWriteFrameCh:  make(chan FrameWriteRequest, 8),
+		wantStartPushCh:   make(chan startPushRequest, 8),
+		wroteFrameCh:      make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
+		bodyReadCh:        make(chan bodyReadMsg),         // buffering doesn't matter either way
+		doneServing:       make(chan struct{}),
+		clientMaxStreams:  math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
+		advMaxStreams:     s.maxConcurrentStreams(),
 		initialWindowSize: initialWindowSize,
+		maxFrameSize:      initialMaxFrameSize,
 		headerTableSize:   initialHeaderTableSize,
 		serveG:            newGoroutineLock(),
 		pushEnabled:       true,
 	}
 
+	if s.NewWriteScheduler != nil {
+		sc.writeSched = s.NewWriteScheduler()
+	} else {
+		sc.writeSched = NewRandomWriteScheduler()
+	}
+
 	sc.flow.add(initialWindowSize)
 	sc.inflow.add(initialWindowSize)
 	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
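A hedged configuration sketch tying the new knobs together: the IdleTimeout field and the NewWriteScheduler hook shown above, applied through ConfigureServer. The timeout value and file paths are placeholders; leaving NewWriteScheduler nil selects the same random scheduler by default.

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	h1 := &http.Server{Addr: ":8443"}
	h2 := &http2.Server{
		// Send idle HTTP/2 connections a GOAWAY after two minutes of inactivity.
		IdleTimeout: 2 * time.Minute,
		// Explicit for illustration; nil gives the same default scheduler.
		NewWriteScheduler: func() http2.WriteScheduler { return http2.NewRandomWriteScheduler() },
	}
	if err := http2.ConfigureServer(h1, h2); err != nil {
		log.Fatal(err)
	}
	// HTTP/2 requires TLS; cert.pem and key.pem are placeholder paths.
	log.Fatal(h1.ListenAndServeTLS("cert.pem", "key.pem"))
}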
@@ -356,16 +378,18 @@ type serverConn struct {
 	handler          http.Handler
 	baseCtx          contextContext
 	framer           *Framer
-	doneServing      chan struct{}         // closed when serverConn.serve ends
-	readFrameCh      chan readFrameResult  // written by serverConn.readFrames
-	wantWriteFrameCh chan frameWriteMsg    // from handlers -> serve
-	wroteFrameCh     chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
-	bodyReadCh       chan bodyReadMsg      // from handlers -> serve
-	testHookCh       chan func(int)        // code to run on the serve loop
-	flow             flow                  // conn-wide (not stream-specific) outbound flow control
-	inflow           flow                  // conn-wide inbound flow control
-	tlsState         *tls.ConnectionState  // shared by all handlers, like net/http
+	doneServing      chan struct{}          // closed when serverConn.serve ends
+	readFrameCh      chan readFrameResult   // written by serverConn.readFrames
+	wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
+	wantStartPushCh  chan startPushRequest  // from handlers -> serve
+	wroteFrameCh     chan frameWriteResult  // from writeFrameAsync -> serve, tickles more frame writes
+	bodyReadCh       chan bodyReadMsg       // from handlers -> serve
+	testHookCh       chan func(int)         // code to run on the serve loop
+	flow             flow                   // conn-wide (not stream-specific) outbound flow control
+	inflow           flow                   // conn-wide inbound flow control
+	tlsState         *tls.ConnectionState   // shared by all handlers, like net/http
 	remoteAddrStr    string
+	writeSched       WriteScheduler
 
 	// Everything following is owned by the serve loop; use serveG.check():
 	serveG                goroutineLock // used to verify funcs are on serve()
@@ -375,22 +399,27 @@ type serverConn struct {
 	unackedSettings       int    // how many SETTINGS have we sent without ACKs?
 	clientMaxStreams      uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
 	advMaxStreams         uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
-	curOpenStreams        uint32 // client's number of open streams
-	maxStreamID           uint32 // max ever seen
+	curClientStreams      uint32 // number of open streams initiated by the client
+	curPushedStreams      uint32 // number of open streams initiated by server push
+	maxStreamID           uint32 // max ever seen from client
+	maxPushPromiseID      uint32 // ID of the last push promise, or 0 if there have been no pushes
 	streams               map[uint32]*stream
 	initialWindowSize     int32
+	maxFrameSize          int32
 	headerTableSize       uint32
 	peerMaxHeaderListSize uint32            // zero means unknown (default)
 	canonHeader           map[string]string // http2-lower-case -> Go-Canonical-Case
-	writingFrame          bool              // started write goroutine but haven't heard back on wroteFrameCh
+	writingFrame          bool              // started writing a frame (on serve goroutine or separate)
+	writingFrameAsync     bool              // started a frame on its own goroutine but haven't heard back on wroteFrameCh
 	needsFrameFlush       bool              // last frame write wasn't a flush
-	writeSched            writeScheduler
-	inGoAway              bool // we've started to or sent GOAWAY
-	needToSendGoAway      bool // we need to schedule a GOAWAY frame write
+	inGoAway              bool              // we've started to or sent GOAWAY
+	inFrameScheduleLoop   bool              // whether we're in the scheduleFrameWrite loop
+	needToSendGoAway      bool              // we need to schedule a GOAWAY frame write
 	goAwayCode            ErrCode
 	shutdownTimerCh       <-chan time.Time // nil until used
 	shutdownTimer         *time.Timer      // nil until used
-	freeRequestBodyBuf    []byte           // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf
+	idleTimer             *time.Timer      // nil if unused
+	idleTimerCh           <-chan time.Time // nil if unused
 
 	// Owned by the writeFrameAsync goroutine:
 	headerWriteBuf bytes.Buffer
@@ -434,11 +463,11 @@ type stream struct {
 	numTrailerValues int64
 	weight           uint8
 	state            streamState
-	sentReset        bool // only true once detached from streams map
-	gotReset         bool // only true once detacted from streams map
-	gotTrailerHeader bool // HEADER frame for trailers was seen
-	wroteHeaders     bool // whether we wrote headers (not status 100)
-	reqBuf           []byte
+	sentReset        bool   // only true once detached from streams map
+	gotReset         bool   // only true once detached from streams map
+	gotTrailerHeader bool   // HEADER frame for trailers was seen
+	wroteHeaders     bool   // whether we wrote headers (not status 100)
+	reqBuf           []byte // if non-nil, body pipe buffer to return later at EOF
 
 	trailer    http.Header // accumulated trailers
 	reqTrailer http.Header // handler's Request.Trailer
@@ -453,7 +482,7 @@ func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
 
 func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
 	sc.serveG.check()
-	// http://http2.github.io/http2-spec/#rfc.section.5.1
+	// http://tools.ietf.org/html/rfc7540#section-5.1
 	if st, ok := sc.streams[streamID]; ok {
 		return st.state, st
 	}
@@ -603,17 +632,17 @@ func (sc *serverConn) readFrames() {
 
 // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
 type frameWriteResult struct {
-	wm  frameWriteMsg // what was written (or attempted)
-	err error         // result of the writeFrame call
+	wr  FrameWriteRequest // what was written (or attempted)
+	err error             // result of the writeFrame call
 }
 
 // writeFrameAsync runs in its own goroutine and writes a single frame
 // and then reports when it's done.
 // At most one goroutine can be running writeFrameAsync at a time per
 // serverConn.
-func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
-	err := wm.write.writeFrame(sc)
-	sc.wroteFrameCh <- frameWriteResult{wm, err}
+func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
+	err := wr.write.writeFrame(sc)
+	sc.wroteFrameCh <- frameWriteResult{wr, err}
 }
 
 func (sc *serverConn) closeAllStreamsOnConnClose() {
@@ -657,7 +686,7 @@ func (sc *serverConn) serve() {
 		sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
 	}
 
-	sc.writeFrame(frameWriteMsg{
+	sc.writeFrame(FrameWriteRequest{
 		write: writeSettings{
 			{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
 			{SettingMaxConcurrentStreams, sc.advMaxStreams},
@@ -682,6 +711,17 @@ func (sc *serverConn) serve() {
 	sc.setConnState(http.StateActive)
 	sc.setConnState(http.StateIdle)
 
+	if sc.srv.IdleTimeout != 0 {
+		sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout)
+		defer sc.idleTimer.Stop()
+		sc.idleTimerCh = sc.idleTimer.C
+	}
+
+	var gracefulShutdownCh <-chan struct{}
+	if sc.hs != nil && h1ServerShutdownChan != nil {
+		gracefulShutdownCh = h1ServerShutdownChan(sc.hs)
+	}
+
 	go sc.readFrames() // closed by defer sc.conn.Close above
 
 	settingsTimer := time.NewTimer(firstSettingsTimeout)
@@ -689,8 +729,10 @@ func (sc *serverConn) serve() {
 	for {
 		loopNum++
 		select {
-		case wm := <-sc.wantWriteFrameCh:
-			sc.writeFrame(wm)
+		case wr := <-sc.wantWriteFrameCh:
+			sc.writeFrame(wr)
+		case spr := <-sc.wantStartPushCh:
+			sc.startPush(spr)
 		case res := <-sc.wroteFrameCh:
 			sc.wroteFrame(res)
 		case res := <-sc.readFrameCh:
@@ -707,12 +749,22 @@ func (sc *serverConn) serve() {
 		case <-settingsTimer.C:
 			sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
 			return
+		case <-gracefulShutdownCh:
+			gracefulShutdownCh = nil
+			sc.goAwayIn(ErrCodeNo, 0)
 		case <-sc.shutdownTimerCh:
 			sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
 			return
+		case <-sc.idleTimerCh:
+			sc.vlogf("connection is idle")
+			sc.goAway(ErrCodeNo)
 		case fn := <-sc.testHookCh:
 			fn(loopNum)
 		}
+
+		if sc.inGoAway && sc.curClientStreams == 0 && !sc.needToSendGoAway && !sc.writingFrame {
+			return
+		}
 	}
 }
 
@@ -760,7 +812,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
 	ch := errChanPool.Get().(chan error)
 	writeArg := writeDataPool.Get().(*writeData)
 	*writeArg = writeData{stream.id, data, endStream}
-	err := sc.writeFrameFromHandler(frameWriteMsg{
+	err := sc.writeFrameFromHandler(FrameWriteRequest{
 		write:  writeArg,
 		stream: stream,
 		done:   ch,
@@ -796,17 +848,17 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
 	return err
 }
 
-// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
+// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
 // if the connection has gone away.
 //
 // This must not be run from the serve goroutine itself, else it might
 // deadlock writing to sc.wantWriteFrameCh (which is only mildly
 // buffered and is read by serve itself). If you're on the serve
 // goroutine, call writeFrame instead.
-func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
+func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
 	sc.serveG.checkNotOn() // NOT
 	select {
-	case sc.wantWriteFrameCh <- wm:
+	case sc.wantWriteFrameCh <- wr:
 		return nil
 	case <-sc.doneServing:
 		// Serve loop is gone.
@@ -823,38 +875,38 @@ func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
 // make it onto the wire
 //
 // If you're not on the serve goroutine, use writeFrameFromHandler instead.
-func (sc *serverConn) writeFrame(wm frameWriteMsg) {
+func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
 	sc.serveG.check()
 
 	var ignoreWrite bool
 
 	// Don't send a 100-continue response if we've already sent headers.
 	// See golang.org/issue/14030.
-	switch wm.write.(type) {
+	switch wr.write.(type) {
 	case *writeResHeaders:
-		wm.stream.wroteHeaders = true
+		wr.stream.wroteHeaders = true
 	case write100ContinueHeadersFrame:
-		if wm.stream.wroteHeaders {
+		if wr.stream.wroteHeaders {
 			ignoreWrite = true
 		}
 	}
 
 	if !ignoreWrite {
-		sc.writeSched.add(wm)
+		sc.writeSched.Push(wr)
 	}
 	sc.scheduleFrameWrite()
 }
 
-// startFrameWrite starts a goroutine to write wm (in a separate
+// startFrameWrite starts a goroutine to write wr (in a separate
 // goroutine since that might block on the network), and updates the
-// serve goroutine's state about the world, updated from info in wm.
-func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
+// serve goroutine's state about the world, updated from info in wr.
+func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
 	sc.serveG.check()
 	if sc.writingFrame {
 		panic("internal error: can only be writing one frame at a time")
 	}
 
-	st := wm.stream
+	st := wr.stream
 	if st != nil {
 		switch st.state {
 		case stateHalfClosedLocal:
@@ -865,13 +917,31 @@ func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
 				sc.scheduleFrameWrite()
 				return
 			}
-			panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
+			panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wr))
+		}
+	}
+	if wpp, ok := wr.write.(*writePushPromise); ok {
+		var err error
+		wpp.promisedID, err = wpp.allocatePromisedID()
+		if err != nil {
+			sc.writingFrameAsync = false
+			if wr.done != nil {
+				wr.done <- err
+			}
+			return
 		}
 	}
 
 	sc.writingFrame = true
 	sc.needsFrameFlush = true
-	go sc.writeFrameAsync(wm)
+	if wr.write.staysWithinBuffer(sc.bw.Available()) {
+		sc.writingFrameAsync = false
+		err := wr.write.writeFrame(sc)
+		sc.wroteFrame(frameWriteResult{wr, err})
+	} else {
+		sc.writingFrameAsync = true
+		go sc.writeFrameAsync(wr)
+	}
 }
 
 // errHandlerPanicked is the error given to any callers blocked in a read from
@@ -887,25 +957,26 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
 		panic("internal error: expected to be already writing a frame")
 	}
 	sc.writingFrame = false
+	sc.writingFrameAsync = false
 
-	wm := res.wm
-	st := wm.stream
+	wr := res.wr
+	st := wr.stream
 
-	closeStream := endsStream(wm.write)
+	closeStream := endsStream(wr.write)
 
-	if _, ok := wm.write.(handlerPanicRST); ok {
+	if _, ok := wr.write.(handlerPanicRST); ok {
 		sc.closeStream(st, errHandlerPanicked)
 	}
 
 	// Reply (if requested) to the blocked ServeHTTP goroutine.
-	if ch := wm.done; ch != nil {
+	if ch := wr.done; ch != nil {
 		select {
 		case ch <- res.err:
 		default:
-			panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
+			panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
 		}
 	}
-	wm.write = nil // prevent use (assume it's tainted after wm.done send)
+	wr.write = nil // prevent use (assume it's tainted after wr.done send)
 
 	if closeStream {
 		if st == nil {
@@ -916,11 +987,11 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
 			// Here we would go to stateHalfClosedLocal in
 			// theory, but since our handler is done and
 			// the net/http package provides no mechanism
-			// for finishing writing to a ResponseWriter
-			// while still reading data (see possible TODO
-			// at top of this file), we go into closed
-			// state here anyway, after telling the peer
-			// we're hanging up on them.
+			// for closing a ResponseWriter while still
+			// reading data (see possible TODO at top of
+			// this file), we go into closed state here
+			// anyway, after telling the peer we're
+			// hanging up on them.
 			st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
 			errCancel := streamError(st.id, ErrCodeCancel)
 			sc.resetStream(errCancel)
@@ -946,47 +1017,61 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
 // flush the write buffer.
 func (sc *serverConn) scheduleFrameWrite() {
 	sc.serveG.check()
-	if sc.writingFrame {
-		return
-	}
-	if sc.needToSendGoAway {
-		sc.needToSendGoAway = false
-		sc.startFrameWrite(frameWriteMsg{
-			write: &writeGoAway{
-				maxStreamID: sc.maxStreamID,
-				code:        sc.goAwayCode,
-			},
-		})
-		return
-	}
-	if sc.needToSendSettingsAck {
-		sc.needToSendSettingsAck = false
-		sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
+	if sc.writingFrame || sc.inFrameScheduleLoop {
 		return
 	}
-	if !sc.inGoAway {
-		if wm, ok := sc.writeSched.take(); ok {
-			sc.startFrameWrite(wm)
-			return
+	sc.inFrameScheduleLoop = true
+	for !sc.writingFrameAsync {
+		if sc.needToSendGoAway {
+			sc.needToSendGoAway = false
+			sc.startFrameWrite(FrameWriteRequest{
+				write: &writeGoAway{
+					maxStreamID: sc.maxStreamID,
+					code:        sc.goAwayCode,
+				},
+			})
+			continue
 		}
+		if sc.needToSendSettingsAck {
+			sc.needToSendSettingsAck = false
+			sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
+			continue
+		}
+		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
+			if wr, ok := sc.writeSched.Pop(); ok {
+				sc.startFrameWrite(wr)
+				continue
+			}
+		}
+		if sc.needsFrameFlush {
+			sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
+			sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+			continue
+		}
+		break
 	}
-	if sc.needsFrameFlush {
-		sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
-		sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
-		return
-	}
+	sc.inFrameScheduleLoop = false
 }
 
 func (sc *serverConn) goAway(code ErrCode) {
 	sc.serveG.check()
-	if sc.inGoAway {
-		return
-	}
+	var forceCloseIn time.Duration
 	if code != ErrCodeNo {
-		sc.shutDownIn(250 * time.Millisecond)
+		forceCloseIn = 250 * time.Millisecond
 	} else {
 		// TODO: configurable
-		sc.shutDownIn(1 * time.Second)
+		forceCloseIn = 1 * time.Second
+	}
+	sc.goAwayIn(code, forceCloseIn)
+}
+
+func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) {
+	sc.serveG.check()
+	if sc.inGoAway {
+		return
+	}
+	if forceCloseIn != 0 {
+		sc.shutDownIn(forceCloseIn)
 	}
 	sc.inGoAway = true
 	sc.needToSendGoAway = true
@@ -1002,7 +1087,7 @@ func (sc *serverConn) shutDownIn(d time.Duration) {
 
 func (sc *serverConn) resetStream(se StreamError) {
 	sc.serveG.check()
-	sc.writeFrame(frameWriteMsg{write: se})
+	sc.writeFrame(FrameWriteRequest{write: se})
 	if st, ok := sc.streams[se.StreamID]; ok {
 		st.sentReset = true
 		sc.closeStream(st, se)
@@ -1115,15 +1200,28 @@ func (sc *serverConn) processPing(f *PingFrame) error {
 		// PROTOCOL_ERROR."
 		return ConnectionError(ErrCodeProtocol)
 	}
-	sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
+	if sc.inGoAway {
+		return nil
+	}
+	sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
 	return nil
 }
 
 func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
 	sc.serveG.check()
+	if sc.inGoAway {
+		return nil
+	}
 	switch {
 	case f.StreamID != 0: // stream-level flow control
-		st := sc.streams[f.StreamID]
+		state, st := sc.state(f.StreamID)
+		if state == stateIdle {
+			// Section 5.1: "Receiving any frame other than HEADERS
+			// or PRIORITY on a stream in this state MUST be
+			// treated as a connection error (Section 5.4.1) of
+			// type PROTOCOL_ERROR."
+			return ConnectionError(ErrCodeProtocol)
+		}
 		if st == nil {
 			// "WINDOW_UPDATE can be sent by a peer that has sent a
 			// frame bearing the END_STREAM flag. This means that a
@@ -1146,6 +1244,9 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
 
 func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
 	sc.serveG.check()
+	if sc.inGoAway {
+		return nil
+	}
 
 	state, st := sc.state(f.StreamID)
 	if state == stateIdle {
@@ -1170,11 +1271,18 @@ func (sc *serverConn) closeStream(st *stream, err error) {
 		panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
 	}
 	st.state = stateClosed
-	sc.curOpenStreams--
-	if sc.curOpenStreams == 0 {
+	if st.isPushed() {
+		sc.curPushedStreams--
+	} else {
+		sc.curClientStreams--
+	}
+	if sc.curClientStreams+sc.curPushedStreams == 0 {
 		sc.setConnState(http.StateIdle)
 	}
 	delete(sc.streams, st.id)
+	if len(sc.streams) == 0 && sc.srv.IdleTimeout != 0 {
+		sc.idleTimer.Reset(sc.srv.IdleTimeout)
+	}
 	if p := st.body; p != nil {
 		// Return any buffered unread bytes worth of conn-level flow control.
 		// See golang.org/issue/16481
@@ -1183,19 +1291,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
 		p.CloseWithError(err)
 	}
 	st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
-	sc.writeSched.forgetStream(st.id)
-	if st.reqBuf != nil {
-		// Stash this request body buffer (64k) away for reuse
-		// by a future POST/PUT/etc.
-		//
-		// TODO(bradfitz): share on the server? sync.Pool?
-		// Server requires locks and might hurt contention.
-		// sync.Pool might work, or might be worse, depending
-		// on goroutine CPU migrations. (get and put on
-		// separate CPUs).  Maybe a mix of strategies. But
-		// this is an easy win for now.
-		sc.freeRequestBodyBuf = st.reqBuf
-	}
+	sc.writeSched.CloseStream(st.id)
 }
 
 func (sc *serverConn) processSettings(f *SettingsFrame) error {
@@ -1210,6 +1306,9 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
 		}
 		return nil
 	}
+	if sc.inGoAway {
+		return nil
+	}
 	if err := f.ForeachSetting(sc.processSetting); err != nil {
 		return err
 	}
@@ -1237,7 +1336,7 @@ func (sc *serverConn) processSetting(s Setting) error {
 	case SettingInitialWindowSize:
 		return sc.processSettingInitialWindowSize(s.Val)
 	case SettingMaxFrameSize:
-		sc.writeSched.maxFrameSize = s.Val
+		sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
 	case SettingMaxHeaderListSize:
 		sc.peerMaxHeaderListSize = s.Val
 	default:
@@ -1281,14 +1380,24 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
 
 func (sc *serverConn) processData(f *DataFrame) error {
 	sc.serveG.check()
+	if sc.inGoAway {
+		return nil
+	}
 	data := f.Data()
 
 	// "If a DATA frame is received whose stream is not in "open"
 	// or "half closed (local)" state, the recipient MUST respond
 	// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
 	id := f.Header().StreamID
-	st, ok := sc.streams[id]
-	if !ok || st.state != stateOpen || st.gotTrailerHeader {
+	state, st := sc.state(id)
+	if id == 0 || state == stateIdle {
+		// Section 5.1: "Receiving any frame other than HEADERS
+		// or PRIORITY on a stream in this state MUST be
+		// treated as a connection error (Section 5.4.1) of
+		// type PROTOCOL_ERROR."
+		return ConnectionError(ErrCodeProtocol)
+	}
+	if st == nil || state != stateOpen || st.gotTrailerHeader {
 		// This includes sending a RST_STREAM if the stream is
 		// in stateHalfClosedLocal (which currently means that
 		// the http.Handler returned, so it's done reading &
@@ -1350,6 +1459,11 @@ func (sc *serverConn) processData(f *DataFrame) error {
 	return nil
 }
 
+// isPushed reports whether the stream is server-initiated.
+func (st *stream) isPushed() bool {
+	return st.id%2 == 0
+}
+
 // endStream closes a Request.Body's pipe. It is called when a DATA
 // frame says a request body is over (or after trailers).
 func (st *stream) endStream() {
@@ -1379,12 +1493,12 @@ func (st *stream) copyTrailersToHandlerRequest() {
 
 func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	sc.serveG.check()
-	id := f.Header().StreamID
+	id := f.StreamID
 	if sc.inGoAway {
 		// Ignore.
 		return nil
 	}
-	// http://http2.github.io/http2-spec/#rfc.section.5.1.1
+	// http://tools.ietf.org/html/rfc7540#section-5.1.1
 	// Streams initiated by a client MUST use odd-numbered stream
 	// identifiers. [...] An endpoint that receives an unexpected
 	// stream identifier MUST respond with a connection error
@@ -1396,8 +1510,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	// send a trailer for an open one. If we already have a stream
 	// open, let it process its own HEADERS frame (trailers at this
 	// point, if it's valid).
-	st := sc.streams[f.Header().StreamID]
-	if st != nil {
+	if st := sc.streams[f.StreamID]; st != nil {
 		return st.processTrailerHeaders(f)
 	}
 
@@ -1411,49 +1524,40 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	}
 	sc.maxStreamID = id
 
-	ctx, cancelCtx := contextWithCancel(sc.baseCtx)
-	st = &stream{
-		sc:        sc,
-		id:        id,
-		state:     stateOpen,
-		ctx:       ctx,
-		cancelCtx: cancelCtx,
+	if sc.idleTimer != nil {
+		sc.idleTimer.Stop()
 	}
-	if f.StreamEnded() {
-		st.state = stateHalfClosedRemote
-	}
-	st.cw.Init()
 
-	st.flow.conn = &sc.flow // link to conn-level counter
-	st.flow.add(sc.initialWindowSize)
-	st.inflow.conn = &sc.inflow      // link to conn-level counter
-	st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
-
-	sc.streams[id] = st
-	if f.HasPriority() {
-		adjustStreamPriority(sc.streams, st.id, f.Priority)
-	}
-	sc.curOpenStreams++
-	if sc.curOpenStreams == 1 {
-		sc.setConnState(http.StateActive)
-	}
-	if sc.curOpenStreams > sc.advMaxStreams {
-		// "Endpoints MUST NOT exceed the limit set by their
-		// peer. An endpoint that receives a HEADERS frame
-		// that causes their advertised concurrent stream
-		// limit to be exceeded MUST treat this as a stream
-		// error (Section 5.4.2) of type PROTOCOL_ERROR or
-		// REFUSED_STREAM."
+	// http://tools.ietf.org/html/rfc7540#section-5.1.2
+	// [...] Endpoints MUST NOT exceed the limit set by their peer. An
+	// endpoint that receives a HEADERS frame that causes their
+	// advertised concurrent stream limit to be exceeded MUST treat
+	// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
+	// or REFUSED_STREAM.
+	if sc.curClientStreams+1 > sc.advMaxStreams {
 		if sc.unackedSettings == 0 {
 			// They should know better.
-			return streamError(st.id, ErrCodeProtocol)
+			return streamError(id, ErrCodeProtocol)
 		}
 		// Assume it's a network race, where they just haven't
 		// received our last SETTINGS update. But actually
 		// this can't happen yet, because we don't yet provide
 		// a way for users to adjust server parameters at
 		// runtime.
-		return streamError(st.id, ErrCodeRefusedStream)
+		return streamError(id, ErrCodeRefusedStream)
+	}
+
+	initialState := stateOpen
+	if f.StreamEnded() {
+		initialState = stateHalfClosedRemote
+	}
+	st := sc.newStream(id, 0, initialState)
+
+	if f.HasPriority() {
+		if err := checkPriority(f.StreamID, f.Priority); err != nil {
+			return err
+		}
+		sc.writeSched.AdjustStream(st.id, f.Priority)
 	}
 
 	rw, req, err := sc.newWriterAndRequest(st, f)
@@ -1471,19 +1575,17 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	if f.Truncated {
 		// Their header list was too long. Send a 431 error.
 		handler = handleHeaderListTooLong
-	} else if err := checkValidHTTP2Request(req); err != nil {
+	} else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
 		handler = new400Handler(err)
 	}
 
 	// The net/http package sets the read deadline from the
 	// http.Server.ReadTimeout during the TLS handshake, but then
 	// passes the connection off to us with the deadline already
-	// set. Disarm it here after the request headers are read, similar
-	// to how the http1 server works.
-	// Unlike http1, though, we never re-arm it yet, though.
-	// TODO(bradfitz): figure out golang.org/issue/14204
-	// (IdleTimeout) and how this relates. Maybe the default
-	// IdleTimeout is ReadTimeout.
+	// set. Disarm it here after the request headers are read,
+	// similar to how the http1 server works. Here it's
+	// technically more like the http1 Server's ReadHeaderTimeout
+	// (in Go 1.8), though. That's a more sane option anyway.
 	if sc.hs.ReadTimeout != 0 {
 		sc.conn.SetReadDeadline(time.Time{})
 	}
@@ -1522,62 +1624,78 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
 	return nil
 }
 
+func checkPriority(streamID uint32, p PriorityParam) error {
+	if streamID == p.StreamDep {
+		// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
+		// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
+		// Section 5.3.3 says that a stream can depend on one of its dependencies,
+		// so it's only self-dependencies that are forbidden.
+		return streamError(streamID, ErrCodeProtocol)
+	}
+	return nil
+}
+
 func (sc *serverConn) processPriority(f *PriorityFrame) error {
-	adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
+	if sc.inGoAway {
+		return nil
+	}
+	if err := checkPriority(f.StreamID, f.PriorityParam); err != nil {
+		return err
+	}
+	sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
 	return nil
 }
 
-func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
-	st, ok := streams[streamID]
-	if !ok {
-		// TODO: not quite correct (this streamID might
-		// already exist in the dep tree, but be closed), but
-		// close enough for now.
-		return
+func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
+	sc.serveG.check()
+	if id == 0 {
+		panic("internal error: cannot create stream with id 0")
 	}
-	st.weight = priority.Weight
-	parent := streams[priority.StreamDep] // might be nil
-	if parent == st {
-		// if client tries to set this stream to be the parent of itself
-		// ignore and keep going
-		return
+
+	ctx, cancelCtx := contextWithCancel(sc.baseCtx)
+	st := &stream{
+		sc:        sc,
+		id:        id,
+		state:     state,
+		ctx:       ctx,
+		cancelCtx: cancelCtx,
 	}
+	st.cw.Init()
+	st.flow.conn = &sc.flow // link to conn-level counter
+	st.flow.add(sc.initialWindowSize)
+	st.inflow.conn = &sc.inflow      // link to conn-level counter
+	st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
 
-	// section 5.3.3: If a stream is made dependent on one of its
-	// own dependencies, the formerly dependent stream is first
-	// moved to be dependent on the reprioritized stream's previous
-	// parent. The moved dependency retains its weight.
-	for piter := parent; piter != nil; piter = piter.parent {
-		if piter == st {
-			parent.parent = st.parent
-			break
-		}
+	sc.streams[id] = st
+	sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
+	if st.isPushed() {
+		sc.curPushedStreams++
+	} else {
+		sc.curClientStreams++
 	}
-	st.parent = parent
-	if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
-		for _, openStream := range streams {
-			if openStream != st && openStream.parent == st.parent {
-				openStream.parent = st
-			}
-		}
+	if sc.curClientStreams+sc.curPushedStreams == 1 {
+		sc.setConnState(http.StateActive)
 	}
+
+	return st
 }
 
 func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
 	sc.serveG.check()
 
-	method := f.PseudoValue("method")
-	path := f.PseudoValue("path")
-	scheme := f.PseudoValue("scheme")
-	authority := f.PseudoValue("authority")
+	rp := requestParam{
+		method:    f.PseudoValue("method"),
+		scheme:    f.PseudoValue("scheme"),
+		authority: f.PseudoValue("authority"),
+		path:      f.PseudoValue("path"),
+	}
 
-	isConnect := method == "CONNECT"
+	isConnect := rp.method == "CONNECT"
 	if isConnect {
-		if path != "" || scheme != "" || authority == "" {
+		if rp.path != "" || rp.scheme != "" || rp.authority == "" {
 			return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
 		}
-	} else if method == "" || path == "" ||
-		(scheme != "https" && scheme != "http") {
+	} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
 		// See 8.1.2.6 Malformed Requests and Responses:
 		//
 		// Malformed requests or responses that are detected
@@ -1592,36 +1710,64 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
 	}
 
 	bodyOpen := !f.StreamEnded()
-	if method == "HEAD" && bodyOpen {
+	if rp.method == "HEAD" && bodyOpen {
 		// HEAD requests can't have bodies
 		return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
 	}
-	var tlsState *tls.ConnectionState // nil if not scheme https
 
-	if scheme == "https" {
-		tlsState = sc.tlsState
+	rp.header = make(http.Header)
+	for _, hf := range f.RegularFields() {
+		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+	}
+	if rp.authority == "" {
+		rp.authority = rp.header.Get("Host")
 	}
 
-	header := make(http.Header)
-	for _, hf := range f.RegularFields() {
-		header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
+	if err != nil {
+		return nil, nil, err
+	}
+	if bodyOpen {
+		st.reqBuf = getRequestBodyBuf()
+		req.Body.(*requestBody).pipe = &pipe{
+			b: &fixedBuffer{buf: st.reqBuf},
+		}
+
+		if vv, ok := rp.header["Content-Length"]; ok {
+			req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+		} else {
+			req.ContentLength = -1
+		}
 	}
+	return rw, req, nil
+}
+
+type requestParam struct {
+	method                  string
+	scheme, authority, path string
+	header                  http.Header
+}
 
-	if authority == "" {
-		authority = header.Get("Host")
+func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
+	sc.serveG.check()
+
+	var tlsState *tls.ConnectionState // nil if not scheme https
+	if rp.scheme == "https" {
+		tlsState = sc.tlsState
 	}
-	needsContinue := header.Get("Expect") == "100-continue"
+
+	needsContinue := rp.header.Get("Expect") == "100-continue"
 	if needsContinue {
-		header.Del("Expect")
+		rp.header.Del("Expect")
 	}
 	// Merge Cookie headers into one "; "-delimited value.
-	if cookies := header["Cookie"]; len(cookies) > 1 {
-		header.Set("Cookie", strings.Join(cookies, "; "))
+	if cookies := rp.header["Cookie"]; len(cookies) > 1 {
+		rp.header.Set("Cookie", strings.Join(cookies, "; "))
 	}
 
 	// Setup Trailers
 	var trailer http.Header
-	for _, v := range header["Trailer"] {
+	for _, v := range rp.header["Trailer"] {
 		for _, key := range strings.Split(v, ",") {
 			key = http.CanonicalHeaderKey(strings.TrimSpace(key))
 			switch key {
@@ -1636,57 +1782,42 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
 			}
 		}
 	}
-	delete(header, "Trailer")
+	delete(rp.header, "Trailer")
 
-	body := &requestBody{
-		conn:          sc,
-		stream:        st,
-		needsContinue: needsContinue,
-	}
 	var url_ *url.URL
 	var requestURI string
-	if isConnect {
-		url_ = &url.URL{Host: authority}
-		requestURI = authority // mimic HTTP/1 server behavior
+	if rp.method == "CONNECT" {
+		url_ = &url.URL{Host: rp.authority}
+		requestURI = rp.authority // mimic HTTP/1 server behavior
 	} else {
 		var err error
-		url_, err = url.ParseRequestURI(path)
+		url_, err = url.ParseRequestURI(rp.path)
 		if err != nil {
-			return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+			return nil, nil, streamError(st.id, ErrCodeProtocol)
 		}
-		requestURI = path
+		requestURI = rp.path
+	}
+
+	body := &requestBody{
+		conn:          sc,
+		stream:        st,
+		needsContinue: needsContinue,
 	}
 	req := &http.Request{
-		Method:     method,
+		Method:     rp.method,
 		URL:        url_,
 		RemoteAddr: sc.remoteAddrStr,
-		Header:     header,
+		Header:     rp.header,
 		RequestURI: requestURI,
 		Proto:      "HTTP/2.0",
 		ProtoMajor: 2,
 		ProtoMinor: 0,
 		TLS:        tlsState,
-		Host:       authority,
+		Host:       rp.authority,
 		Body:       body,
 		Trailer:    trailer,
 	}
 	req = requestWithContext(req, st.ctx)
-	if bodyOpen {
-		// Disabled, per golang.org/issue/14960:
-		// st.reqBuf = sc.getRequestBodyBuf()
-		// TODO: remove this 64k of garbage per request (again, but without a data race):
-		buf := make([]byte, initialWindowSize)
-
-		body.pipe = &pipe{
-			b: &fixedBuffer{buf: buf},
-		}
-
-		if vv, ok := header["Content-Length"]; ok {
-			req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
-		} else {
-			req.ContentLength = -1
-		}
-	}
 
 	rws := responseWriterStatePool.Get().(*responseWriterState)
 	bwSave := rws.bw
@@ -1702,13 +1833,22 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
 	return rw, req, nil
 }
 
-func (sc *serverConn) getRequestBodyBuf() []byte {
-	sc.serveG.check()
-	if buf := sc.freeRequestBodyBuf; buf != nil {
-		sc.freeRequestBodyBuf = nil
-		return buf
+var reqBodyCache = make(chan []byte, 8)
+
+func getRequestBodyBuf() []byte {
+	select {
+	case b := <-reqBodyCache:
+		return b
+	default:
+		return make([]byte, initialWindowSize)
+	}
+}
+
+func putRequestBodyBuf(b []byte) {
+	select {
+	case reqBodyCache <- b:
+	default:
 	}
-	return make([]byte, initialWindowSize)
 }
 
 // Run on its own goroutine.
@@ -1722,7 +1862,7 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler
 			const size = 64 << 10
 			buf := make([]byte, size)
 			buf = buf[:runtime.Stack(buf, false)]
-			sc.writeFrameFromHandler(frameWriteMsg{
+			sc.writeFrameFromHandler(FrameWriteRequest{
 				write:  handlerPanicRST{rw.rws.stream.id},
 				stream: rw.rws.stream,
 			})
@@ -1757,7 +1897,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
 		// mutates it.
 		errc = errChanPool.Get().(chan error)
 	}
-	if err := sc.writeFrameFromHandler(frameWriteMsg{
+	if err := sc.writeFrameFromHandler(FrameWriteRequest{
 		write:  headerData,
 		stream: st,
 		done:   errc,
@@ -1780,7 +1920,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
 
 // called from handler goroutines.
 func (sc *serverConn) write100ContinueHeaders(st *stream) {
-	sc.writeFrameFromHandler(frameWriteMsg{
+	sc.writeFrameFromHandler(FrameWriteRequest{
 		write:  write100ContinueHeadersFrame{st.id},
 		stream: st,
 	})
@@ -1796,11 +1936,19 @@ type bodyReadMsg struct {
 // called from handler goroutines.
 // Notes that the handler for the given stream ID read n bytes of its body
 // and schedules flow control tokens to be sent.
-func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
 	sc.serveG.checkNotOn() // NOT on
-	select {
-	case sc.bodyReadCh <- bodyReadMsg{st, n}:
-	case <-sc.doneServing:
+	if n > 0 {
+		select {
+		case sc.bodyReadCh <- bodyReadMsg{st, n}:
+		case <-sc.doneServing:
+		}
+	}
+	if err == io.EOF {
+		if buf := st.reqBuf; buf != nil {
+			st.reqBuf = nil // shouldn't matter; field unused by others
+			putRequestBodyBuf(buf)
+		}
 	}
 }
 
@@ -1843,7 +1991,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
 	if st != nil {
 		streamID = st.id
 	}
-	sc.writeFrame(frameWriteMsg{
+	sc.writeFrame(FrameWriteRequest{
 		write:  writeWindowUpdate{streamID: streamID, n: uint32(n)},
 		stream: st,
 	})
@@ -1858,16 +2006,19 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
 	}
 }
 
+// requestBody is the Handler's Request.Body type.
+// Read and Close may be called concurrently.
 type requestBody struct {
 	stream        *stream
 	conn          *serverConn
-	closed        bool
+	closed        bool  // for use by Close only
+	sawEOF        bool  // for use by Read only
 	pipe          *pipe // non-nil if we have a HTTP entity message body
 	needsContinue bool  // need to send a 100-continue
 }
 
 func (b *requestBody) Close() error {
-	if b.pipe != nil {
+	if b.pipe != nil && !b.closed {
 		b.pipe.BreakWithError(errClosedBody)
 	}
 	b.closed = true
@@ -1879,13 +2030,17 @@ func (b *requestBody) Read(p []byte) (n int, err error) {
 		b.needsContinue = false
 		b.conn.write100ContinueHeaders(b.stream)
 	}
-	if b.pipe == nil {
+	if b.pipe == nil || b.sawEOF {
 		return 0, io.EOF
 	}
 	n, err = b.pipe.Read(p)
-	if n > 0 {
-		b.conn.noteBodyReadFromHandler(b.stream, n)
+	if err == io.EOF {
+		b.sawEOF = true
 	}
+	if b.conn == nil && inTests {
+		return
+	}
+	b.conn.noteBodyReadFromHandler(b.stream, n, err)
 	return
 }
 
@@ -2220,6 +2375,194 @@ func (w *responseWriter) handlerDone() {
 	responseWriterStatePool.Put(rws)
 }
 
+// Push errors.
+var (
+	ErrRecursivePush    = errors.New("http2: recursive push not allowed")
+	ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
+)
+
+// pushOptions is the internal version of http.PushOptions, which we
+// cannot include here because it's only defined in Go 1.8 and later.
+type pushOptions struct {
+	Method string
+	Header http.Header
+}
+
+func (w *responseWriter) push(target string, opts pushOptions) error {
+	st := w.rws.stream
+	sc := st.sc
+	sc.serveG.checkNotOn()
+
+	// No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
+	// http://tools.ietf.org/html/rfc7540#section-6.6
+	if st.isPushed() {
+		return ErrRecursivePush
+	}
+
+	// Default options.
+	if opts.Method == "" {
+		opts.Method = "GET"
+	}
+	if opts.Header == nil {
+		opts.Header = http.Header{}
+	}
+	wantScheme := "http"
+	if w.rws.req.TLS != nil {
+		wantScheme = "https"
+	}
+
+	// Validate the request.
+	u, err := url.Parse(target)
+	if err != nil {
+		return err
+	}
+	if u.Scheme == "" {
+		if !strings.HasPrefix(target, "/") {
+			return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
+		}
+		u.Scheme = wantScheme
+		u.Host = w.rws.req.Host
+	} else {
+		if u.Scheme != wantScheme {
+			return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
+		}
+		if u.Host == "" {
+			return errors.New("URL must have a host")
+		}
+	}
+	for k := range opts.Header {
+		if strings.HasPrefix(k, ":") {
+			return fmt.Errorf("promised request headers cannot include psuedo header %q", k)
+		}
+		// These headers are meaningful only if the request has a body,
+		// but PUSH_PROMISE requests cannot have a body.
+		// http://tools.ietf.org/html/rfc7540#section-8.2
+		// Also disallow Host, since the promised URL must be absolute.
+		switch strings.ToLower(k) {
+		case "content-length", "content-encoding", "trailer", "te", "expect", "host":
+			return fmt.Errorf("promised request headers cannot include %q", k)
+		}
+	}
+	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+		return err
+	}
+
+	// The RFC effectively limits promised requests to GET and HEAD:
+	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+	// http://tools.ietf.org/html/rfc7540#section-8.2
+	if opts.Method != "GET" && opts.Method != "HEAD" {
+		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+	}
+
+	msg := startPushRequest{
+		parent: st,
+		method: opts.Method,
+		url:    u,
+		header: cloneHeader(opts.Header),
+		done:   errChanPool.Get().(chan error),
+	}
+
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case sc.wantStartPushCh <- msg:
+	}
+
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case err := <-msg.done:
+		errChanPool.Put(msg.done)
+		return err
+	}
+}
+
+type startPushRequest struct {
+	parent *stream
+	method string
+	url    *url.URL
+	header http.Header
+	done   chan error
+}
+
+func (sc *serverConn) startPush(msg startPushRequest) {
+	sc.serveG.check()
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+	// is in either the "open" or "half-closed (remote)" state.
+	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
+		// responseWriter.push checks that the stream is peer-initiated.
+		msg.done <- errStreamClosed
+		return
+	}
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	if !sc.pushEnabled {
+		msg.done <- http.ErrNotSupported
+		return
+	}
+
+	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+	// is written. Once the ID is allocated, we start the request handler.
+	allocatePromisedID := func() (uint32, error) {
+		sc.serveG.check()
+
+		// Check this again, just in case. Technically, we might have received
+		// an updated SETTINGS by the time we got around to writing this frame.
+		if !sc.pushEnabled {
+			return 0, http.ErrNotSupported
+		}
+		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
+		if sc.curPushedStreams+1 > sc.clientMaxStreams {
+			return 0, ErrPushLimitReached
+		}
+
+		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
+		// Streams initiated by the server MUST use even-numbered identifiers.
+		sc.maxPushPromiseID += 2
+		promisedID := sc.maxPushPromiseID
+
+		// http://tools.ietf.org/html/rfc7540#section-8.2.
+		// Strictly speaking, the new stream should start in "reserved (local)", then
+		// transition to "half closed (remote)" after sending the initial HEADERS, but
+		// we start in "half closed (remote)" for simplicity.
+		// See further comments at the definition of stateHalfClosedRemote.
+		promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
+		rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
+			method:    msg.method,
+			scheme:    msg.url.Scheme,
+			authority: msg.url.Host,
+			path:      msg.url.RequestURI(),
+			header:    msg.header,
+		})
+		if err != nil {
+			// Should not happen, since we've already validated msg.url.
+			panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
+		}
+
+		go sc.runHandler(rw, req, sc.handler.ServeHTTP)
+		return promisedID, nil
+	}
+
+	sc.writeFrame(FrameWriteRequest{
+		write: &writePushPromise{
+			streamID:           msg.parent.id,
+			method:             msg.method,
+			url:                msg.url,
+			h:                  msg.header,
+			allocatePromisedID: allocatePromisedID,
+		},
+		stream: msg.parent,
+		done:   msg.done,
+	})
+}
+
 // foreachHeaderElement splits v according to the "#rule" construction
 // in RFC 2616 section 2.1 and calls fn for each non-empty element.
 func foreachHeaderElement(v string, fn func(string)) {
@@ -2247,16 +2590,16 @@ var connHeaders = []string{
 	"Upgrade",
 }
 
-// checkValidHTTP2Request checks whether req is a valid HTTP/2 request,
+// checkValidHTTP2RequestHeaders checks whether h contains valid HTTP/2 request headers,
 // per RFC 7540 Section 8.1.2.2.
 // The returned error is reported to users.
-func checkValidHTTP2Request(req *http.Request) error {
-	for _, h := range connHeaders {
-		if _, ok := req.Header[h]; ok {
-			return fmt.Errorf("request header %q is not valid in HTTP/2", h)
+func checkValidHTTP2RequestHeaders(h http.Header) error {
+	for _, k := range connHeaders {
+		if _, ok := h[k]; ok {
+			return fmt.Errorf("request header %q is not valid in HTTP/2", k)
 		}
 	}
-	te := req.Header["Te"]
+	te := h["Te"]
 	if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
 		return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
 	}
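
For context on the push plumbing added above: pushOptions mirrors http.PushOptions from Go 1.8, and responseWriter.push is what a Go 1.8 http.Pusher adapter would call. A minimal handler-side sketch, assuming the standard library's http.Pusher interface (not part of this diff); the pushed path and certificate filenames are placeholders:

package main

import (
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Push is best-effort: it can fail if the client disabled push, the
	// current stream is itself pushed, or the peer's concurrency limit is hit.
	if pusher, ok := w.(http.Pusher); ok {
		err := pusher.Push("/static/app.css", &http.PushOptions{
			Method: "GET",
			Header: http.Header{"Accept-Encoding": r.Header["Accept-Encoding"]},
		})
		if err != nil {
			log.Printf("push failed: %v", err)
		}
	}
	w.Write([]byte(`<html><head><link rel="stylesheet" href="/static/app.css"></head></html>`))
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}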
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index b939fed2eac24794db42865a4fd932ad35556463..129c8e02b8bfd7e137e2840e4596d4439d1cc091 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -635,39 +635,17 @@ func checkConnHeaders(req *http.Request) error {
 	return nil
 }
 
-func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
-	body = req.Body
-	if body == nil {
-		return nil, 0
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
+func actualContentLength(req *http.Request) int64 {
+	if req.Body == nil {
+		return 0
 	}
 	if req.ContentLength != 0 {
-		return req.Body, req.ContentLength
-	}
-	// Don't try to sniff the size if they're doing an expect
-	// request (Issue 16002):
-	if req.Header.Get("Expect") == "100-continue" {
-		return req.Body, -1
-	}
-
-	// We have a body but a zero content length. Test to see if
-	// it's actually zero or just unset.
-	var buf [1]byte
-	n, rerr := body.Read(buf[:])
-	if rerr != nil && rerr != io.EOF {
-		return errorReader{rerr}, -1
-	}
-	if n == 1 {
-		// Oh, guess there is data in this Body Reader after all.
-		// The ContentLength field just wasn't set.
-		// Stitch the Body back together again, re-attaching our
-		// consumed byte.
-		if rerr == io.EOF {
-			return bytes.NewReader(buf[:]), 1
-		}
-		return io.MultiReader(bytes.NewReader(buf[:]), body), -1
+		return req.ContentLength
 	}
-	// Body is actually zero bytes.
-	return nil, 0
+	return -1
 }
 
 func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
@@ -691,8 +669,9 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
 		return nil, errClientConnUnusable
 	}
 
-	body, contentLen := bodyAndLength(req)
+	body := req.Body
 	hasBody := body != nil
+	contentLen := actualContentLength(req)
 
 	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
 	var requestedGzip bool
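
The sniffing logic removed above means the transport now trusts req.ContentLength: 0 only when there is no body, -1 when a body exists but its length is unknown. A small, self-contained illustration of why that distinction exists (example.com is a placeholder URL):

package main

import (
	"crypto/rand"
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// http.NewRequest knows the length only for a few concrete reader types
	// (*bytes.Buffer, *bytes.Reader, *strings.Reader)...
	r1, _ := http.NewRequest("POST", "https://example.com/", strings.NewReader("hi"))
	fmt.Println(r1.ContentLength) // 2: a known, non-zero length

	// ...for any other io.Reader, ContentLength stays 0 even though a body
	// exists, which actualContentLength reports as -1 (unknown).
	r2, _ := http.NewRequest("POST", "https://example.com/", io.LimitReader(rand.Reader, 2))
	fmt.Println(r2.ContentLength, r2.Body != nil) // 0 true
}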
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
index 27ef0dd4d728c06702ea342bef37d65aba2d6a1a..1c135fdf7f0b82ce719e33102d3c2a1116badf96 100644
--- a/vendor/golang.org/x/net/http2/write.go
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"log"
 	"net/http"
+	"net/url"
 	"time"
 
 	"golang.org/x/net/http2/hpack"
@@ -18,6 +19,11 @@ import (
 // writeFramer is implemented by any type that is used to write frames.
 type writeFramer interface {
 	writeFrame(writeContext) error
+
+	// staysWithinBuffer reports whether this writer promises that
+	// it will only write less than or equal to size bytes, and it
+	// won't Flush the write context.
+	staysWithinBuffer(size int) bool
 }
 
 // writeContext is the interface needed by the various frame writer
@@ -62,8 +68,16 @@ func (flushFrameWriter) writeFrame(ctx writeContext) error {
 	return ctx.Flush()
 }
 
+func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
+
 type writeSettings []Setting
 
+func (s writeSettings) staysWithinBuffer(max int) bool {
+	const settingSize = 6 // uint16 + uint32
+	return frameHeaderLen+settingSize*len(s) <= max
+
+}
+
 func (s writeSettings) writeFrame(ctx writeContext) error {
 	return ctx.Framer().WriteSettings([]Setting(s)...)
 }
@@ -83,6 +97,8 @@ func (p *writeGoAway) writeFrame(ctx writeContext) error {
 	return err
 }
 
+func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
+
 type writeData struct {
 	streamID  uint32
 	p         []byte
@@ -97,6 +113,10 @@ func (w *writeData) writeFrame(ctx writeContext) error {
 	return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
 }
 
+func (w *writeData) staysWithinBuffer(max int) bool {
+	return frameHeaderLen+len(w.p) <= max
+}
+
 // handlerPanicRST is the message sent from handler goroutines when
 // the handler panics.
 type handlerPanicRST struct {
@@ -107,22 +127,57 @@ func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
 	return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
 }
 
+func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
 func (se StreamError) writeFrame(ctx writeContext) error {
 	return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
 }
 
+func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
 type writePingAck struct{ pf *PingFrame }
 
 func (w writePingAck) writeFrame(ctx writeContext) error {
 	return ctx.Framer().WritePing(true, w.pf.Data)
 }
 
+func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
+
 type writeSettingsAck struct{}
 
 func (writeSettingsAck) writeFrame(ctx writeContext) error {
 	return ctx.Framer().WriteSettingsAck()
 }
 
+func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
+
+// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
+// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
+// for the first/last fragment, respectively.
+func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
+	// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+	// that all peers must support (16KB). Later we could care
+	// more and send larger frames if the peer advertised it, but
+	// there's little point. Most headers are small anyway (so we
+	// generally won't have CONTINUATION frames), and extra frames
+	// only waste 9 bytes anyway.
+	const maxFrameSize = 16384
+
+	first := true
+	for len(headerBlock) > 0 {
+		frag := headerBlock
+		if len(frag) > maxFrameSize {
+			frag = frag[:maxFrameSize]
+		}
+		headerBlock = headerBlock[len(frag):]
+		if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
+			return err
+		}
+		first = false
+	}
+	return nil
+}
+
 // writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
 // for HTTP response headers or trailers from a server handler.
 type writeResHeaders struct {
@@ -144,6 +199,17 @@ func encKV(enc *hpack.Encoder, k, v string) {
 	enc.WriteField(hpack.HeaderField{Name: k, Value: v})
 }
 
+func (w *writeResHeaders) staysWithinBuffer(max int) bool {
+	// TODO: this is a common one. It'd be nice to return true
+	// here and get into the fast path if we could be clever and
+	// calculate the size fast enough, or at least a conservative
+	// upper bound that usually fires. (Maybe if w.h and
+	// w.trailers are nil, so we don't need to enumerate it.)
+	// Otherwise I'm afraid that just calculating the length to
+	// answer this question would be slower than the ~2µs benefit.
+	return false
+}
+
 func (w *writeResHeaders) writeFrame(ctx writeContext) error {
 	enc, buf := ctx.HeaderEncoder()
 	buf.Reset()
@@ -169,39 +235,69 @@ func (w *writeResHeaders) writeFrame(ctx writeContext) error {
 		panic("unexpected empty hpack")
 	}
 
-	// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
-	// that all peers must support (16KB). Later we could care
-	// more and send larger frames if the peer advertised it, but
-	// there's little point. Most headers are small anyway (so we
-	// generally won't have CONTINUATION frames), and extra frames
-	// only waste 9 bytes anyway.
-	const maxFrameSize = 16384
+	return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
 
-	first := true
-	for len(headerBlock) > 0 {
-		frag := headerBlock
-		if len(frag) > maxFrameSize {
-			frag = frag[:maxFrameSize]
-		}
-		headerBlock = headerBlock[len(frag):]
-		endHeaders := len(headerBlock) == 0
-		var err error
-		if first {
-			first = false
-			err = ctx.Framer().WriteHeaders(HeadersFrameParam{
-				StreamID:      w.streamID,
-				BlockFragment: frag,
-				EndStream:     w.endStream,
-				EndHeaders:    endHeaders,
-			})
-		} else {
-			err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
-		}
-		if err != nil {
-			return err
-		}
+func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+	if firstFrag {
+		return ctx.Framer().WriteHeaders(HeadersFrameParam{
+			StreamID:      w.streamID,
+			BlockFragment: frag,
+			EndStream:     w.endStream,
+			EndHeaders:    lastFrag,
+		})
+	} else {
+		return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+	}
+}
+
+// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
+type writePushPromise struct {
+	streamID uint32   // pusher stream
+	method   string   // for :method
+	url      *url.URL // for :scheme, :authority, :path
+	h        http.Header
+
+	// Creates an ID for a pushed stream. This runs on serveG just before
+	// the frame is written. The returned ID is copied to promisedID.
+	allocatePromisedID func() (uint32, error)
+	promisedID         uint32
+}
+
+func (w *writePushPromise) staysWithinBuffer(max int) bool {
+	// TODO: see writeResHeaders.staysWithinBuffer
+	return false
+}
+
+func (w *writePushPromise) writeFrame(ctx writeContext) error {
+	enc, buf := ctx.HeaderEncoder()
+	buf.Reset()
+
+	encKV(enc, ":method", w.method)
+	encKV(enc, ":scheme", w.url.Scheme)
+	encKV(enc, ":authority", w.url.Host)
+	encKV(enc, ":path", w.url.RequestURI())
+	encodeHeaders(enc, w.h, nil)
+
+	headerBlock := buf.Bytes()
+	if len(headerBlock) == 0 {
+		panic("unexpected empty hpack")
+	}
+
+	return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+	if firstFrag {
+		return ctx.Framer().WritePushPromise(PushPromiseParam{
+			StreamID:      w.streamID,
+			PromiseID:     w.promisedID,
+			BlockFragment: frag,
+			EndHeaders:    lastFrag,
+		})
+	} else {
+		return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
 	}
-	return nil
 }
 
 type write100ContinueHeadersFrame struct {
@@ -220,15 +316,24 @@ func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
 	})
 }
 
+func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
+	// Sloppy but conservative:
+	return 9+2*(len(":status")+len("100")) <= max
+}
+
 type writeWindowUpdate struct {
 	streamID uint32 // or 0 for conn-level
 	n        uint32
 }
 
+func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
 func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
 	return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
 }
 
+// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
+// is encoded only if k is in keys.
 func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
 	if keys == nil {
 		sorter := sorterPool.Get().(*sorter)
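
splitHeaderBlock above factors out the CONTINUATION framing that writeResHeaders and writePushPromise now share. A self-contained sketch of the same splitting rule, with the package's writeContext omitted:

package main

import "fmt"

// splitFragments cuts an encoded header block into <=maxFrameSize fragments
// and reports which fragment is the first (carried by HEADERS or PUSH_PROMISE)
// and which is the last (the one that sets END_HEADERS).
func splitFragments(block []byte, maxFrameSize int, fn func(frag []byte, first, last bool) error) error {
	first := true
	for len(block) > 0 {
		frag := block
		if len(frag) > maxFrameSize {
			frag = frag[:maxFrameSize]
		}
		block = block[len(frag):]
		if err := fn(frag, first, len(block) == 0); err != nil {
			return err
		}
		first = false
	}
	return nil
}

func main() {
	block := make([]byte, 40000) // stand-in for an HPACK-encoded header block
	splitFragments(block, 16384, func(frag []byte, first, last bool) error {
		fmt.Printf("%5d bytes  first=%v last=%v\n", len(frag), first, last)
		return nil
	})
	// Prints three fragments: 16384 (first), 16384, and 7232 (last).
}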
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index c24316ce7b2fd0cdec3d3c183b3780ac77b68ece..9f3e1b3207fff3ec8919e480652a422816c54aef 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -6,14 +6,51 @@ package http2
 
 import "fmt"
 
-// frameWriteMsg is a request to write a frame.
-type frameWriteMsg struct {
+// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
+// Methods are never called concurrently.
+type WriteScheduler interface {
+	// OpenStream opens a new stream in the write scheduler.
+	// It is illegal to call this with streamID=0 or with a streamID that is
+	// already open -- the call may panic.
+	OpenStream(streamID uint32, options OpenStreamOptions)
+
+	// CloseStream closes a stream in the write scheduler. Any frames queued on
+	// this stream should be discarded. It is illegal to call this on a stream
+	// that is not open -- the call may panic.
+	CloseStream(streamID uint32)
+
+	// AdjustStream adjusts the priority of the given stream. This may be called
+	// on a stream that has not yet been opened or has been closed. Note that
+	// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
+	// https://tools.ietf.org/html/rfc7540#section-5.1
+	AdjustStream(streamID uint32, priority PriorityParam)
+
+	// Push queues a frame in the scheduler.
+	Push(wr FrameWriteRequest)
+
+	// Pop dequeues the next frame to write. Returns false if no frames can
+	// be written. Frames with a given wr.StreamID() are Pop'd in the same
+	// order they are Push'd.
+	Pop() (wr FrameWriteRequest, ok bool)
+}
+
+// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
+type OpenStreamOptions struct {
+	// PusherID is zero if the stream was initiated by the client. Otherwise,
+	// PusherID names the stream that pushed the newly opened stream.
+	PusherID uint32
+}
+
+// FrameWriteRequest is a request to write a frame.
+type FrameWriteRequest struct {
 	// write is the interface value that does the writing, once the
-	// writeScheduler (below) has decided to select this frame
-	// to write. The write functions are all defined in write.go.
+	// WriteScheduler has selected this frame to write. The write
+	// functions are all defined in write.go.
 	write writeFramer
 
-	stream *stream // used for prioritization. nil for non-stream frames.
+	// stream is the stream on which this frame will be written.
+	// nil for non-stream frames like PING and SETTINGS.
+	stream *stream
 
 	// done, if non-nil, must be a buffered channel with space for
 	// 1 message and is sent the return value from write (or an
@@ -21,263 +58,166 @@ type frameWriteMsg struct {
 	done chan error
 }
 
-// for debugging only:
-func (wm frameWriteMsg) String() string {
-	var streamID uint32
-	if wm.stream != nil {
-		streamID = wm.stream.id
-	}
-	var des string
-	if s, ok := wm.write.(fmt.Stringer); ok {
-		des = s.String()
-	} else {
-		des = fmt.Sprintf("%T", wm.write)
-	}
-	return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
-}
-
-// writeScheduler tracks pending frames to write, priorities, and decides
-// the next one to use. It is not thread-safe.
-type writeScheduler struct {
-	// zero are frames not associated with a specific stream.
-	// They're sent before any stream-specific freams.
-	zero writeQueue
-
-	// maxFrameSize is the maximum size of a DATA frame
-	// we'll write. Must be non-zero and between 16K-16M.
-	maxFrameSize uint32
-
-	// sq contains the stream-specific queues, keyed by stream ID.
-	// when a stream is idle, it's deleted from the map.
-	sq map[uint32]*writeQueue
-
-	// canSend is a slice of memory that's reused between frame
-	// scheduling decisions to hold the list of writeQueues (from sq)
-	// which have enough flow control data to send. After canSend is
-	// built, the best is selected.
-	canSend []*writeQueue
-
-	// pool of empty queues for reuse.
-	queuePool []*writeQueue
-}
-
-func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
-	if len(q.s) != 0 {
-		panic("queue must be empty")
-	}
-	ws.queuePool = append(ws.queuePool, q)
-}
-
-func (ws *writeScheduler) getEmptyQueue() *writeQueue {
-	ln := len(ws.queuePool)
-	if ln == 0 {
-		return new(writeQueue)
-	}
-	q := ws.queuePool[ln-1]
-	ws.queuePool = ws.queuePool[:ln-1]
-	return q
-}
-
-func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
-
-func (ws *writeScheduler) add(wm frameWriteMsg) {
-	st := wm.stream
-	if st == nil {
-		ws.zero.push(wm)
-	} else {
-		ws.streamQueue(st.id).push(wm)
-	}
-}
-
-func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
-	if q, ok := ws.sq[streamID]; ok {
-		return q
-	}
-	if ws.sq == nil {
-		ws.sq = make(map[uint32]*writeQueue)
-	}
-	q := ws.getEmptyQueue()
-	ws.sq[streamID] = q
-	return q
-}
-
-// take returns the most important frame to write and removes it from the scheduler.
-// It is illegal to call this if the scheduler is empty or if there are no connection-level
-// flow control bytes available.
-func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
-	if ws.maxFrameSize == 0 {
-		panic("internal error: ws.maxFrameSize not initialized or invalid")
-	}
-
-	// If there any frames not associated with streams, prefer those first.
-	// These are usually SETTINGS, etc.
-	if !ws.zero.empty() {
-		return ws.zero.shift(), true
-	}
-	if len(ws.sq) == 0 {
-		return
-	}
-
-	// Next, prioritize frames on streams that aren't DATA frames (no cost).
-	for id, q := range ws.sq {
-		if q.firstIsNoCost() {
-			return ws.takeFrom(id, q)
-		}
-	}
-
-	// Now, all that remains are DATA frames with non-zero bytes to
-	// send. So pick the best one.
-	if len(ws.canSend) != 0 {
-		panic("should be empty")
-	}
-	for _, q := range ws.sq {
-		if n := ws.streamWritableBytes(q); n > 0 {
-			ws.canSend = append(ws.canSend, q)
-		}
-	}
-	if len(ws.canSend) == 0 {
-		return
-	}
-	defer ws.zeroCanSend()
-
-	// TODO: find the best queue
-	q := ws.canSend[0]
-
-	return ws.takeFrom(q.streamID(), q)
-}
-
-// zeroCanSend is defered from take.
-func (ws *writeScheduler) zeroCanSend() {
-	for i := range ws.canSend {
-		ws.canSend[i] = nil
-	}
-	ws.canSend = ws.canSend[:0]
-}
-
-// streamWritableBytes returns the number of DATA bytes we could write
-// from the given queue's stream, if this stream/queue were
-// selected. It is an error to call this if q's head isn't a
-// *writeData.
-func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
-	wm := q.head()
-	ret := wm.stream.flow.available() // max we can write
-	if ret == 0 {
+// StreamID returns the id of the stream this frame will be written to.
+// 0 is used for non-stream frames such as PING and SETTINGS.
+func (wr FrameWriteRequest) StreamID() uint32 {
+	if wr.stream == nil {
 		return 0
 	}
-	if int32(ws.maxFrameSize) < ret {
-		ret = int32(ws.maxFrameSize)
-	}
-	if ret == 0 {
-		panic("internal error: ws.maxFrameSize not initialized or invalid")
-	}
-	wd := wm.write.(*writeData)
-	if len(wd.p) < int(ret) {
-		ret = int32(len(wd.p))
-	}
-	return ret
-}
-
-func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
-	wm = q.head()
-	// If the first item in this queue costs flow control tokens
-	// and we don't have enough, write as much as we can.
-	if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
-		allowed := wm.stream.flow.available() // max we can write
-		if allowed == 0 {
-			// No quota available. Caller can try the next stream.
-			return frameWriteMsg{}, false
+	return wr.stream.id
+}
+
+// DataSize returns the number of flow control bytes that must be consumed
+// to write this entire frame. This is 0 for non-DATA frames.
+func (wr FrameWriteRequest) DataSize() int {
+	if wd, ok := wr.write.(*writeData); ok {
+		return len(wd.p)
+	}
+	return 0
+}
+
+// Consume consumes min(n, available) bytes from this frame, where available
+// is the number of flow control bytes available on the stream. Consume returns
+// 0, 1, or 2 frames, where the integer return value gives the number of frames
+// returned.
+//
+// If flow control prevents consuming any bytes, this returns (_, _, 0). If
+// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
+// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
+// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
+// underlying stream's flow control budget.
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
+	var empty FrameWriteRequest
+
+	// Non-DATA frames are always consumed whole.
+	wd, ok := wr.write.(*writeData)
+	if !ok || len(wd.p) == 0 {
+		return wr, empty, 1
+	}
+
+	// Might need to split after applying limits.
+	allowed := wr.stream.flow.available()
+	if n < allowed {
+		allowed = n
+	}
+	if wr.stream.sc.maxFrameSize < allowed {
+		allowed = wr.stream.sc.maxFrameSize
+	}
+	if allowed <= 0 {
+		return empty, empty, 0
+	}
+	if len(wd.p) > int(allowed) {
+		wr.stream.flow.take(allowed)
+		consumed := FrameWriteRequest{
+			stream: wr.stream,
+			write: &writeData{
+				streamID: wd.streamID,
+				p:        wd.p[:allowed],
+				// Even if the original had endStream set, there
+				// are bytes remaining because len(wd.p) > allowed,
+				// so we know endStream is false.
+				endStream: false,
+			},
+			// Our caller is blocking on the final DATA frame, not
+			// this intermediate frame, so no need to wait.
+			done: nil,
 		}
-		if int32(ws.maxFrameSize) < allowed {
-			allowed = int32(ws.maxFrameSize)
-		}
-		// TODO: further restrict the allowed size, because even if
-		// the peer says it's okay to write 16MB data frames, we might
-		// want to write smaller ones to properly weight competing
-		// streams' priorities.
-
-		if len(wd.p) > int(allowed) {
-			wm.stream.flow.take(allowed)
-			chunk := wd.p[:allowed]
-			wd.p = wd.p[allowed:]
-			// Make up a new write message of a valid size, rather
-			// than shifting one off the queue.
-			return frameWriteMsg{
-				stream: wm.stream,
-				write: &writeData{
-					streamID: wd.streamID,
-					p:        chunk,
-					// even if the original had endStream set, there
-					// arebytes remaining because len(wd.p) > allowed,
-					// so we know endStream is false:
-					endStream: false,
-				},
-				// our caller is blocking on the final DATA frame, not
-				// these intermediates, so no need to wait:
-				done: nil,
-			}, true
+		rest := FrameWriteRequest{
+			stream: wr.stream,
+			write: &writeData{
+				streamID:  wd.streamID,
+				p:         wd.p[allowed:],
+				endStream: wd.endStream,
+			},
+			done: wr.done,
 		}
-		wm.stream.flow.take(int32(len(wd.p)))
+		return consumed, rest, 2
 	}
 
-	q.shift()
-	if q.empty() {
-		ws.putEmptyQueue(q)
-		delete(ws.sq, id)
-	}
-	return wm, true
+	// The frame is consumed whole.
+	// NB: This cast cannot overflow because allowed is <= math.MaxInt32.
+	wr.stream.flow.take(int32(len(wd.p)))
+	return wr, empty, 1
 }
 
-func (ws *writeScheduler) forgetStream(id uint32) {
-	q, ok := ws.sq[id]
-	if !ok {
-		return
+// String is for debugging only.
+func (wr FrameWriteRequest) String() string {
+	var streamID uint32
+	if wr.stream != nil {
+		streamID = wr.stream.id
 	}
-	delete(ws.sq, id)
-
-	// But keep it for others later.
-	for i := range q.s {
-		q.s[i] = frameWriteMsg{}
+	var des string
+	if s, ok := wr.write.(fmt.Stringer); ok {
+		des = s.String()
+	} else {
+		des = fmt.Sprintf("%T", wr.write)
 	}
-	q.s = q.s[:0]
-	ws.putEmptyQueue(q)
+	return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", streamID, wr.done != nil, des)
 }
 
+// writeQueue is used by implementations of WriteScheduler.
 type writeQueue struct {
-	s []frameWriteMsg
+	s []FrameWriteRequest
 }
 
-// streamID returns the stream ID for a non-empty stream-specific queue.
-func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
-
 func (q *writeQueue) empty() bool { return len(q.s) == 0 }
 
-func (q *writeQueue) push(wm frameWriteMsg) {
-	q.s = append(q.s, wm)
+func (q *writeQueue) push(wr FrameWriteRequest) {
+	q.s = append(q.s, wr)
 }
 
-// head returns the next item that would be removed by shift.
-func (q *writeQueue) head() frameWriteMsg {
+func (q *writeQueue) shift() FrameWriteRequest {
 	if len(q.s) == 0 {
 		panic("invalid use of queue")
 	}
-	return q.s[0]
+	wr := q.s[0]
+	// TODO: less copy-happy queue.
+	copy(q.s, q.s[1:])
+	q.s[len(q.s)-1] = FrameWriteRequest{}
+	q.s = q.s[:len(q.s)-1]
+	return wr
 }
 
-func (q *writeQueue) shift() frameWriteMsg {
+// consume consumes up to n bytes from q.s[0]. If the frame is
+// entirely consumed, it is removed from the queue. If the frame
+// is partially consumed, the frame is kept with the consumed
+// bytes removed. Returns true iff any bytes were consumed.
+func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
 	if len(q.s) == 0 {
-		panic("invalid use of queue")
+		return FrameWriteRequest{}, false
 	}
-	wm := q.s[0]
-	// TODO: less copy-happy queue.
-	copy(q.s, q.s[1:])
-	q.s[len(q.s)-1] = frameWriteMsg{}
-	q.s = q.s[:len(q.s)-1]
-	return wm
+	consumed, rest, numresult := q.s[0].Consume(n)
+	switch numresult {
+	case 0:
+		return FrameWriteRequest{}, false
+	case 1:
+		q.shift()
+	case 2:
+		q.s[0] = rest
+	}
+	return consumed, true
+}
+
+type writeQueuePool []*writeQueue
+
+// put inserts an unused writeQueue into the pool.
+func (p *writeQueuePool) put(q *writeQueue) {
+	for i := range q.s {
+		q.s[i] = FrameWriteRequest{}
+	}
+	q.s = q.s[:0]
+	*p = append(*p, q)
 }
 
-func (q *writeQueue) firstIsNoCost() bool {
-	if df, ok := q.s[0].write.(*writeData); ok {
-		return len(df.p) == 0
+// get returns an empty writeQueue.
+func (p *writeQueuePool) get() *writeQueue {
+	ln := len(*p)
+	if ln == 0 {
+		return new(writeQueue)
 	}
-	return true
+	x := ln - 1
+	q := (*p)[x]
+	(*p)[x] = nil
+	*p = (*p)[:x]
+	return q
 }
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
new file mode 100644
index 0000000000000000000000000000000000000000..40108b066458d55377a0182034b808d6e96714a1
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority.go
@@ -0,0 +1,444 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"fmt"
+	"math"
+	"sort"
+)
+
+// RFC 7540, Section 5.3.5: the default weight is 16.
+const priorityDefaultWeight = 15 // 16 = 15 + 1
+
+// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
+type PriorityWriteSchedulerConfig struct {
+	// MaxClosedNodesInTree controls the maximum number of closed streams to
+	// retain in the priority tree. Setting this to zero saves a small amount
+	// of memory at the cost of performance.
+	//
+	// See RFC 7540, Section 5.3.4:
+	//   "It is possible for a stream to become closed while prioritization
+	//   information ... is in transit. ... This potentially creates suboptimal
+	//   prioritization, since the stream could be given a priority that is
+	//   different from what is intended. To avoid these problems, an endpoint
+	//   SHOULD retain stream prioritization state for a period after streams
+	//   become closed. The longer state is retained, the lower the chance that
+	//   streams are assigned incorrect or default priority values."
+	MaxClosedNodesInTree int
+
+	// MaxIdleNodesInTree controls the maximum number of idle streams to
+	// retain in the priority tree. Setting this to zero saves a small amount
+	// of memory at the cost of performance.
+	//
+	// See RFC 7540, Section 5.3.4:
+	//   Similarly, streams that are in the "idle" state can be assigned
+	//   priority or become a parent of other streams. This allows for the
+	//   creation of a grouping node in the dependency tree, which enables
+	//   more flexible expressions of priority. Idle streams begin with a
+	//   default priority (Section 5.3.5).
+	MaxIdleNodesInTree int
+
+	// ThrottleOutOfOrderWrites enables write throttling to help ensure that
+	// data is delivered in priority order. This works around a race where
+	// stream B depends on stream A and both streams are about to call Write
+	// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
+	// write as much data from B as possible, but this is suboptimal because A
+	// is a higher-priority stream. With throttling enabled, we write a small
+	// amount of data from B to minimize the amount of bandwidth that B can
+	// steal from A.
+	ThrottleOutOfOrderWrites bool
+}
+
+// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
+// If cfg is nil, default options are used.
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
+	if cfg == nil {
+		// For justification of these defaults, see:
+		// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+		cfg = &PriorityWriteSchedulerConfig{
+			MaxClosedNodesInTree:     10,
+			MaxIdleNodesInTree:       10,
+			ThrottleOutOfOrderWrites: false,
+		}
+	}
+
+	ws := &priorityWriteScheduler{
+		nodes:                make(map[uint32]*priorityNode),
+		maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+		maxIdleNodesInTree:   cfg.MaxIdleNodesInTree,
+		enableWriteThrottle:  cfg.ThrottleOutOfOrderWrites,
+	}
+	ws.nodes[0] = &ws.root
+	if cfg.ThrottleOutOfOrderWrites {
+		ws.writeThrottleLimit = 1024
+	} else {
+		ws.writeThrottleLimit = math.MaxInt32
+	}
+	return ws
+}
+
+type priorityNodeState int
+
+const (
+	priorityNodeOpen priorityNodeState = iota
+	priorityNodeClosed
+	priorityNodeIdle
+)
+
+// priorityNode is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type priorityNode struct {
+	q            writeQueue        // queue of pending frames to write
+	id           uint32            // id of the stream, or 0 for the root of the tree
+	weight       uint8             // the actual weight is weight+1, so the value is in [1,256]
+	state        priorityNodeState // open | closed | idle
+	bytes        int64             // number of bytes written by this node, or 0 if closed
+	subtreeBytes int64             // sum(node.bytes) of all nodes in this subtree
+
+	// These links form the priority tree.
+	parent     *priorityNode
+	kids       *priorityNode // start of the kids list
+	prev, next *priorityNode // doubly-linked list of siblings
+}
+
+func (n *priorityNode) setParent(parent *priorityNode) {
+	if n == parent {
+		panic("setParent to self")
+	}
+	if n.parent == parent {
+		return
+	}
+	// Unlink from current parent.
+	if parent := n.parent; parent != nil {
+		if n.prev == nil {
+			parent.kids = n.next
+		} else {
+			n.prev.next = n.next
+		}
+		if n.next != nil {
+			n.next.prev = n.prev
+		}
+	}
+	// Link to new parent.
+	// If parent=nil, remove n from the tree.
+	// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+	n.parent = parent
+	if parent == nil {
+		n.next = nil
+		n.prev = nil
+	} else {
+		n.next = parent.kids
+		n.prev = nil
+		if n.next != nil {
+			n.next.prev = n
+		}
+		parent.kids = n
+	}
+}
+
+func (n *priorityNode) addBytes(b int64) {
+	n.bytes += b
+	for ; n != nil; n = n.parent {
+		n.subtreeBytes += b
+	}
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this function returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+	if !n.q.empty() && f(n, openParent) {
+		return true
+	}
+	if n.kids == nil {
+		return false
+	}
+
+	// Don't consider the root "open" when updating openParent since
+	// we can't send data frames on the root stream (only control frames).
+	if n.id != 0 {
+		openParent = openParent || (n.state == priorityNodeOpen)
+	}
+
+	// Common case: only one kid or all kids have the same weight.
+	// Some clients don't use weights; other clients (like web browsers)
+	// use mostly-linear priority trees.
+	w := n.kids.weight
+	needSort := false
+	for k := n.kids.next; k != nil; k = k.next {
+		if k.weight != w {
+			needSort = true
+			break
+		}
+	}
+	if !needSort {
+		for k := n.kids; k != nil; k = k.next {
+			if k.walkReadyInOrder(openParent, tmp, f) {
+				return true
+			}
+		}
+		return false
+	}
+
+	// Uncommon case: sort the child nodes. We remove the kids from the parent,
+	// then re-insert after sorting so we can reuse tmp for future sort calls.
+	*tmp = (*tmp)[:0]
+	for n.kids != nil {
+		*tmp = append(*tmp, n.kids)
+		n.kids.setParent(nil)
+	}
+	sort.Sort(sortPriorityNodeSiblings(*tmp))
+	for i := len(*tmp) - 1; i >= 0; i-- {
+		(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
+	}
+	for k := n.kids; k != nil; k = k.next {
+		if k.walkReadyInOrder(openParent, tmp, f) {
+			return true
+		}
+	}
+	return false
+}
+
+type sortPriorityNodeSiblings []*priorityNode
+
+func (z sortPriorityNodeSiblings) Len() int      { return len(z) }
+func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblings) Less(i, k int) bool {
+	// Prefer the subtree that has sent fewer bytes relative to its weight.
+	// See sections 5.3.2 and 5.3.4.
+	wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
+	wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+	if bi == 0 && bk == 0 {
+		return wi >= wk
+	}
+	if bk == 0 {
+		return false
+	}
+	return bi/bk <= wi/wk
+}
+
+type priorityWriteScheduler struct {
+	// root is the root of the priority tree, where root.id = 0.
+	// The root queues control frames that are not associated with any stream.
+	root priorityNode
+
+	// nodes maps stream ids to priority tree nodes.
+	nodes map[uint32]*priorityNode
+
+	// maxID is the maximum stream id in nodes.
+	maxID uint32
+
+	// lists of nodes that have been closed or are idle, but are kept in
+	// the tree for improved prioritization. When the lengths exceed either
+	// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
+	closedNodes, idleNodes []*priorityNode
+
+	// From the config.
+	maxClosedNodesInTree int
+	maxIdleNodesInTree   int
+	writeThrottleLimit   int32
+	enableWriteThrottle  bool
+
+	// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
+	tmp []*priorityNode
+
+	// pool of empty queues for reuse.
+	queuePool writeQueuePool
+}
+
+func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+	// The stream may currently be idle, but it must not already be open or closed.
+	if curr := ws.nodes[streamID]; curr != nil {
+		if curr.state != priorityNodeIdle {
+			panic(fmt.Sprintf("stream %d already opened", streamID))
+		}
+		curr.state = priorityNodeOpen
+		return
+	}
+
+	// RFC 7540, Section 5.3.5:
+	//  "All streams are initially assigned a non-exclusive dependency on stream 0x0.
+	//  Pushed streams initially depend on their associated stream. In both cases,
+	//  streams are assigned a default weight of 16."
+	parent := ws.nodes[options.PusherID]
+	if parent == nil {
+		parent = &ws.root
+	}
+	n := &priorityNode{
+		q:      *ws.queuePool.get(),
+		id:     streamID,
+		weight: priorityDefaultWeight,
+		state:  priorityNodeOpen,
+	}
+	n.setParent(parent)
+	ws.nodes[streamID] = n
+	if streamID > ws.maxID {
+		ws.maxID = streamID
+	}
+}
+
+func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
+	if streamID == 0 {
+		panic("violation of WriteScheduler interface: cannot close stream 0")
+	}
+	if ws.nodes[streamID] == nil {
+		panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
+	}
+	if ws.nodes[streamID].state != priorityNodeOpen {
+		panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
+	}
+
+	n := ws.nodes[streamID]
+	n.state = priorityNodeClosed
+	n.addBytes(-n.bytes)
+
+	q := n.q
+	ws.queuePool.put(&q)
+	n.q.s = nil
+	if ws.maxClosedNodesInTree > 0 {
+		ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
+	} else {
+		ws.removeNode(n)
+	}
+}
+
+func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+	if streamID == 0 {
+		panic("adjustPriority on root")
+	}
+
+	// If streamID does not exist, there are two cases:
+	// - A closed stream that has been removed (this will have ID <= maxID)
+	// - An idle stream that is being used for "grouping" (this will have ID > maxID)
+	n := ws.nodes[streamID]
+	if n == nil {
+		if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
+			return
+		}
+		ws.maxID = streamID
+		n = &priorityNode{
+			q:      *ws.queuePool.get(),
+			id:     streamID,
+			weight: priorityDefaultWeight,
+			state:  priorityNodeIdle,
+		}
+		n.setParent(&ws.root)
+		ws.nodes[streamID] = n
+		ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
+	}
+
+	// Section 5.3.1: A dependency on a stream that is not currently in the tree
+	// results in that stream being given a default priority (Section 5.3.5).
+	parent := ws.nodes[priority.StreamDep]
+	if parent == nil {
+		n.setParent(&ws.root)
+		n.weight = priorityDefaultWeight
+		return
+	}
+
+	// Ignore if the client tries to make a node its own parent.
+	if n == parent {
+		return
+	}
+
+	// Section 5.3.3:
+	//   "If a stream is made dependent on one of its own dependencies, the
+	//   formerly dependent stream is first moved to be dependent on the
+	//   reprioritized stream's previous parent. The moved dependency retains
+	//   its weight."
+	//
+	// That is: if parent depends on n, move parent to depend on n.parent.
+	for x := parent.parent; x != nil; x = x.parent {
+		if x == n {
+			parent.setParent(n.parent)
+			break
+		}
+	}
+
+	// Section 5.3.3: The exclusive flag causes the stream to become the sole
+	// dependency of its parent stream, causing other dependencies to become
+	// dependent on the exclusive stream.
+	if priority.Exclusive {
+		k := parent.kids
+		for k != nil {
+			next := k.next
+			if k != n {
+				k.setParent(n)
+			}
+			k = next
+		}
+	}
+
+	n.setParent(parent)
+	n.weight = priority.Weight
+}
+
+func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
+	var n *priorityNode
+	if id := wr.StreamID(); id == 0 {
+		n = &ws.root
+	} else {
+		n = ws.nodes[id]
+		if n == nil {
+			panic("add on non-open stream")
+		}
+	}
+	n.q.push(wr)
+}
+
+func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
+	ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
+		limit := int32(math.MaxInt32)
+		if openParent {
+			limit = ws.writeThrottleLimit
+		}
+		wr, ok = n.q.consume(limit)
+		if !ok {
+			return false
+		}
+		n.addBytes(int64(wr.DataSize()))
+		// If B depends on A and B continuously has data available but A
+		// does not, gradually increase the throttling limit to allow B to
+		// steal more and more bandwidth from A.
+		if openParent {
+			ws.writeThrottleLimit += 1024
+			if ws.writeThrottleLimit < 0 {
+				ws.writeThrottleLimit = math.MaxInt32
+			}
+		} else if ws.enableWriteThrottle {
+			ws.writeThrottleLimit = 1024
+		}
+		return true
+	})
+	return wr, ok
+}
+
+func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
+	if maxSize == 0 {
+		return
+	}
+	if len(*list) == maxSize {
+		// Remove the oldest node, then shift left.
+		ws.removeNode((*list)[0])
+		x := (*list)[1:]
+		copy(*list, x)
+		*list = (*list)[:len(x)]
+	}
+	*list = append(*list, n)
+}
+
+func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
+	for k := n.kids; k != nil; k = k.next {
+		k.setParent(n.parent)
+	}
+	n.setParent(nil)
+	delete(ws.nodes, n.id)
+}
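
The ordering in sortPriorityNodeSiblings.Less is the heart of the priority scheduler: among ready siblings, prefer the subtree that has sent the fewest bytes per unit of weight. A worked sketch of that comparison (the actual weight of a node is its stored weight plus one):

package main

import "fmt"

// prefersFirst reports whether a sibling with stored weight w1 and b1 bytes
// already sent should be scheduled before a sibling with w2/b2, following the
// same rule as sortPriorityNodeSiblings.Less.
func prefersFirst(w1 uint8, b1 int64, w2 uint8, b2 int64) bool {
	wi, bi := float64(w1)+1, float64(b1)
	wk, bk := float64(w2)+1, float64(b2)
	if bi == 0 && bk == 0 {
		return wi >= wk // nothing sent yet: the heavier stream goes first
	}
	if bk == 0 {
		return false // the other stream hasn't sent anything yet: it goes first
	}
	return bi/bk <= wi/wk // otherwise compare bytes sent per unit of weight
}

func main() {
	// Stream A: weight 31, 64 KiB already sent (2048 bytes per weight unit).
	// Stream B: weight 15, 1 KiB already sent (64 bytes per weight unit).
	// B has used less of its fair share, so B is scheduled first.
	fmt.Println(prefersFirst(31, 64<<10, 15, 1<<10)) // false: A does not come before B
}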
diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go
new file mode 100644
index 0000000000000000000000000000000000000000..36d7919f16ac1b51831ea4e798f511b758f533ef
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_random.go
@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "math"
+
+// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
+// priorities. Control frames like SETTINGS and PING are written before DATA
+// frames, but if no control frames are queued and multiple streams have queued
+// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
+func NewRandomWriteScheduler() WriteScheduler {
+	return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
+}
+
+type randomWriteScheduler struct {
+	// zero are frames not associated with a specific stream.
+	zero writeQueue
+
+	// sq contains the stream-specific queues, keyed by stream ID.
+	// When a stream is idle or closed, it's deleted from the map.
+	sq map[uint32]*writeQueue
+
+	// pool of empty queues for reuse.
+	queuePool writeQueuePool
+}
+
+func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+	// no-op: idle streams are not tracked
+}
+
+func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
+	q, ok := ws.sq[streamID]
+	if !ok {
+		return
+	}
+	delete(ws.sq, streamID)
+	ws.queuePool.put(q)
+}
+
+func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+	// no-op: priorities are ignored
+}
+
+func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
+	id := wr.StreamID()
+	if id == 0 {
+		ws.zero.push(wr)
+		return
+	}
+	q, ok := ws.sq[id]
+	if !ok {
+		q = ws.queuePool.get()
+		ws.sq[id] = q
+	}
+	q.push(wr)
+}
+
+func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
+	// Control frames first.
+	if !ws.zero.empty() {
+		return ws.zero.shift(), true
+	}
+	// Iterate over all non-idle streams until finding one that can be consumed.
+	for _, q := range ws.sq {
+		if wr, ok := q.consume(math.MaxInt32); ok {
+			return wr, true
+		}
+	}
+	return FrameWriteRequest{}, false
+}
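
The "random" in randomWriteScheduler comes from Go's unspecified (and deliberately randomized) map iteration order: Pop ranges over sq, so among streams with queued frames the pick is arbitrary. Minimal illustration:

package main

import "fmt"

func main() {
	queued := map[uint32]string{3: "stream 3", 5: "stream 5", 7: "stream 7"}
	for id := range queued { // iteration order varies from run to run
		fmt.Println("picked", queued[id])
		break
	}
}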
diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go
new file mode 100644
index 0000000000000000000000000000000000000000..56332692c4214d6f307815fc5db3efd05469a6c0
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go
@@ -0,0 +1,20 @@
+// Copyright 2016 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo,linux,sparc64
+
+package unix
+
+import "syscall"
+
+//extern sysconf
+func realSysconf(name int) int64
+
+func sysconf(name int) (n int64, err syscall.Errno) {
+	r := realSysconf(name)
+	if r < 0 {
+		return 0, syscall.GetErrno()
+	}
+	return r, 0
+}
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
index 3e224c57e2ab4660f844b94523fec24a6283f944..2a1473f1610c32235059e0566f9b0fdd36168345 100755
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -223,6 +223,13 @@ linux_s390x)
 	# package generates its version of the types file.
 	mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
 	;;
+linux_sparc64)
+	GOOSARCH_in=syscall_linux_sparc64.go
+	unistd_h=/usr/include/sparc64-linux-gnu/asm/unistd.h
+	mkerrors="$mkerrors -m64"
+	mksysnum="./mksysnum_linux.pl $unistd_h"
+	mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+	;;
 netbsd_386)
 	mkerrors="$mkerrors -m32"
 	mksyscall="./mksyscall.pl -l32 -netbsd"
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index c40d788c4ab3f5b6327bb21e11a73c1a57a64a19..33b7922bdd1857b0763a76890c18cd46105d7f89 100755
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -127,6 +127,7 @@ includes_Linux='
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/icmpv6.h>
+#include <linux/serial.h>
 #include <net/route.h>
 #include <asm/termbits.h>
 
@@ -141,6 +142,12 @@ includes_Linux='
 #ifndef PTRACE_SETREGS
 #define PTRACE_SETREGS	0xd
 #endif
+
+#ifdef SOL_BLUETOOTH
+// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
+// but it is already in bluetooth_linux.go
+#undef SOL_BLUETOOTH
+#endif
 '
 
 includes_NetBSD='
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 6d10c9cffab591e6bfd905b5478f215d7627542d..cfac4a44094187f6616f291046d3d18e7579aeff 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -69,10 +69,10 @@ func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error
 	return ppoll(&fds[0], len(fds), timeout, sigmask)
 }
 
-//sys	readlinkat(dirfd int, path string, buf []byte) (n int, err error)
+//sys	Readlinkat(dirfd int, path string, buf []byte) (n int, err error)
 
 func Readlink(path string, buf []byte) (n int, err error) {
-	return readlinkat(AT_FDCWD, path, buf)
+	return Readlinkat(AT_FDCWD, path, buf)
 }
 
 func Rename(oldpath string, newpath string) (err error) {
@@ -80,24 +80,20 @@ func Rename(oldpath string, newpath string) (err error) {
 }
 
 func Rmdir(path string) error {
-	return unlinkat(AT_FDCWD, path, AT_REMOVEDIR)
+	return Unlinkat(AT_FDCWD, path, AT_REMOVEDIR)
 }
 
-//sys	symlinkat(oldpath string, newdirfd int, newpath string) (err error)
+//sys	Symlinkat(oldpath string, newdirfd int, newpath string) (err error)
 
 func Symlink(oldpath string, newpath string) (err error) {
-	return symlinkat(oldpath, AT_FDCWD, newpath)
+	return Symlinkat(oldpath, AT_FDCWD, newpath)
 }
 
 func Unlink(path string) error {
-	return unlinkat(AT_FDCWD, path, 0)
+	return Unlinkat(AT_FDCWD, path, 0)
 }
 
-//sys	unlinkat(dirfd int, path string, flags int) (err error)
-
-func Unlinkat(dirfd int, path string, flags int) error {
-	return unlinkat(dirfd, path, flags)
-}
+//sys	Unlinkat(dirfd int, path string, flags int) (err error)
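
With readlinkat, symlinkat, and unlinkat now exported directly (the old Unlinkat wrapper is removed), callers can work relative to a directory fd without extra indirection. A hedged usage sketch; the directory and filename are placeholders:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	dirfd, err := unix.Open("/tmp", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(dirfd)

	// Remove /tmp/scratch.txt relative to the directory fd.
	if err := unix.Unlinkat(dirfd, "scratch.txt", 0); err != nil {
		log.Printf("unlinkat: %v", err)
	}
}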
 
 //sys	utimes(path string, times *[2]Timeval) (err error)
 
@@ -143,8 +139,7 @@ func UtimesNano(path string, ts []Timespec) error {
 	// in 2.6.22, Released, 8 July 2007) then fall back to utimes
 	var tv [2]Timeval
 	for i := 0; i < 2; i++ {
-		tv[i].Sec = ts[i].Sec
-		tv[i].Usec = ts[i].Nsec / 1000
+		tv[i] = NsecToTimeval(TimespecToNsec(ts[i]))
 	}
 	return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
 }
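
The UtimesNano fallback now round-trips through TimespecToNsec and NsecToTimeval instead of converting fields by hand. A sketch of the rounding that implies, matching the NsecToTimeval defined later in this diff (sub-microsecond remainders round up):

package main

import "fmt"

// nsecToSecUsec mirrors NsecToTimeval: split nanoseconds into seconds and
// microseconds, rounding the sub-microsecond remainder up.
func nsecToSecUsec(nsec int64) (sec, usec int64) {
	nsec += 999 // round up to the next microsecond
	return nsec / 1e9, nsec % 1e9 / 1e3
}

func main() {
	// 1s + 1500ns becomes 1s + 2µs after rounding up.
	fmt.Println(nsecToSecUsec(1000001500)) // 1 2
}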
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
new file mode 100644
index 0000000000000000000000000000000000000000..20b7454d7703ffe79427836df731fa6d68c32ef9
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
@@ -0,0 +1,169 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build sparc64,linux
+
+package unix
+
+import (
+	"sync/atomic"
+	"syscall"
+)
+
+//sys	EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys	Dup2(oldfd int, newfd int) (err error)
+//sys	Fchown(fd int, uid int, gid int) (err error)
+//sys	Fstat(fd int, stat *Stat_t) (err error)
+//sys	Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys	Ftruncate(fd int, length int64) (err error)
+//sysnb	Getegid() (egid int)
+//sysnb	Geteuid() (euid int)
+//sysnb	Getgid() (gid int)
+//sysnb	Getrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb	Getuid() (uid int)
+//sysnb	InotifyInit() (fd int, err error)
+//sys	Lchown(path string, uid int, gid int) (err error)
+//sys	Listen(s int, n int) (err error)
+//sys	Lstat(path string, stat *Stat_t) (err error)
+//sys	Pause() (err error)
+//sys	Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys	Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys	Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys	Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
+//sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys	Setfsgid(gid int) (err error)
+//sys	Setfsuid(uid int) (err error)
+//sysnb	Setregid(rgid int, egid int) (err error)
+//sysnb	Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb	Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb	Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb	Setreuid(ruid int, euid int) (err error)
+//sys	Shutdown(fd int, how int) (err error)
+//sys	Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+//sys	Stat(path string, stat *Stat_t) (err error)
+//sys	Statfs(path string, buf *Statfs_t) (err error)
+//sys	SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys	Truncate(path string, length int64) (err error)
+//sys	accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys	accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys	bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys	connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb	getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb	setgroups(n int, list *_Gid_t) (err error)
+//sys	getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys	setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb	socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb	socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb	getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb	getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys	recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys	sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys	recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys	sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys	mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+
+func sysconf(name int) (n int64, err syscall.Errno)
+
+// pageSize caches the value of Getpagesize, since it can't change
+// once the system is booted.
+var pageSize int64 // accessed atomically
+
+func Getpagesize() int {
+	n := atomic.LoadInt64(&pageSize)
+	if n == 0 {
+		n, _ = sysconf(_SC_PAGESIZE)
+		atomic.StoreInt64(&pageSize, n)
+	}
+	return int(n)
+}
+
+func Ioperm(from int, num int, on int) (err error) {
+	return ENOSYS
+}
+
+func Iopl(level int) (err error) {
+	return ENOSYS
+}
+
+//sysnb	Gettimeofday(tv *Timeval) (err error)
+
+func Time(t *Time_t) (tt Time_t, err error) {
+	var tv Timeval
+	err = Gettimeofday(&tv)
+	if err != nil {
+		return 0, err
+	}
+	if t != nil {
+		*t = Time_t(tv.Sec)
+	}
+	return Time_t(tv.Sec), nil
+}
+
+//sys	Utime(path string, buf *Utimbuf) (err error)
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+	ts.Sec = nsec / 1e9
+	ts.Nsec = nsec % 1e9
+	return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+	nsec += 999 // round up to microsecond
+	tv.Sec = nsec / 1e9
+	tv.Usec = int32(nsec % 1e9 / 1e3)
+	return
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Tpc }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Tpc = pc }
+
+func (iov *Iovec) SetLen(length int) {
+	iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+	msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+	cmsg.Len = uint64(length)
+}
+
+//sysnb pipe(p *[2]_C_int) (err error)
+
+func Pipe(p []int) (err error) {
+	if len(p) != 2 {
+		return EINVAL
+	}
+	var pp [2]_C_int
+	err = pipe(&pp)
+	p[0] = int(pp[0])
+	p[1] = int(pp[1])
+	return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+	if len(p) != 2 {
+		return EINVAL
+	}
+	var pp [2]_C_int
+	err = pipe2(&pp, flags)
+	p[0] = int(pp[0])
+	p[1] = int(pp[1])
+	return
+}
+
+//sys	poll(fds *PollFd, nfds int, timeout int) (n int, err error)
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+	if len(fds) == 0 {
+		return poll(nil, 0, timeout)
+	}
+	return poll(&fds[0], len(fds), timeout)
+}
diff --git a/vendor/golang.org/x/sys/unix/types_linux.go b/vendor/golang.org/x/sys/unix/types_linux.go
index 7dea79a8effadc30d7f453990c3d4120e54723f8..de80e2c8c0c49bd90a70071d11b9cfaf226c6616 100644
--- a/vendor/golang.org/x/sys/unix/types_linux.go
+++ b/vendor/golang.org/x/sys/unix/types_linux.go
@@ -105,6 +105,9 @@ typedef struct pt_regs PtraceRegs;
 typedef struct user PtraceRegs;
 #elif defined(__s390x__)
 typedef struct _user_regs_struct PtraceRegs;
+#elif defined(__sparc__)
+#include <asm/ptrace.h>
+typedef struct pt_regs PtraceRegs;
 #else
 typedef struct user_regs_struct PtraceRegs;
 #endif
@@ -126,7 +129,7 @@ struct my_epoll_event {
 	// padding is not specified in linux/eventpoll.h but added to conform to the
 	// alignment requirements of EABI
 	int32_t padFd;
-#elif defined(__powerpc64__) || defined(__s390x__)
+#elif defined(__powerpc64__) || defined(__s390x__) || defined(__sparc__)
 	int32_t _padFd;
 #endif
 	int32_t fd;
@@ -445,6 +448,10 @@ const (
 
 type Sigset_t C.sigset_t
 
+// sysconf information
+
+const _SC_PAGESIZE = C._SC_PAGESIZE
+
 // Terminal handling
 
 type Termios C.termios_t
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
new file mode 100644
index 0000000000000000000000000000000000000000..766d1e6128b6c11a9cd9abab9814af5335410c21
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -0,0 +1,2077 @@
+// mkerrors.sh -m64
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build sparc64,linux
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs -- -m64 _const.go
+
+package unix
+
+import "syscall"
+
+const (
+	AF_ALG                           = 0x26
+	AF_APPLETALK                     = 0x5
+	AF_ASH                           = 0x12
+	AF_ATMPVC                        = 0x8
+	AF_ATMSVC                        = 0x14
+	AF_AX25                          = 0x3
+	AF_BLUETOOTH                     = 0x1f
+	AF_BRIDGE                        = 0x7
+	AF_CAIF                          = 0x25
+	AF_CAN                           = 0x1d
+	AF_DECnet                        = 0xc
+	AF_ECONET                        = 0x13
+	AF_FILE                          = 0x1
+	AF_IB                            = 0x1b
+	AF_IEEE802154                    = 0x24
+	AF_INET                          = 0x2
+	AF_INET6                         = 0xa
+	AF_IPX                           = 0x4
+	AF_IRDA                          = 0x17
+	AF_ISDN                          = 0x22
+	AF_IUCV                          = 0x20
+	AF_KCM                           = 0x29
+	AF_KEY                           = 0xf
+	AF_LLC                           = 0x1a
+	AF_LOCAL                         = 0x1
+	AF_MAX                           = 0x2a
+	AF_MPLS                          = 0x1c
+	AF_NETBEUI                       = 0xd
+	AF_NETLINK                       = 0x10
+	AF_NETROM                        = 0x6
+	AF_NFC                           = 0x27
+	AF_PACKET                        = 0x11
+	AF_PHONET                        = 0x23
+	AF_PPPOX                         = 0x18
+	AF_RDS                           = 0x15
+	AF_ROSE                          = 0xb
+	AF_ROUTE                         = 0x10
+	AF_RXRPC                         = 0x21
+	AF_SECURITY                      = 0xe
+	AF_SNA                           = 0x16
+	AF_TIPC                          = 0x1e
+	AF_UNIX                          = 0x1
+	AF_UNSPEC                        = 0x0
+	AF_VSOCK                         = 0x28
+	AF_WANPIPE                       = 0x19
+	AF_X25                           = 0x9
+	ARPHRD_6LOWPAN                   = 0x339
+	ARPHRD_ADAPT                     = 0x108
+	ARPHRD_APPLETLK                  = 0x8
+	ARPHRD_ARCNET                    = 0x7
+	ARPHRD_ASH                       = 0x30d
+	ARPHRD_ATM                       = 0x13
+	ARPHRD_AX25                      = 0x3
+	ARPHRD_BIF                       = 0x307
+	ARPHRD_CAIF                      = 0x336
+	ARPHRD_CAN                       = 0x118
+	ARPHRD_CHAOS                     = 0x5
+	ARPHRD_CISCO                     = 0x201
+	ARPHRD_CSLIP                     = 0x101
+	ARPHRD_CSLIP6                    = 0x103
+	ARPHRD_DDCMP                     = 0x205
+	ARPHRD_DLCI                      = 0xf
+	ARPHRD_ECONET                    = 0x30e
+	ARPHRD_EETHER                    = 0x2
+	ARPHRD_ETHER                     = 0x1
+	ARPHRD_EUI64                     = 0x1b
+	ARPHRD_FCAL                      = 0x311
+	ARPHRD_FCFABRIC                  = 0x313
+	ARPHRD_FCPL                      = 0x312
+	ARPHRD_FCPP                      = 0x310
+	ARPHRD_FDDI                      = 0x306
+	ARPHRD_FRAD                      = 0x302
+	ARPHRD_HDLC                      = 0x201
+	ARPHRD_HIPPI                     = 0x30c
+	ARPHRD_HWX25                     = 0x110
+	ARPHRD_IEEE1394                  = 0x18
+	ARPHRD_IEEE802                   = 0x6
+	ARPHRD_IEEE80211                 = 0x321
+	ARPHRD_IEEE80211_PRISM           = 0x322
+	ARPHRD_IEEE80211_RADIOTAP        = 0x323
+	ARPHRD_IEEE802154                = 0x324
+	ARPHRD_IEEE802154_MONITOR        = 0x325
+	ARPHRD_IEEE802_TR                = 0x320
+	ARPHRD_INFINIBAND                = 0x20
+	ARPHRD_IP6GRE                    = 0x337
+	ARPHRD_IPDDP                     = 0x309
+	ARPHRD_IPGRE                     = 0x30a
+	ARPHRD_IRDA                      = 0x30f
+	ARPHRD_LAPB                      = 0x204
+	ARPHRD_LOCALTLK                  = 0x305
+	ARPHRD_LOOPBACK                  = 0x304
+	ARPHRD_METRICOM                  = 0x17
+	ARPHRD_NETLINK                   = 0x338
+	ARPHRD_NETROM                    = 0x0
+	ARPHRD_NONE                      = 0xfffe
+	ARPHRD_PHONET                    = 0x334
+	ARPHRD_PHONET_PIPE               = 0x335
+	ARPHRD_PIMREG                    = 0x30b
+	ARPHRD_PPP                       = 0x200
+	ARPHRD_PRONET                    = 0x4
+	ARPHRD_RAWHDLC                   = 0x206
+	ARPHRD_ROSE                      = 0x10e
+	ARPHRD_RSRVD                     = 0x104
+	ARPHRD_SIT                       = 0x308
+	ARPHRD_SKIP                      = 0x303
+	ARPHRD_SLIP                      = 0x100
+	ARPHRD_SLIP6                     = 0x102
+	ARPHRD_TUNNEL                    = 0x300
+	ARPHRD_TUNNEL6                   = 0x301
+	ARPHRD_VOID                      = 0xffff
+	ARPHRD_X25                       = 0x10f
+	ASI_LEON_DFLUSH                  = 0x11
+	ASI_LEON_IFLUSH                  = 0x10
+	ASI_LEON_MMUFLUSH                = 0x18
+	B0                               = 0x0
+	B1000000                         = 0x100c
+	B110                             = 0x3
+	B115200                          = 0x1002
+	B1152000                         = 0x100d
+	B1200                            = 0x9
+	B134                             = 0x4
+	B150                             = 0x5
+	B1500000                         = 0x100e
+	B153600                          = 0x1006
+	B1800                            = 0xa
+	B19200                           = 0xe
+	B200                             = 0x6
+	B2000000                         = 0x100f
+	B230400                          = 0x1003
+	B2400                            = 0xb
+	B300                             = 0x7
+	B307200                          = 0x1007
+	B38400                           = 0xf
+	B460800                          = 0x1004
+	B4800                            = 0xc
+	B50                              = 0x1
+	B500000                          = 0x100a
+	B57600                           = 0x1001
+	B576000                          = 0x100b
+	B600                             = 0x8
+	B614400                          = 0x1008
+	B75                              = 0x2
+	B76800                           = 0x1005
+	B921600                          = 0x1009
+	B9600                            = 0xd
+	BOTHER                           = 0x1000
+	BPF_A                            = 0x10
+	BPF_ABS                          = 0x20
+	BPF_ADD                          = 0x0
+	BPF_ALU                          = 0x4
+	BPF_AND                          = 0x50
+	BPF_B                            = 0x10
+	BPF_DIV                          = 0x30
+	BPF_H                            = 0x8
+	BPF_IMM                          = 0x0
+	BPF_IND                          = 0x40
+	BPF_JA                           = 0x0
+	BPF_JEQ                          = 0x10
+	BPF_JGE                          = 0x30
+	BPF_JGT                          = 0x20
+	BPF_JMP                          = 0x5
+	BPF_JSET                         = 0x40
+	BPF_K                            = 0x0
+	BPF_LD                           = 0x0
+	BPF_LDX                          = 0x1
+	BPF_LEN                          = 0x80
+	BPF_LL_OFF                       = -0x200000
+	BPF_LSH                          = 0x60
+	BPF_MAJOR_VERSION                = 0x1
+	BPF_MAXINSNS                     = 0x1000
+	BPF_MEM                          = 0x60
+	BPF_MEMWORDS                     = 0x10
+	BPF_MINOR_VERSION                = 0x1
+	BPF_MISC                         = 0x7
+	BPF_MOD                          = 0x90
+	BPF_MSH                          = 0xa0
+	BPF_MUL                          = 0x20
+	BPF_NEG                          = 0x80
+	BPF_NET_OFF                      = -0x100000
+	BPF_OR                           = 0x40
+	BPF_RET                          = 0x6
+	BPF_RSH                          = 0x70
+	BPF_ST                           = 0x2
+	BPF_STX                          = 0x3
+	BPF_SUB                          = 0x10
+	BPF_TAX                          = 0x0
+	BPF_TXA                          = 0x80
+	BPF_W                            = 0x0
+	BPF_X                            = 0x8
+	BPF_XOR                          = 0xa0
+	BRKINT                           = 0x2
+	BS0                              = 0x0
+	BS1                              = 0x2000
+	BSDLY                            = 0x2000
+	CBAUD                            = 0x100f
+	CBAUDEX                          = 0x1000
+	CFLUSH                           = 0xf
+	CIBAUD                           = 0x100f0000
+	CLOCAL                           = 0x800
+	CLOCK_BOOTTIME                   = 0x7
+	CLOCK_BOOTTIME_ALARM             = 0x9
+	CLOCK_DEFAULT                    = 0x0
+	CLOCK_EXT                        = 0x1
+	CLOCK_INT                        = 0x2
+	CLOCK_MONOTONIC                  = 0x1
+	CLOCK_MONOTONIC_COARSE           = 0x6
+	CLOCK_MONOTONIC_RAW              = 0x4
+	CLOCK_PROCESS_CPUTIME_ID         = 0x2
+	CLOCK_REALTIME                   = 0x0
+	CLOCK_REALTIME_ALARM             = 0x8
+	CLOCK_REALTIME_COARSE            = 0x5
+	CLOCK_TAI                        = 0xb
+	CLOCK_THREAD_CPUTIME_ID          = 0x3
+	CLOCK_TXFROMRX                   = 0x4
+	CLOCK_TXINT                      = 0x3
+	CLONE_CHILD_CLEARTID             = 0x200000
+	CLONE_CHILD_SETTID               = 0x1000000
+	CLONE_DETACHED                   = 0x400000
+	CLONE_FILES                      = 0x400
+	CLONE_FS                         = 0x200
+	CLONE_IO                         = 0x80000000
+	CLONE_NEWCGROUP                  = 0x2000000
+	CLONE_NEWIPC                     = 0x8000000
+	CLONE_NEWNET                     = 0x40000000
+	CLONE_NEWNS                      = 0x20000
+	CLONE_NEWPID                     = 0x20000000
+	CLONE_NEWUSER                    = 0x10000000
+	CLONE_NEWUTS                     = 0x4000000
+	CLONE_PARENT                     = 0x8000
+	CLONE_PARENT_SETTID              = 0x100000
+	CLONE_PTRACE                     = 0x2000
+	CLONE_SETTLS                     = 0x80000
+	CLONE_SIGHAND                    = 0x800
+	CLONE_SYSVSEM                    = 0x40000
+	CLONE_THREAD                     = 0x10000
+	CLONE_UNTRACED                   = 0x800000
+	CLONE_VFORK                      = 0x4000
+	CLONE_VM                         = 0x100
+	CMSPAR                           = 0x40000000
+	CR0                              = 0x0
+	CR1                              = 0x200
+	CR2                              = 0x400
+	CR3                              = 0x600
+	CRDLY                            = 0x600
+	CREAD                            = 0x80
+	CRTSCTS                          = 0x80000000
+	CS5                              = 0x0
+	CS6                              = 0x10
+	CS7                              = 0x20
+	CS8                              = 0x30
+	CSIGNAL                          = 0xff
+	CSIZE                            = 0x30
+	CSTART                           = 0x11
+	CSTATUS                          = 0x0
+	CSTOP                            = 0x13
+	CSTOPB                           = 0x40
+	CSUSP                            = 0x1a
+	DT_BLK                           = 0x6
+	DT_CHR                           = 0x2
+	DT_DIR                           = 0x4
+	DT_FIFO                          = 0x1
+	DT_LNK                           = 0xa
+	DT_REG                           = 0x8
+	DT_SOCK                          = 0xc
+	DT_UNKNOWN                       = 0x0
+	DT_WHT                           = 0xe
+	ECHO                             = 0x8
+	ECHOCTL                          = 0x200
+	ECHOE                            = 0x10
+	ECHOK                            = 0x20
+	ECHOKE                           = 0x800
+	ECHONL                           = 0x40
+	ECHOPRT                          = 0x400
+	EMT_TAGOVF                       = 0x1
+	ENCODING_DEFAULT                 = 0x0
+	ENCODING_FM_MARK                 = 0x3
+	ENCODING_FM_SPACE                = 0x4
+	ENCODING_MANCHESTER              = 0x5
+	ENCODING_NRZ                     = 0x1
+	ENCODING_NRZI                    = 0x2
+	EPOLLERR                         = 0x8
+	EPOLLET                          = 0x80000000
+	EPOLLEXCLUSIVE                   = 0x10000000
+	EPOLLHUP                         = 0x10
+	EPOLLIN                          = 0x1
+	EPOLLMSG                         = 0x400
+	EPOLLONESHOT                     = 0x40000000
+	EPOLLOUT                         = 0x4
+	EPOLLPRI                         = 0x2
+	EPOLLRDBAND                      = 0x80
+	EPOLLRDHUP                       = 0x2000
+	EPOLLRDNORM                      = 0x40
+	EPOLLWAKEUP                      = 0x20000000
+	EPOLLWRBAND                      = 0x200
+	EPOLLWRNORM                      = 0x100
+	EPOLL_CLOEXEC                    = 0x400000
+	EPOLL_CTL_ADD                    = 0x1
+	EPOLL_CTL_DEL                    = 0x2
+	EPOLL_CTL_MOD                    = 0x3
+	ETH_P_1588                       = 0x88f7
+	ETH_P_8021AD                     = 0x88a8
+	ETH_P_8021AH                     = 0x88e7
+	ETH_P_8021Q                      = 0x8100
+	ETH_P_80221                      = 0x8917
+	ETH_P_802_2                      = 0x4
+	ETH_P_802_3                      = 0x1
+	ETH_P_802_3_MIN                  = 0x600
+	ETH_P_802_EX1                    = 0x88b5
+	ETH_P_AARP                       = 0x80f3
+	ETH_P_AF_IUCV                    = 0xfbfb
+	ETH_P_ALL                        = 0x3
+	ETH_P_AOE                        = 0x88a2
+	ETH_P_ARCNET                     = 0x1a
+	ETH_P_ARP                        = 0x806
+	ETH_P_ATALK                      = 0x809b
+	ETH_P_ATMFATE                    = 0x8884
+	ETH_P_ATMMPOA                    = 0x884c
+	ETH_P_AX25                       = 0x2
+	ETH_P_BATMAN                     = 0x4305
+	ETH_P_BPQ                        = 0x8ff
+	ETH_P_CAIF                       = 0xf7
+	ETH_P_CAN                        = 0xc
+	ETH_P_CANFD                      = 0xd
+	ETH_P_CONTROL                    = 0x16
+	ETH_P_CUST                       = 0x6006
+	ETH_P_DDCMP                      = 0x6
+	ETH_P_DEC                        = 0x6000
+	ETH_P_DIAG                       = 0x6005
+	ETH_P_DNA_DL                     = 0x6001
+	ETH_P_DNA_RC                     = 0x6002
+	ETH_P_DNA_RT                     = 0x6003
+	ETH_P_DSA                        = 0x1b
+	ETH_P_ECONET                     = 0x18
+	ETH_P_EDSA                       = 0xdada
+	ETH_P_FCOE                       = 0x8906
+	ETH_P_FIP                        = 0x8914
+	ETH_P_HDLC                       = 0x19
+	ETH_P_HSR                        = 0x892f
+	ETH_P_IEEE802154                 = 0xf6
+	ETH_P_IEEEPUP                    = 0xa00
+	ETH_P_IEEEPUPAT                  = 0xa01
+	ETH_P_IP                         = 0x800
+	ETH_P_IPV6                       = 0x86dd
+	ETH_P_IPX                        = 0x8137
+	ETH_P_IRDA                       = 0x17
+	ETH_P_LAT                        = 0x6004
+	ETH_P_LINK_CTL                   = 0x886c
+	ETH_P_LOCALTALK                  = 0x9
+	ETH_P_LOOP                       = 0x60
+	ETH_P_LOOPBACK                   = 0x9000
+	ETH_P_MACSEC                     = 0x88e5
+	ETH_P_MOBITEX                    = 0x15
+	ETH_P_MPLS_MC                    = 0x8848
+	ETH_P_MPLS_UC                    = 0x8847
+	ETH_P_MVRP                       = 0x88f5
+	ETH_P_PAE                        = 0x888e
+	ETH_P_PAUSE                      = 0x8808
+	ETH_P_PHONET                     = 0xf5
+	ETH_P_PPPTALK                    = 0x10
+	ETH_P_PPP_DISC                   = 0x8863
+	ETH_P_PPP_MP                     = 0x8
+	ETH_P_PPP_SES                    = 0x8864
+	ETH_P_PRP                        = 0x88fb
+	ETH_P_PUP                        = 0x200
+	ETH_P_PUPAT                      = 0x201
+	ETH_P_QINQ1                      = 0x9100
+	ETH_P_QINQ2                      = 0x9200
+	ETH_P_QINQ3                      = 0x9300
+	ETH_P_RARP                       = 0x8035
+	ETH_P_SCA                        = 0x6007
+	ETH_P_SLOW                       = 0x8809
+	ETH_P_SNAP                       = 0x5
+	ETH_P_TDLS                       = 0x890d
+	ETH_P_TEB                        = 0x6558
+	ETH_P_TIPC                       = 0x88ca
+	ETH_P_TRAILER                    = 0x1c
+	ETH_P_TR_802_2                   = 0x11
+	ETH_P_TSN                        = 0x22f0
+	ETH_P_WAN_PPP                    = 0x7
+	ETH_P_WCCP                       = 0x883e
+	ETH_P_X25                        = 0x805
+	ETH_P_XDSA                       = 0xf8
+	EXTA                             = 0xe
+	EXTB                             = 0xf
+	EXTPROC                          = 0x10000
+	FD_CLOEXEC                       = 0x1
+	FD_SETSIZE                       = 0x400
+	FF0                              = 0x0
+	FF1                              = 0x8000
+	FFDLY                            = 0x8000
+	FLUSHO                           = 0x2000
+	F_DUPFD                          = 0x0
+	F_DUPFD_CLOEXEC                  = 0x406
+	F_EXLCK                          = 0x4
+	F_GETFD                          = 0x1
+	F_GETFL                          = 0x3
+	F_GETLEASE                       = 0x401
+	F_GETLK                          = 0x7
+	F_GETLK64                        = 0x7
+	F_GETOWN                         = 0x5
+	F_GETOWN_EX                      = 0x10
+	F_GETPIPE_SZ                     = 0x408
+	F_GETSIG                         = 0xb
+	F_LOCK                           = 0x1
+	F_NOTIFY                         = 0x402
+	F_OFD_GETLK                      = 0x24
+	F_OFD_SETLK                      = 0x25
+	F_OFD_SETLKW                     = 0x26
+	F_OK                             = 0x0
+	F_RDLCK                          = 0x1
+	F_SETFD                          = 0x2
+	F_SETFL                          = 0x4
+	F_SETLEASE                       = 0x400
+	F_SETLK                          = 0x8
+	F_SETLK64                        = 0x8
+	F_SETLKW                         = 0x9
+	F_SETLKW64                       = 0x9
+	F_SETOWN                         = 0x6
+	F_SETOWN_EX                      = 0xf
+	F_SETPIPE_SZ                     = 0x407
+	F_SETSIG                         = 0xa
+	F_SHLCK                          = 0x8
+	F_TEST                           = 0x3
+	F_TLOCK                          = 0x2
+	F_ULOCK                          = 0x0
+	F_UNLCK                          = 0x3
+	F_WRLCK                          = 0x2
+	HUPCL                            = 0x400
+	IBSHIFT                          = 0x10
+	ICANON                           = 0x2
+	ICMPV6_FILTER                    = 0x1
+	ICRNL                            = 0x100
+	IEXTEN                           = 0x8000
+	IFA_F_DADFAILED                  = 0x8
+	IFA_F_DEPRECATED                 = 0x20
+	IFA_F_HOMEADDRESS                = 0x10
+	IFA_F_MANAGETEMPADDR             = 0x100
+	IFA_F_MCAUTOJOIN                 = 0x400
+	IFA_F_NODAD                      = 0x2
+	IFA_F_NOPREFIXROUTE              = 0x200
+	IFA_F_OPTIMISTIC                 = 0x4
+	IFA_F_PERMANENT                  = 0x80
+	IFA_F_SECONDARY                  = 0x1
+	IFA_F_STABLE_PRIVACY             = 0x800
+	IFA_F_TEMPORARY                  = 0x1
+	IFA_F_TENTATIVE                  = 0x40
+	IFA_MAX                          = 0x8
+	IFF_ALLMULTI                     = 0x200
+	IFF_ATTACH_QUEUE                 = 0x200
+	IFF_AUTOMEDIA                    = 0x4000
+	IFF_BROADCAST                    = 0x2
+	IFF_DEBUG                        = 0x4
+	IFF_DETACH_QUEUE                 = 0x400
+	IFF_DORMANT                      = 0x20000
+	IFF_DYNAMIC                      = 0x8000
+	IFF_ECHO                         = 0x40000
+	IFF_LOOPBACK                     = 0x8
+	IFF_LOWER_UP                     = 0x10000
+	IFF_MASTER                       = 0x400
+	IFF_MULTICAST                    = 0x1000
+	IFF_MULTI_QUEUE                  = 0x100
+	IFF_NOARP                        = 0x80
+	IFF_NOFILTER                     = 0x1000
+	IFF_NOTRAILERS                   = 0x20
+	IFF_NO_PI                        = 0x1000
+	IFF_ONE_QUEUE                    = 0x2000
+	IFF_PERSIST                      = 0x800
+	IFF_POINTOPOINT                  = 0x10
+	IFF_PORTSEL                      = 0x2000
+	IFF_PROMISC                      = 0x100
+	IFF_RUNNING                      = 0x40
+	IFF_SLAVE                        = 0x800
+	IFF_TAP                          = 0x2
+	IFF_TUN                          = 0x1
+	IFF_TUN_EXCL                     = 0x8000
+	IFF_UP                           = 0x1
+	IFF_VNET_HDR                     = 0x4000
+	IFF_VOLATILE                     = 0x70c5a
+	IFNAMSIZ                         = 0x10
+	IGNBRK                           = 0x1
+	IGNCR                            = 0x80
+	IGNPAR                           = 0x4
+	IMAXBEL                          = 0x2000
+	INLCR                            = 0x40
+	INPCK                            = 0x10
+	IN_ACCESS                        = 0x1
+	IN_ALL_EVENTS                    = 0xfff
+	IN_ATTRIB                        = 0x4
+	IN_CLASSA_HOST                   = 0xffffff
+	IN_CLASSA_MAX                    = 0x80
+	IN_CLASSA_NET                    = 0xff000000
+	IN_CLASSA_NSHIFT                 = 0x18
+	IN_CLASSB_HOST                   = 0xffff
+	IN_CLASSB_MAX                    = 0x10000
+	IN_CLASSB_NET                    = 0xffff0000
+	IN_CLASSB_NSHIFT                 = 0x10
+	IN_CLASSC_HOST                   = 0xff
+	IN_CLASSC_NET                    = 0xffffff00
+	IN_CLASSC_NSHIFT                 = 0x8
+	IN_CLOEXEC                       = 0x400000
+	IN_CLOSE                         = 0x18
+	IN_CLOSE_NOWRITE                 = 0x10
+	IN_CLOSE_WRITE                   = 0x8
+	IN_CREATE                        = 0x100
+	IN_DELETE                        = 0x200
+	IN_DELETE_SELF                   = 0x400
+	IN_DONT_FOLLOW                   = 0x2000000
+	IN_EXCL_UNLINK                   = 0x4000000
+	IN_IGNORED                       = 0x8000
+	IN_ISDIR                         = 0x40000000
+	IN_LOOPBACKNET                   = 0x7f
+	IN_MASK_ADD                      = 0x20000000
+	IN_MODIFY                        = 0x2
+	IN_MOVE                          = 0xc0
+	IN_MOVED_FROM                    = 0x40
+	IN_MOVED_TO                      = 0x80
+	IN_MOVE_SELF                     = 0x800
+	IN_NONBLOCK                      = 0x4000
+	IN_ONESHOT                       = 0x80000000
+	IN_ONLYDIR                       = 0x1000000
+	IN_OPEN                          = 0x20
+	IN_Q_OVERFLOW                    = 0x4000
+	IN_UNMOUNT                       = 0x2000
+	IPPROTO_AH                       = 0x33
+	IPPROTO_BEETPH                   = 0x5e
+	IPPROTO_COMP                     = 0x6c
+	IPPROTO_DCCP                     = 0x21
+	IPPROTO_DSTOPTS                  = 0x3c
+	IPPROTO_EGP                      = 0x8
+	IPPROTO_ENCAP                    = 0x62
+	IPPROTO_ESP                      = 0x32
+	IPPROTO_FRAGMENT                 = 0x2c
+	IPPROTO_GRE                      = 0x2f
+	IPPROTO_HOPOPTS                  = 0x0
+	IPPROTO_ICMP                     = 0x1
+	IPPROTO_ICMPV6                   = 0x3a
+	IPPROTO_IDP                      = 0x16
+	IPPROTO_IGMP                     = 0x2
+	IPPROTO_IP                       = 0x0
+	IPPROTO_IPIP                     = 0x4
+	IPPROTO_IPV6                     = 0x29
+	IPPROTO_MH                       = 0x87
+	IPPROTO_MPLS                     = 0x89
+	IPPROTO_MTP                      = 0x5c
+	IPPROTO_NONE                     = 0x3b
+	IPPROTO_PIM                      = 0x67
+	IPPROTO_PUP                      = 0xc
+	IPPROTO_RAW                      = 0xff
+	IPPROTO_ROUTING                  = 0x2b
+	IPPROTO_RSVP                     = 0x2e
+	IPPROTO_SCTP                     = 0x84
+	IPPROTO_TCP                      = 0x6
+	IPPROTO_TP                       = 0x1d
+	IPPROTO_UDP                      = 0x11
+	IPPROTO_UDPLITE                  = 0x88
+	IPV6_2292DSTOPTS                 = 0x4
+	IPV6_2292HOPLIMIT                = 0x8
+	IPV6_2292HOPOPTS                 = 0x3
+	IPV6_2292PKTINFO                 = 0x2
+	IPV6_2292PKTOPTIONS              = 0x6
+	IPV6_2292RTHDR                   = 0x5
+	IPV6_ADDRFORM                    = 0x1
+	IPV6_ADD_MEMBERSHIP              = 0x14
+	IPV6_AUTHHDR                     = 0xa
+	IPV6_CHECKSUM                    = 0x7
+	IPV6_DONTFRAG                    = 0x3e
+	IPV6_DROP_MEMBERSHIP             = 0x15
+	IPV6_DSTOPTS                     = 0x3b
+	IPV6_HDRINCL                     = 0x24
+	IPV6_HOPLIMIT                    = 0x34
+	IPV6_HOPOPTS                     = 0x36
+	IPV6_IPSEC_POLICY                = 0x22
+	IPV6_JOIN_ANYCAST                = 0x1b
+	IPV6_JOIN_GROUP                  = 0x14
+	IPV6_LEAVE_ANYCAST               = 0x1c
+	IPV6_LEAVE_GROUP                 = 0x15
+	IPV6_MTU                         = 0x18
+	IPV6_MTU_DISCOVER                = 0x17
+	IPV6_MULTICAST_HOPS              = 0x12
+	IPV6_MULTICAST_IF                = 0x11
+	IPV6_MULTICAST_LOOP              = 0x13
+	IPV6_NEXTHOP                     = 0x9
+	IPV6_PATHMTU                     = 0x3d
+	IPV6_PKTINFO                     = 0x32
+	IPV6_PMTUDISC_DO                 = 0x2
+	IPV6_PMTUDISC_DONT               = 0x0
+	IPV6_PMTUDISC_INTERFACE          = 0x4
+	IPV6_PMTUDISC_OMIT               = 0x5
+	IPV6_PMTUDISC_PROBE              = 0x3
+	IPV6_PMTUDISC_WANT               = 0x1
+	IPV6_RECVDSTOPTS                 = 0x3a
+	IPV6_RECVERR                     = 0x19
+	IPV6_RECVHOPLIMIT                = 0x33
+	IPV6_RECVHOPOPTS                 = 0x35
+	IPV6_RECVPATHMTU                 = 0x3c
+	IPV6_RECVPKTINFO                 = 0x31
+	IPV6_RECVRTHDR                   = 0x38
+	IPV6_RECVTCLASS                  = 0x42
+	IPV6_ROUTER_ALERT                = 0x16
+	IPV6_RTHDR                       = 0x39
+	IPV6_RTHDRDSTOPTS                = 0x37
+	IPV6_RTHDR_LOOSE                 = 0x0
+	IPV6_RTHDR_STRICT                = 0x1
+	IPV6_RTHDR_TYPE_0                = 0x0
+	IPV6_RXDSTOPTS                   = 0x3b
+	IPV6_RXHOPOPTS                   = 0x36
+	IPV6_TCLASS                      = 0x43
+	IPV6_UNICAST_HOPS                = 0x10
+	IPV6_V6ONLY                      = 0x1a
+	IPV6_XFRM_POLICY                 = 0x23
+	IP_ADD_MEMBERSHIP                = 0x23
+	IP_ADD_SOURCE_MEMBERSHIP         = 0x27
+	IP_BIND_ADDRESS_NO_PORT          = 0x18
+	IP_BLOCK_SOURCE                  = 0x26
+	IP_CHECKSUM                      = 0x17
+	IP_DEFAULT_MULTICAST_LOOP        = 0x1
+	IP_DEFAULT_MULTICAST_TTL         = 0x1
+	IP_DF                            = 0x4000
+	IP_DROP_MEMBERSHIP               = 0x24
+	IP_DROP_SOURCE_MEMBERSHIP        = 0x28
+	IP_FREEBIND                      = 0xf
+	IP_HDRINCL                       = 0x3
+	IP_IPSEC_POLICY                  = 0x10
+	IP_MAXPACKET                     = 0xffff
+	IP_MAX_MEMBERSHIPS               = 0x14
+	IP_MF                            = 0x2000
+	IP_MINTTL                        = 0x15
+	IP_MSFILTER                      = 0x29
+	IP_MSS                           = 0x240
+	IP_MTU                           = 0xe
+	IP_MTU_DISCOVER                  = 0xa
+	IP_MULTICAST_ALL                 = 0x31
+	IP_MULTICAST_IF                  = 0x20
+	IP_MULTICAST_LOOP                = 0x22
+	IP_MULTICAST_TTL                 = 0x21
+	IP_NODEFRAG                      = 0x16
+	IP_OFFMASK                       = 0x1fff
+	IP_OPTIONS                       = 0x4
+	IP_ORIGDSTADDR                   = 0x14
+	IP_PASSSEC                       = 0x12
+	IP_PKTINFO                       = 0x8
+	IP_PKTOPTIONS                    = 0x9
+	IP_PMTUDISC                      = 0xa
+	IP_PMTUDISC_DO                   = 0x2
+	IP_PMTUDISC_DONT                 = 0x0
+	IP_PMTUDISC_INTERFACE            = 0x4
+	IP_PMTUDISC_OMIT                 = 0x5
+	IP_PMTUDISC_PROBE                = 0x3
+	IP_PMTUDISC_WANT                 = 0x1
+	IP_RECVERR                       = 0xb
+	IP_RECVOPTS                      = 0x6
+	IP_RECVORIGDSTADDR               = 0x14
+	IP_RECVRETOPTS                   = 0x7
+	IP_RECVTOS                       = 0xd
+	IP_RECVTTL                       = 0xc
+	IP_RETOPTS                       = 0x7
+	IP_RF                            = 0x8000
+	IP_ROUTER_ALERT                  = 0x5
+	IP_TOS                           = 0x1
+	IP_TRANSPARENT                   = 0x13
+	IP_TTL                           = 0x2
+	IP_UNBLOCK_SOURCE                = 0x25
+	IP_UNICAST_IF                    = 0x32
+	IP_XFRM_POLICY                   = 0x11
+	ISIG                             = 0x1
+	ISTRIP                           = 0x20
+	IUCLC                            = 0x200
+	IUTF8                            = 0x4000
+	IXANY                            = 0x800
+	IXOFF                            = 0x1000
+	IXON                             = 0x400
+	LINUX_REBOOT_CMD_CAD_OFF         = 0x0
+	LINUX_REBOOT_CMD_CAD_ON          = 0x89abcdef
+	LINUX_REBOOT_CMD_HALT            = 0xcdef0123
+	LINUX_REBOOT_CMD_KEXEC           = 0x45584543
+	LINUX_REBOOT_CMD_POWER_OFF       = 0x4321fedc
+	LINUX_REBOOT_CMD_RESTART         = 0x1234567
+	LINUX_REBOOT_CMD_RESTART2        = 0xa1b2c3d4
+	LINUX_REBOOT_CMD_SW_SUSPEND      = 0xd000fce2
+	LINUX_REBOOT_MAGIC1              = 0xfee1dead
+	LINUX_REBOOT_MAGIC2              = 0x28121969
+	LOCK_EX                          = 0x2
+	LOCK_NB                          = 0x4
+	LOCK_SH                          = 0x1
+	LOCK_UN                          = 0x8
+	MADV_DODUMP                      = 0x11
+	MADV_DOFORK                      = 0xb
+	MADV_DONTDUMP                    = 0x10
+	MADV_DONTFORK                    = 0xa
+	MADV_DONTNEED                    = 0x4
+	MADV_FREE                        = 0x8
+	MADV_HUGEPAGE                    = 0xe
+	MADV_HWPOISON                    = 0x64
+	MADV_MERGEABLE                   = 0xc
+	MADV_NOHUGEPAGE                  = 0xf
+	MADV_NORMAL                      = 0x0
+	MADV_RANDOM                      = 0x1
+	MADV_REMOVE                      = 0x9
+	MADV_SEQUENTIAL                  = 0x2
+	MADV_UNMERGEABLE                 = 0xd
+	MADV_WILLNEED                    = 0x3
+	MAP_ANON                         = 0x20
+	MAP_ANONYMOUS                    = 0x20
+	MAP_DENYWRITE                    = 0x800
+	MAP_EXECUTABLE                   = 0x1000
+	MAP_FILE                         = 0x0
+	MAP_FIXED                        = 0x10
+	MAP_GROWSDOWN                    = 0x200
+	MAP_HUGETLB                      = 0x40000
+	MAP_HUGE_MASK                    = 0x3f
+	MAP_HUGE_SHIFT                   = 0x1a
+	MAP_LOCKED                       = 0x100
+	MAP_NONBLOCK                     = 0x10000
+	MAP_NORESERVE                    = 0x40
+	MAP_POPULATE                     = 0x8000
+	MAP_PRIVATE                      = 0x2
+	MAP_RENAME                       = 0x20
+	MAP_SHARED                       = 0x1
+	MAP_STACK                        = 0x20000
+	MAP_TYPE                         = 0xf
+	MCL_CURRENT                      = 0x2000
+	MCL_FUTURE                       = 0x4000
+	MCL_ONFAULT                      = 0x8000
+	MNT_DETACH                       = 0x2
+	MNT_EXPIRE                       = 0x4
+	MNT_FORCE                        = 0x1
+	MSG_BATCH                        = 0x40000
+	MSG_CMSG_CLOEXEC                 = 0x40000000
+	MSG_CONFIRM                      = 0x800
+	MSG_CTRUNC                       = 0x8
+	MSG_DONTROUTE                    = 0x4
+	MSG_DONTWAIT                     = 0x40
+	MSG_EOR                          = 0x80
+	MSG_ERRQUEUE                     = 0x2000
+	MSG_FASTOPEN                     = 0x20000000
+	MSG_FIN                          = 0x200
+	MSG_MORE                         = 0x8000
+	MSG_NOSIGNAL                     = 0x4000
+	MSG_OOB                          = 0x1
+	MSG_PEEK                         = 0x2
+	MSG_PROXY                        = 0x10
+	MSG_RST                          = 0x1000
+	MSG_SYN                          = 0x400
+	MSG_TRUNC                        = 0x20
+	MSG_TRYHARD                      = 0x4
+	MSG_WAITALL                      = 0x100
+	MSG_WAITFORONE                   = 0x10000
+	MS_ACTIVE                        = 0x40000000
+	MS_ASYNC                         = 0x1
+	MS_BIND                          = 0x1000
+	MS_DIRSYNC                       = 0x80
+	MS_INVALIDATE                    = 0x2
+	MS_I_VERSION                     = 0x800000
+	MS_KERNMOUNT                     = 0x400000
+	MS_LAZYTIME                      = 0x2000000
+	MS_MANDLOCK                      = 0x40
+	MS_MGC_MSK                       = 0xffff0000
+	MS_MGC_VAL                       = 0xc0ed0000
+	MS_MOVE                          = 0x2000
+	MS_NOATIME                       = 0x400
+	MS_NODEV                         = 0x4
+	MS_NODIRATIME                    = 0x800
+	MS_NOEXEC                        = 0x8
+	MS_NOSUID                        = 0x2
+	MS_NOUSER                        = -0x80000000
+	MS_POSIXACL                      = 0x10000
+	MS_PRIVATE                       = 0x40000
+	MS_RDONLY                        = 0x1
+	MS_REC                           = 0x4000
+	MS_RELATIME                      = 0x200000
+	MS_REMOUNT                       = 0x20
+	MS_RMT_MASK                      = 0x2800051
+	MS_SHARED                        = 0x100000
+	MS_SILENT                        = 0x8000
+	MS_SLAVE                         = 0x80000
+	MS_STRICTATIME                   = 0x1000000
+	MS_SYNC                          = 0x4
+	MS_SYNCHRONOUS                   = 0x10
+	MS_UNBINDABLE                    = 0x20000
+	NAME_MAX                         = 0xff
+	NETLINK_ADD_MEMBERSHIP           = 0x1
+	NETLINK_AUDIT                    = 0x9
+	NETLINK_BROADCAST_ERROR          = 0x4
+	NETLINK_CAP_ACK                  = 0xa
+	NETLINK_CONNECTOR                = 0xb
+	NETLINK_CRYPTO                   = 0x15
+	NETLINK_DNRTMSG                  = 0xe
+	NETLINK_DROP_MEMBERSHIP          = 0x2
+	NETLINK_ECRYPTFS                 = 0x13
+	NETLINK_FIB_LOOKUP               = 0xa
+	NETLINK_FIREWALL                 = 0x3
+	NETLINK_GENERIC                  = 0x10
+	NETLINK_INET_DIAG                = 0x4
+	NETLINK_IP6_FW                   = 0xd
+	NETLINK_ISCSI                    = 0x8
+	NETLINK_KOBJECT_UEVENT           = 0xf
+	NETLINK_LISTEN_ALL_NSID          = 0x8
+	NETLINK_LIST_MEMBERSHIPS         = 0x9
+	NETLINK_NETFILTER                = 0xc
+	NETLINK_NFLOG                    = 0x5
+	NETLINK_NO_ENOBUFS               = 0x5
+	NETLINK_PKTINFO                  = 0x3
+	NETLINK_RDMA                     = 0x14
+	NETLINK_ROUTE                    = 0x0
+	NETLINK_RX_RING                  = 0x6
+	NETLINK_SCSITRANSPORT            = 0x12
+	NETLINK_SELINUX                  = 0x7
+	NETLINK_SOCK_DIAG                = 0x4
+	NETLINK_TX_RING                  = 0x7
+	NETLINK_UNUSED                   = 0x1
+	NETLINK_USERSOCK                 = 0x2
+	NETLINK_XFRM                     = 0x6
+	NL0                              = 0x0
+	NL1                              = 0x100
+	NLA_ALIGNTO                      = 0x4
+	NLA_F_NESTED                     = 0x8000
+	NLA_F_NET_BYTEORDER              = 0x4000
+	NLA_HDRLEN                       = 0x4
+	NLDLY                            = 0x100
+	NLMSG_ALIGNTO                    = 0x4
+	NLMSG_DONE                       = 0x3
+	NLMSG_ERROR                      = 0x2
+	NLMSG_HDRLEN                     = 0x10
+	NLMSG_MIN_TYPE                   = 0x10
+	NLMSG_NOOP                       = 0x1
+	NLMSG_OVERRUN                    = 0x4
+	NLM_F_ACK                        = 0x4
+	NLM_F_APPEND                     = 0x800
+	NLM_F_ATOMIC                     = 0x400
+	NLM_F_CREATE                     = 0x400
+	NLM_F_DUMP                       = 0x300
+	NLM_F_DUMP_FILTERED              = 0x20
+	NLM_F_DUMP_INTR                  = 0x10
+	NLM_F_ECHO                       = 0x8
+	NLM_F_EXCL                       = 0x200
+	NLM_F_MATCH                      = 0x200
+	NLM_F_MULTI                      = 0x2
+	NLM_F_REPLACE                    = 0x100
+	NLM_F_REQUEST                    = 0x1
+	NLM_F_ROOT                       = 0x100
+	NOFLSH                           = 0x80
+	OCRNL                            = 0x8
+	OFDEL                            = 0x80
+	OFILL                            = 0x40
+	OLCUC                            = 0x2
+	ONLCR                            = 0x4
+	ONLRET                           = 0x20
+	ONOCR                            = 0x10
+	OPOST                            = 0x1
+	O_ACCMODE                        = 0x3
+	O_APPEND                         = 0x8
+	O_ASYNC                          = 0x40
+	O_CLOEXEC                        = 0x400000
+	O_CREAT                          = 0x200
+	O_DIRECT                         = 0x100000
+	O_DIRECTORY                      = 0x10000
+	O_DSYNC                          = 0x2000
+	O_EXCL                           = 0x800
+	O_FSYNC                          = 0x802000
+	O_LARGEFILE                      = 0x0
+	O_NDELAY                         = 0x4004
+	O_NOATIME                        = 0x200000
+	O_NOCTTY                         = 0x8000
+	O_NOFOLLOW                       = 0x20000
+	O_NONBLOCK                       = 0x4000
+	O_PATH                           = 0x1000000
+	O_RDONLY                         = 0x0
+	O_RDWR                           = 0x2
+	O_RSYNC                          = 0x802000
+	O_SYNC                           = 0x802000
+	O_TMPFILE                        = 0x2010000
+	O_TRUNC                          = 0x400
+	O_WRONLY                         = 0x1
+	PACKET_ADD_MEMBERSHIP            = 0x1
+	PACKET_AUXDATA                   = 0x8
+	PACKET_BROADCAST                 = 0x1
+	PACKET_COPY_THRESH               = 0x7
+	PACKET_DROP_MEMBERSHIP           = 0x2
+	PACKET_FANOUT                    = 0x12
+	PACKET_FANOUT_CBPF               = 0x6
+	PACKET_FANOUT_CPU                = 0x2
+	PACKET_FANOUT_DATA               = 0x16
+	PACKET_FANOUT_EBPF               = 0x7
+	PACKET_FANOUT_FLAG_DEFRAG        = 0x8000
+	PACKET_FANOUT_FLAG_ROLLOVER      = 0x1000
+	PACKET_FANOUT_HASH               = 0x0
+	PACKET_FANOUT_LB                 = 0x1
+	PACKET_FANOUT_QM                 = 0x5
+	PACKET_FANOUT_RND                = 0x4
+	PACKET_FANOUT_ROLLOVER           = 0x3
+	PACKET_FASTROUTE                 = 0x6
+	PACKET_HDRLEN                    = 0xb
+	PACKET_HOST                      = 0x0
+	PACKET_KERNEL                    = 0x7
+	PACKET_LOOPBACK                  = 0x5
+	PACKET_LOSS                      = 0xe
+	PACKET_MR_ALLMULTI               = 0x2
+	PACKET_MR_MULTICAST              = 0x0
+	PACKET_MR_PROMISC                = 0x1
+	PACKET_MR_UNICAST                = 0x3
+	PACKET_MULTICAST                 = 0x2
+	PACKET_ORIGDEV                   = 0x9
+	PACKET_OTHERHOST                 = 0x3
+	PACKET_OUTGOING                  = 0x4
+	PACKET_QDISC_BYPASS              = 0x14
+	PACKET_RECV_OUTPUT               = 0x3
+	PACKET_RESERVE                   = 0xc
+	PACKET_ROLLOVER_STATS            = 0x15
+	PACKET_RX_RING                   = 0x5
+	PACKET_STATISTICS                = 0x6
+	PACKET_TIMESTAMP                 = 0x11
+	PACKET_TX_HAS_OFF                = 0x13
+	PACKET_TX_RING                   = 0xd
+	PACKET_TX_TIMESTAMP              = 0x10
+	PACKET_USER                      = 0x6
+	PACKET_VERSION                   = 0xa
+	PACKET_VNET_HDR                  = 0xf
+	PARENB                           = 0x100
+	PARITY_CRC16_PR0                 = 0x2
+	PARITY_CRC16_PR0_CCITT           = 0x4
+	PARITY_CRC16_PR1                 = 0x3
+	PARITY_CRC16_PR1_CCITT           = 0x5
+	PARITY_CRC32_PR0_CCITT           = 0x6
+	PARITY_CRC32_PR1_CCITT           = 0x7
+	PARITY_DEFAULT                   = 0x0
+	PARITY_NONE                      = 0x1
+	PARMRK                           = 0x8
+	PARODD                           = 0x200
+	PENDIN                           = 0x4000
+	PRIO_PGRP                        = 0x1
+	PRIO_PROCESS                     = 0x0
+	PRIO_USER                        = 0x2
+	PROT_EXEC                        = 0x4
+	PROT_GROWSDOWN                   = 0x1000000
+	PROT_GROWSUP                     = 0x2000000
+	PROT_NONE                        = 0x0
+	PROT_READ                        = 0x1
+	PROT_WRITE                       = 0x2
+	PR_CAPBSET_DROP                  = 0x18
+	PR_CAPBSET_READ                  = 0x17
+	PR_CAP_AMBIENT                   = 0x2f
+	PR_CAP_AMBIENT_CLEAR_ALL         = 0x4
+	PR_CAP_AMBIENT_IS_SET            = 0x1
+	PR_CAP_AMBIENT_LOWER             = 0x3
+	PR_CAP_AMBIENT_RAISE             = 0x2
+	PR_ENDIAN_BIG                    = 0x0
+	PR_ENDIAN_LITTLE                 = 0x1
+	PR_ENDIAN_PPC_LITTLE             = 0x2
+	PR_FPEMU_NOPRINT                 = 0x1
+	PR_FPEMU_SIGFPE                  = 0x2
+	PR_FP_EXC_ASYNC                  = 0x2
+	PR_FP_EXC_DISABLED               = 0x0
+	PR_FP_EXC_DIV                    = 0x10000
+	PR_FP_EXC_INV                    = 0x100000
+	PR_FP_EXC_NONRECOV               = 0x1
+	PR_FP_EXC_OVF                    = 0x20000
+	PR_FP_EXC_PRECISE                = 0x3
+	PR_FP_EXC_RES                    = 0x80000
+	PR_FP_EXC_SW_ENABLE              = 0x80
+	PR_FP_EXC_UND                    = 0x40000
+	PR_FP_MODE_FR                    = 0x1
+	PR_FP_MODE_FRE                   = 0x2
+	PR_GET_CHILD_SUBREAPER           = 0x25
+	PR_GET_DUMPABLE                  = 0x3
+	PR_GET_ENDIAN                    = 0x13
+	PR_GET_FPEMU                     = 0x9
+	PR_GET_FPEXC                     = 0xb
+	PR_GET_FP_MODE                   = 0x2e
+	PR_GET_KEEPCAPS                  = 0x7
+	PR_GET_NAME                      = 0x10
+	PR_GET_NO_NEW_PRIVS              = 0x27
+	PR_GET_PDEATHSIG                 = 0x2
+	PR_GET_SECCOMP                   = 0x15
+	PR_GET_SECUREBITS                = 0x1b
+	PR_GET_THP_DISABLE               = 0x2a
+	PR_GET_TID_ADDRESS               = 0x28
+	PR_GET_TIMERSLACK                = 0x1e
+	PR_GET_TIMING                    = 0xd
+	PR_GET_TSC                       = 0x19
+	PR_GET_UNALIGN                   = 0x5
+	PR_MCE_KILL                      = 0x21
+	PR_MCE_KILL_CLEAR                = 0x0
+	PR_MCE_KILL_DEFAULT              = 0x2
+	PR_MCE_KILL_EARLY                = 0x1
+	PR_MCE_KILL_GET                  = 0x22
+	PR_MCE_KILL_LATE                 = 0x0
+	PR_MCE_KILL_SET                  = 0x1
+	PR_MPX_DISABLE_MANAGEMENT        = 0x2c
+	PR_MPX_ENABLE_MANAGEMENT         = 0x2b
+	PR_SET_CHILD_SUBREAPER           = 0x24
+	PR_SET_DUMPABLE                  = 0x4
+	PR_SET_ENDIAN                    = 0x14
+	PR_SET_FPEMU                     = 0xa
+	PR_SET_FPEXC                     = 0xc
+	PR_SET_FP_MODE                   = 0x2d
+	PR_SET_KEEPCAPS                  = 0x8
+	PR_SET_MM                        = 0x23
+	PR_SET_MM_ARG_END                = 0x9
+	PR_SET_MM_ARG_START              = 0x8
+	PR_SET_MM_AUXV                   = 0xc
+	PR_SET_MM_BRK                    = 0x7
+	PR_SET_MM_END_CODE               = 0x2
+	PR_SET_MM_END_DATA               = 0x4
+	PR_SET_MM_ENV_END                = 0xb
+	PR_SET_MM_ENV_START              = 0xa
+	PR_SET_MM_EXE_FILE               = 0xd
+	PR_SET_MM_MAP                    = 0xe
+	PR_SET_MM_MAP_SIZE               = 0xf
+	PR_SET_MM_START_BRK              = 0x6
+	PR_SET_MM_START_CODE             = 0x1
+	PR_SET_MM_START_DATA             = 0x3
+	PR_SET_MM_START_STACK            = 0x5
+	PR_SET_NAME                      = 0xf
+	PR_SET_NO_NEW_PRIVS              = 0x26
+	PR_SET_PDEATHSIG                 = 0x1
+	PR_SET_PTRACER                   = 0x59616d61
+	PR_SET_PTRACER_ANY               = -0x1
+	PR_SET_SECCOMP                   = 0x16
+	PR_SET_SECUREBITS                = 0x1c
+	PR_SET_THP_DISABLE               = 0x29
+	PR_SET_TIMERSLACK                = 0x1d
+	PR_SET_TIMING                    = 0xe
+	PR_SET_TSC                       = 0x1a
+	PR_SET_UNALIGN                   = 0x6
+	PR_TASK_PERF_EVENTS_DISABLE      = 0x1f
+	PR_TASK_PERF_EVENTS_ENABLE       = 0x20
+	PR_TIMING_STATISTICAL            = 0x0
+	PR_TIMING_TIMESTAMP              = 0x1
+	PR_TSC_ENABLE                    = 0x1
+	PR_TSC_SIGSEGV                   = 0x2
+	PR_UNALIGN_NOPRINT               = 0x1
+	PR_UNALIGN_SIGBUS                = 0x2
+	PTRACE_ATTACH                    = 0x10
+	PTRACE_CONT                      = 0x7
+	PTRACE_DETACH                    = 0x11
+	PTRACE_EVENT_CLONE               = 0x3
+	PTRACE_EVENT_EXEC                = 0x4
+	PTRACE_EVENT_EXIT                = 0x6
+	PTRACE_EVENT_FORK                = 0x1
+	PTRACE_EVENT_SECCOMP             = 0x7
+	PTRACE_EVENT_STOP                = 0x80
+	PTRACE_EVENT_VFORK               = 0x2
+	PTRACE_EVENT_VFORK_DONE          = 0x5
+	PTRACE_GETEVENTMSG               = 0x4201
+	PTRACE_GETFPAREGS                = 0x14
+	PTRACE_GETFPREGS                 = 0xe
+	PTRACE_GETFPREGS64               = 0x19
+	PTRACE_GETREGS                   = 0xc
+	PTRACE_GETREGS64                 = 0x16
+	PTRACE_GETREGSET                 = 0x4204
+	PTRACE_GETSIGINFO                = 0x4202
+	PTRACE_GETSIGMASK                = 0x420a
+	PTRACE_INTERRUPT                 = 0x4207
+	PTRACE_KILL                      = 0x8
+	PTRACE_LISTEN                    = 0x4208
+	PTRACE_O_EXITKILL                = 0x100000
+	PTRACE_O_MASK                    = 0x3000ff
+	PTRACE_O_SUSPEND_SECCOMP         = 0x200000
+	PTRACE_O_TRACECLONE              = 0x8
+	PTRACE_O_TRACEEXEC               = 0x10
+	PTRACE_O_TRACEEXIT               = 0x40
+	PTRACE_O_TRACEFORK               = 0x2
+	PTRACE_O_TRACESECCOMP            = 0x80
+	PTRACE_O_TRACESYSGOOD            = 0x1
+	PTRACE_O_TRACEVFORK              = 0x4
+	PTRACE_O_TRACEVFORKDONE          = 0x20
+	PTRACE_PEEKDATA                  = 0x2
+	PTRACE_PEEKSIGINFO               = 0x4209
+	PTRACE_PEEKSIGINFO_SHARED        = 0x1
+	PTRACE_PEEKTEXT                  = 0x1
+	PTRACE_PEEKUSR                   = 0x3
+	PTRACE_POKEDATA                  = 0x5
+	PTRACE_POKETEXT                  = 0x4
+	PTRACE_POKEUSR                   = 0x6
+	PTRACE_READDATA                  = 0x10
+	PTRACE_READTEXT                  = 0x12
+	PTRACE_SECCOMP_GET_FILTER        = 0x420c
+	PTRACE_SEIZE                     = 0x4206
+	PTRACE_SETFPAREGS                = 0x15
+	PTRACE_SETFPREGS                 = 0xf
+	PTRACE_SETFPREGS64               = 0x1a
+	PTRACE_SETOPTIONS                = 0x4200
+	PTRACE_SETREGS                   = 0xd
+	PTRACE_SETREGS64                 = 0x17
+	PTRACE_SETREGSET                 = 0x4205
+	PTRACE_SETSIGINFO                = 0x4203
+	PTRACE_SETSIGMASK                = 0x420b
+	PTRACE_SINGLESTEP                = 0x9
+	PTRACE_SPARC_DETACH              = 0xb
+	PTRACE_SYSCALL                   = 0x18
+	PTRACE_TRACEME                   = 0x0
+	PTRACE_WRITEDATA                 = 0x11
+	PTRACE_WRITETEXT                 = 0x13
+	PT_FP                            = 0x48
+	PT_G0                            = 0x10
+	PT_G1                            = 0x14
+	PT_G2                            = 0x18
+	PT_G3                            = 0x1c
+	PT_G4                            = 0x20
+	PT_G5                            = 0x24
+	PT_G6                            = 0x28
+	PT_G7                            = 0x2c
+	PT_I0                            = 0x30
+	PT_I1                            = 0x34
+	PT_I2                            = 0x38
+	PT_I3                            = 0x3c
+	PT_I4                            = 0x40
+	PT_I5                            = 0x44
+	PT_I6                            = 0x48
+	PT_I7                            = 0x4c
+	PT_NPC                           = 0x8
+	PT_PC                            = 0x4
+	PT_PSR                           = 0x0
+	PT_REGS_MAGIC                    = 0x57ac6c00
+	PT_TNPC                          = 0x90
+	PT_TPC                           = 0x88
+	PT_TSTATE                        = 0x80
+	PT_V9_FP                         = 0x70
+	PT_V9_G0                         = 0x0
+	PT_V9_G1                         = 0x8
+	PT_V9_G2                         = 0x10
+	PT_V9_G3                         = 0x18
+	PT_V9_G4                         = 0x20
+	PT_V9_G5                         = 0x28
+	PT_V9_G6                         = 0x30
+	PT_V9_G7                         = 0x38
+	PT_V9_I0                         = 0x40
+	PT_V9_I1                         = 0x48
+	PT_V9_I2                         = 0x50
+	PT_V9_I3                         = 0x58
+	PT_V9_I4                         = 0x60
+	PT_V9_I5                         = 0x68
+	PT_V9_I6                         = 0x70
+	PT_V9_I7                         = 0x78
+	PT_V9_MAGIC                      = 0x9c
+	PT_V9_TNPC                       = 0x90
+	PT_V9_TPC                        = 0x88
+	PT_V9_TSTATE                     = 0x80
+	PT_V9_Y                          = 0x98
+	PT_WIM                           = 0x10
+	PT_Y                             = 0xc
+	RLIMIT_AS                        = 0x9
+	RLIMIT_CORE                      = 0x4
+	RLIMIT_CPU                       = 0x0
+	RLIMIT_DATA                      = 0x2
+	RLIMIT_FSIZE                     = 0x1
+	RLIMIT_NOFILE                    = 0x6
+	RLIMIT_STACK                     = 0x3
+	RLIM_INFINITY                    = -0x1
+	RTAX_ADVMSS                      = 0x8
+	RTAX_CC_ALGO                     = 0x10
+	RTAX_CWND                        = 0x7
+	RTAX_FEATURES                    = 0xc
+	RTAX_FEATURE_ALLFRAG             = 0x8
+	RTAX_FEATURE_ECN                 = 0x1
+	RTAX_FEATURE_MASK                = 0xf
+	RTAX_FEATURE_SACK                = 0x2
+	RTAX_FEATURE_TIMESTAMP           = 0x4
+	RTAX_HOPLIMIT                    = 0xa
+	RTAX_INITCWND                    = 0xb
+	RTAX_INITRWND                    = 0xe
+	RTAX_LOCK                        = 0x1
+	RTAX_MAX                         = 0x10
+	RTAX_MTU                         = 0x2
+	RTAX_QUICKACK                    = 0xf
+	RTAX_REORDERING                  = 0x9
+	RTAX_RTO_MIN                     = 0xd
+	RTAX_RTT                         = 0x4
+	RTAX_RTTVAR                      = 0x5
+	RTAX_SSTHRESH                    = 0x6
+	RTAX_UNSPEC                      = 0x0
+	RTAX_WINDOW                      = 0x3
+	RTA_ALIGNTO                      = 0x4
+	RTA_MAX                          = 0x18
+	RTCF_DIRECTSRC                   = 0x4000000
+	RTCF_DOREDIRECT                  = 0x1000000
+	RTCF_LOG                         = 0x2000000
+	RTCF_MASQ                        = 0x400000
+	RTCF_NAT                         = 0x800000
+	RTCF_VALVE                       = 0x200000
+	RTF_ADDRCLASSMASK                = 0xf8000000
+	RTF_ADDRCONF                     = 0x40000
+	RTF_ALLONLINK                    = 0x20000
+	RTF_BROADCAST                    = 0x10000000
+	RTF_CACHE                        = 0x1000000
+	RTF_DEFAULT                      = 0x10000
+	RTF_DYNAMIC                      = 0x10
+	RTF_FLOW                         = 0x2000000
+	RTF_GATEWAY                      = 0x2
+	RTF_HOST                         = 0x4
+	RTF_INTERFACE                    = 0x40000000
+	RTF_IRTT                         = 0x100
+	RTF_LINKRT                       = 0x100000
+	RTF_LOCAL                        = 0x80000000
+	RTF_MODIFIED                     = 0x20
+	RTF_MSS                          = 0x40
+	RTF_MTU                          = 0x40
+	RTF_MULTICAST                    = 0x20000000
+	RTF_NAT                          = 0x8000000
+	RTF_NOFORWARD                    = 0x1000
+	RTF_NONEXTHOP                    = 0x200000
+	RTF_NOPMTUDISC                   = 0x4000
+	RTF_POLICY                       = 0x4000000
+	RTF_REINSTATE                    = 0x8
+	RTF_REJECT                       = 0x200
+	RTF_STATIC                       = 0x400
+	RTF_THROW                        = 0x2000
+	RTF_UP                           = 0x1
+	RTF_WINDOW                       = 0x80
+	RTF_XRESOLVE                     = 0x800
+	RTM_BASE                         = 0x10
+	RTM_DELACTION                    = 0x31
+	RTM_DELADDR                      = 0x15
+	RTM_DELADDRLABEL                 = 0x49
+	RTM_DELLINK                      = 0x11
+	RTM_DELMDB                       = 0x55
+	RTM_DELNEIGH                     = 0x1d
+	RTM_DELNSID                      = 0x59
+	RTM_DELQDISC                     = 0x25
+	RTM_DELROUTE                     = 0x19
+	RTM_DELRULE                      = 0x21
+	RTM_DELTCLASS                    = 0x29
+	RTM_DELTFILTER                   = 0x2d
+	RTM_F_CLONED                     = 0x200
+	RTM_F_EQUALIZE                   = 0x400
+	RTM_F_LOOKUP_TABLE               = 0x1000
+	RTM_F_NOTIFY                     = 0x100
+	RTM_F_PREFIX                     = 0x800
+	RTM_GETACTION                    = 0x32
+	RTM_GETADDR                      = 0x16
+	RTM_GETADDRLABEL                 = 0x4a
+	RTM_GETANYCAST                   = 0x3e
+	RTM_GETDCB                       = 0x4e
+	RTM_GETLINK                      = 0x12
+	RTM_GETMDB                       = 0x56
+	RTM_GETMULTICAST                 = 0x3a
+	RTM_GETNEIGH                     = 0x1e
+	RTM_GETNEIGHTBL                  = 0x42
+	RTM_GETNETCONF                   = 0x52
+	RTM_GETNSID                      = 0x5a
+	RTM_GETQDISC                     = 0x26
+	RTM_GETROUTE                     = 0x1a
+	RTM_GETRULE                      = 0x22
+	RTM_GETSTATS                     = 0x5e
+	RTM_GETTCLASS                    = 0x2a
+	RTM_GETTFILTER                   = 0x2e
+	RTM_MAX                          = 0x5f
+	RTM_NEWACTION                    = 0x30
+	RTM_NEWADDR                      = 0x14
+	RTM_NEWADDRLABEL                 = 0x48
+	RTM_NEWLINK                      = 0x10
+	RTM_NEWMDB                       = 0x54
+	RTM_NEWNDUSEROPT                 = 0x44
+	RTM_NEWNEIGH                     = 0x1c
+	RTM_NEWNEIGHTBL                  = 0x40
+	RTM_NEWNETCONF                   = 0x50
+	RTM_NEWNSID                      = 0x58
+	RTM_NEWPREFIX                    = 0x34
+	RTM_NEWQDISC                     = 0x24
+	RTM_NEWROUTE                     = 0x18
+	RTM_NEWRULE                      = 0x20
+	RTM_NEWSTATS                     = 0x5c
+	RTM_NEWTCLASS                    = 0x28
+	RTM_NEWTFILTER                   = 0x2c
+	RTM_NR_FAMILIES                  = 0x14
+	RTM_NR_MSGTYPES                  = 0x50
+	RTM_SETDCB                       = 0x4f
+	RTM_SETLINK                      = 0x13
+	RTM_SETNEIGHTBL                  = 0x43
+	RTNH_ALIGNTO                     = 0x4
+	RTNH_COMPARE_MASK                = 0x11
+	RTNH_F_DEAD                      = 0x1
+	RTNH_F_LINKDOWN                  = 0x10
+	RTNH_F_OFFLOAD                   = 0x8
+	RTNH_F_ONLINK                    = 0x4
+	RTNH_F_PERVASIVE                 = 0x2
+	RTN_MAX                          = 0xb
+	RTPROT_BABEL                     = 0x2a
+	RTPROT_BIRD                      = 0xc
+	RTPROT_BOOT                      = 0x3
+	RTPROT_DHCP                      = 0x10
+	RTPROT_DNROUTED                  = 0xd
+	RTPROT_GATED                     = 0x8
+	RTPROT_KERNEL                    = 0x2
+	RTPROT_MROUTED                   = 0x11
+	RTPROT_MRT                       = 0xa
+	RTPROT_NTK                       = 0xf
+	RTPROT_RA                        = 0x9
+	RTPROT_REDIRECT                  = 0x1
+	RTPROT_STATIC                    = 0x4
+	RTPROT_UNSPEC                    = 0x0
+	RTPROT_XORP                      = 0xe
+	RTPROT_ZEBRA                     = 0xb
+	RT_CLASS_DEFAULT                 = 0xfd
+	RT_CLASS_LOCAL                   = 0xff
+	RT_CLASS_MAIN                    = 0xfe
+	RT_CLASS_MAX                     = 0xff
+	RT_CLASS_UNSPEC                  = 0x0
+	RUSAGE_CHILDREN                  = -0x1
+	RUSAGE_SELF                      = 0x0
+	RUSAGE_THREAD                    = 0x1
+	SCM_CREDENTIALS                  = 0x2
+	SCM_RIGHTS                       = 0x1
+	SCM_TIMESTAMP                    = 0x1d
+	SCM_TIMESTAMPING                 = 0x23
+	SCM_TIMESTAMPNS                  = 0x21
+	SCM_WIFI_STATUS                  = 0x25
+	SHUT_RD                          = 0x0
+	SHUT_RDWR                        = 0x2
+	SHUT_WR                          = 0x1
+	SIOCADDDLCI                      = 0x8980
+	SIOCADDMULTI                     = 0x8931
+	SIOCADDRT                        = 0x890b
+	SIOCATMARK                       = 0x8905
+	SIOCBONDCHANGEACTIVE             = 0x8995
+	SIOCBONDENSLAVE                  = 0x8990
+	SIOCBONDINFOQUERY                = 0x8994
+	SIOCBONDRELEASE                  = 0x8991
+	SIOCBONDSETHWADDR                = 0x8992
+	SIOCBONDSLAVEINFOQUERY           = 0x8993
+	SIOCBRADDBR                      = 0x89a0
+	SIOCBRADDIF                      = 0x89a2
+	SIOCBRDELBR                      = 0x89a1
+	SIOCBRDELIF                      = 0x89a3
+	SIOCDARP                         = 0x8953
+	SIOCDELDLCI                      = 0x8981
+	SIOCDELMULTI                     = 0x8932
+	SIOCDELRT                        = 0x890c
+	SIOCDEVPRIVATE                   = 0x89f0
+	SIOCDIFADDR                      = 0x8936
+	SIOCDRARP                        = 0x8960
+	SIOCETHTOOL                      = 0x8946
+	SIOCGARP                         = 0x8954
+	SIOCGHWTSTAMP                    = 0x89b1
+	SIOCGIFADDR                      = 0x8915
+	SIOCGIFBR                        = 0x8940
+	SIOCGIFBRDADDR                   = 0x8919
+	SIOCGIFCONF                      = 0x8912
+	SIOCGIFCOUNT                     = 0x8938
+	SIOCGIFDSTADDR                   = 0x8917
+	SIOCGIFENCAP                     = 0x8925
+	SIOCGIFFLAGS                     = 0x8913
+	SIOCGIFHWADDR                    = 0x8927
+	SIOCGIFINDEX                     = 0x8933
+	SIOCGIFMAP                       = 0x8970
+	SIOCGIFMEM                       = 0x891f
+	SIOCGIFMETRIC                    = 0x891d
+	SIOCGIFMTU                       = 0x8921
+	SIOCGIFNAME                      = 0x8910
+	SIOCGIFNETMASK                   = 0x891b
+	SIOCGIFPFLAGS                    = 0x8935
+	SIOCGIFSLAVE                     = 0x8929
+	SIOCGIFTXQLEN                    = 0x8942
+	SIOCGIFVLAN                      = 0x8982
+	SIOCGMIIPHY                      = 0x8947
+	SIOCGMIIREG                      = 0x8948
+	SIOCGPGRP                        = 0x8904
+	SIOCGRARP                        = 0x8961
+	SIOCGSTAMP                       = 0x8906
+	SIOCGSTAMPNS                     = 0x8907
+	SIOCINQ                          = 0x4004667f
+	SIOCOUTQ                         = 0x40047473
+	SIOCOUTQNSD                      = 0x894b
+	SIOCPROTOPRIVATE                 = 0x89e0
+	SIOCRTMSG                        = 0x890d
+	SIOCSARP                         = 0x8955
+	SIOCSHWTSTAMP                    = 0x89b0
+	SIOCSIFADDR                      = 0x8916
+	SIOCSIFBR                        = 0x8941
+	SIOCSIFBRDADDR                   = 0x891a
+	SIOCSIFDSTADDR                   = 0x8918
+	SIOCSIFENCAP                     = 0x8926
+	SIOCSIFFLAGS                     = 0x8914
+	SIOCSIFHWADDR                    = 0x8924
+	SIOCSIFHWBROADCAST               = 0x8937
+	SIOCSIFLINK                      = 0x8911
+	SIOCSIFMAP                       = 0x8971
+	SIOCSIFMEM                       = 0x8920
+	SIOCSIFMETRIC                    = 0x891e
+	SIOCSIFMTU                       = 0x8922
+	SIOCSIFNAME                      = 0x8923
+	SIOCSIFNETMASK                   = 0x891c
+	SIOCSIFPFLAGS                    = 0x8934
+	SIOCSIFSLAVE                     = 0x8930
+	SIOCSIFTXQLEN                    = 0x8943
+	SIOCSIFVLAN                      = 0x8983
+	SIOCSMIIREG                      = 0x8949
+	SIOCSPGRP                        = 0x8902
+	SIOCSRARP                        = 0x8962
+	SIOCWANDEV                       = 0x894a
+	SOCK_CLOEXEC                     = 0x400000
+	SOCK_DCCP                        = 0x6
+	SOCK_DGRAM                       = 0x2
+	SOCK_NONBLOCK                    = 0x4000
+	SOCK_PACKET                      = 0xa
+	SOCK_RAW                         = 0x3
+	SOCK_RDM                         = 0x4
+	SOCK_SEQPACKET                   = 0x5
+	SOCK_STREAM                      = 0x1
+	SOL_AAL                          = 0x109
+	SOL_ALG                          = 0x117
+	SOL_ATM                          = 0x108
+	SOL_CAIF                         = 0x116
+	SOL_DCCP                         = 0x10d
+	SOL_DECNET                       = 0x105
+	SOL_ICMPV6                       = 0x3a
+	SOL_IP                           = 0x0
+	SOL_IPV6                         = 0x29
+	SOL_IRDA                         = 0x10a
+	SOL_IUCV                         = 0x115
+	SOL_KCM                          = 0x119
+	SOL_LLC                          = 0x10c
+	SOL_NETBEUI                      = 0x10b
+	SOL_NETLINK                      = 0x10e
+	SOL_NFC                          = 0x118
+	SOL_PACKET                       = 0x107
+	SOL_PNPIPE                       = 0x113
+	SOL_PPPOL2TP                     = 0x111
+	SOL_RAW                          = 0xff
+	SOL_RDS                          = 0x114
+	SOL_RXRPC                        = 0x110
+	SOL_SOCKET                       = 0xffff
+	SOL_TCP                          = 0x6
+	SOL_TIPC                         = 0x10f
+	SOL_X25                          = 0x106
+	SOMAXCONN                        = 0x80
+	SO_ACCEPTCONN                    = 0x8000
+	SO_ATTACH_BPF                    = 0x34
+	SO_ATTACH_FILTER                 = 0x1a
+	SO_ATTACH_REUSEPORT_CBPF         = 0x35
+	SO_ATTACH_REUSEPORT_EBPF         = 0x36
+	SO_BINDTODEVICE                  = 0xd
+	SO_BPF_EXTENSIONS                = 0x32
+	SO_BROADCAST                     = 0x20
+	SO_BSDCOMPAT                     = 0x400
+	SO_BUSY_POLL                     = 0x30
+	SO_CNX_ADVICE                    = 0x37
+	SO_DEBUG                         = 0x1
+	SO_DETACH_BPF                    = 0x1b
+	SO_DETACH_FILTER                 = 0x1b
+	SO_DOMAIN                        = 0x1029
+	SO_DONTROUTE                     = 0x10
+	SO_ERROR                         = 0x1007
+	SO_GET_FILTER                    = 0x1a
+	SO_INCOMING_CPU                  = 0x33
+	SO_KEEPALIVE                     = 0x8
+	SO_LINGER                        = 0x80
+	SO_LOCK_FILTER                   = 0x28
+	SO_MARK                          = 0x22
+	SO_MAX_PACING_RATE               = 0x31
+	SO_NOFCS                         = 0x27
+	SO_NO_CHECK                      = 0xb
+	SO_OOBINLINE                     = 0x100
+	SO_PASSCRED                      = 0x2
+	SO_PASSSEC                       = 0x1f
+	SO_PEEK_OFF                      = 0x26
+	SO_PEERCRED                      = 0x40
+	SO_PEERNAME                      = 0x1c
+	SO_PEERSEC                       = 0x1e
+	SO_PRIORITY                      = 0xc
+	SO_PROTOCOL                      = 0x1028
+	SO_RCVBUF                        = 0x1002
+	SO_RCVBUFFORCE                   = 0x100b
+	SO_RCVLOWAT                      = 0x800
+	SO_RCVTIMEO                      = 0x2000
+	SO_REUSEADDR                     = 0x4
+	SO_REUSEPORT                     = 0x200
+	SO_RXQ_OVFL                      = 0x24
+	SO_SECURITY_AUTHENTICATION       = 0x5001
+	SO_SECURITY_ENCRYPTION_NETWORK   = 0x5004
+	SO_SECURITY_ENCRYPTION_TRANSPORT = 0x5002
+	SO_SELECT_ERR_QUEUE              = 0x29
+	SO_SNDBUF                        = 0x1001
+	SO_SNDBUFFORCE                   = 0x100a
+	SO_SNDLOWAT                      = 0x1000
+	SO_SNDTIMEO                      = 0x4000
+	SO_TIMESTAMP                     = 0x1d
+	SO_TIMESTAMPING                  = 0x23
+	SO_TIMESTAMPNS                   = 0x21
+	SO_TYPE                          = 0x1008
+	SO_WIFI_STATUS                   = 0x25
+	S_BLKSIZE                        = 0x200
+	S_IEXEC                          = 0x40
+	S_IFBLK                          = 0x6000
+	S_IFCHR                          = 0x2000
+	S_IFDIR                          = 0x4000
+	S_IFIFO                          = 0x1000
+	S_IFLNK                          = 0xa000
+	S_IFMT                           = 0xf000
+	S_IFREG                          = 0x8000
+	S_IFSOCK                         = 0xc000
+	S_IREAD                          = 0x100
+	S_IRGRP                          = 0x20
+	S_IROTH                          = 0x4
+	S_IRUSR                          = 0x100
+	S_IRWXG                          = 0x38
+	S_IRWXO                          = 0x7
+	S_IRWXU                          = 0x1c0
+	S_ISGID                          = 0x400
+	S_ISUID                          = 0x800
+	S_ISVTX                          = 0x200
+	S_IWGRP                          = 0x10
+	S_IWOTH                          = 0x2
+	S_IWRITE                         = 0x80
+	S_IWUSR                          = 0x80
+	S_IXGRP                          = 0x8
+	S_IXOTH                          = 0x1
+	S_IXUSR                          = 0x40
+	TAB0                             = 0x0
+	TAB1                             = 0x800
+	TAB2                             = 0x1000
+	TAB3                             = 0x1800
+	TABDLY                           = 0x1800
+	TCFLSH                           = 0x20005407
+	TCGETA                           = 0x40125401
+	TCGETS                           = 0x40245408
+	TCGETS2                          = 0x402c540c
+	TCIFLUSH                         = 0x0
+	TCIOFF                           = 0x2
+	TCIOFLUSH                        = 0x2
+	TCION                            = 0x3
+	TCOFLUSH                         = 0x1
+	TCOOFF                           = 0x0
+	TCOON                            = 0x1
+	TCP_CC_INFO                      = 0x1a
+	TCP_CONGESTION                   = 0xd
+	TCP_COOKIE_IN_ALWAYS             = 0x1
+	TCP_COOKIE_MAX                   = 0x10
+	TCP_COOKIE_MIN                   = 0x8
+	TCP_COOKIE_OUT_NEVER             = 0x2
+	TCP_COOKIE_PAIR_SIZE             = 0x20
+	TCP_COOKIE_TRANSACTIONS          = 0xf
+	TCP_CORK                         = 0x3
+	TCP_DEFER_ACCEPT                 = 0x9
+	TCP_FASTOPEN                     = 0x17
+	TCP_INFO                         = 0xb
+	TCP_KEEPCNT                      = 0x6
+	TCP_KEEPIDLE                     = 0x4
+	TCP_KEEPINTVL                    = 0x5
+	TCP_LINGER2                      = 0x8
+	TCP_MAXSEG                       = 0x2
+	TCP_MAXWIN                       = 0xffff
+	TCP_MAX_WINSHIFT                 = 0xe
+	TCP_MD5SIG                       = 0xe
+	TCP_MD5SIG_MAXKEYLEN             = 0x50
+	TCP_MSS                          = 0x200
+	TCP_MSS_DEFAULT                  = 0x218
+	TCP_MSS_DESIRED                  = 0x4c4
+	TCP_NODELAY                      = 0x1
+	TCP_NOTSENT_LOWAT                = 0x19
+	TCP_QUEUE_SEQ                    = 0x15
+	TCP_QUICKACK                     = 0xc
+	TCP_REPAIR                       = 0x13
+	TCP_REPAIR_OPTIONS               = 0x16
+	TCP_REPAIR_QUEUE                 = 0x14
+	TCP_SAVED_SYN                    = 0x1c
+	TCP_SAVE_SYN                     = 0x1b
+	TCP_SYNCNT                       = 0x7
+	TCP_S_DATA_IN                    = 0x4
+	TCP_S_DATA_OUT                   = 0x8
+	TCP_THIN_DUPACK                  = 0x11
+	TCP_THIN_LINEAR_TIMEOUTS         = 0x10
+	TCP_TIMESTAMP                    = 0x18
+	TCP_USER_TIMEOUT                 = 0x12
+	TCP_WINDOW_CLAMP                 = 0xa
+	TCSAFLUSH                        = 0x2
+	TCSBRK                           = 0x20005405
+	TCSBRKP                          = 0x5425
+	TCSETA                           = 0x80125402
+	TCSETAF                          = 0x80125404
+	TCSETAW                          = 0x80125403
+	TCSETS                           = 0x80245409
+	TCSETS2                          = 0x802c540d
+	TCSETSF                          = 0x8024540b
+	TCSETSF2                         = 0x802c540f
+	TCSETSW                          = 0x8024540a
+	TCSETSW2                         = 0x802c540e
+	TCXONC                           = 0x20005406
+	TIOCCBRK                         = 0x2000747a
+	TIOCCONS                         = 0x20007424
+	TIOCEXCL                         = 0x2000740d
+	TIOCGDEV                         = 0x40045432
+	TIOCGETD                         = 0x40047400
+	TIOCGEXCL                        = 0x40045440
+	TIOCGICOUNT                      = 0x545d
+	TIOCGLCKTRMIOS                   = 0x5456
+	TIOCGPGRP                        = 0x40047483
+	TIOCGPKT                         = 0x40045438
+	TIOCGPTLCK                       = 0x40045439
+	TIOCGPTN                         = 0x40047486
+	TIOCGRS485                       = 0x40205441
+	TIOCGSERIAL                      = 0x541e
+	TIOCGSID                         = 0x40047485
+	TIOCGSOFTCAR                     = 0x40047464
+	TIOCGWINSZ                       = 0x40087468
+	TIOCINQ                          = 0x4004667f
+	TIOCLINUX                        = 0x541c
+	TIOCMBIC                         = 0x8004746b
+	TIOCMBIS                         = 0x8004746c
+	TIOCMGET                         = 0x4004746a
+	TIOCMIWAIT                       = 0x545c
+	TIOCMSET                         = 0x8004746d
+	TIOCM_CAR                        = 0x40
+	TIOCM_CD                         = 0x40
+	TIOCM_CTS                        = 0x20
+	TIOCM_DSR                        = 0x100
+	TIOCM_DTR                        = 0x2
+	TIOCM_LE                         = 0x1
+	TIOCM_LOOP                       = 0x8000
+	TIOCM_OUT1                       = 0x2000
+	TIOCM_OUT2                       = 0x4000
+	TIOCM_RI                         = 0x80
+	TIOCM_RNG                        = 0x80
+	TIOCM_RTS                        = 0x4
+	TIOCM_SR                         = 0x10
+	TIOCM_ST                         = 0x8
+	TIOCNOTTY                        = 0x20007471
+	TIOCNXCL                         = 0x2000740e
+	TIOCOUTQ                         = 0x40047473
+	TIOCPKT                          = 0x80047470
+	TIOCPKT_DATA                     = 0x0
+	TIOCPKT_DOSTOP                   = 0x20
+	TIOCPKT_FLUSHREAD                = 0x1
+	TIOCPKT_FLUSHWRITE               = 0x2
+	TIOCPKT_IOCTL                    = 0x40
+	TIOCPKT_NOSTOP                   = 0x10
+	TIOCPKT_START                    = 0x8
+	TIOCPKT_STOP                     = 0x4
+	TIOCSBRK                         = 0x2000747b
+	TIOCSCTTY                        = 0x20007484
+	TIOCSERCONFIG                    = 0x5453
+	TIOCSERGETLSR                    = 0x5459
+	TIOCSERGETMULTI                  = 0x545a
+	TIOCSERGSTRUCT                   = 0x5458
+	TIOCSERGWILD                     = 0x5454
+	TIOCSERSETMULTI                  = 0x545b
+	TIOCSERSWILD                     = 0x5455
+	TIOCSER_TEMT                     = 0x1
+	TIOCSETD                         = 0x80047401
+	TIOCSIG                          = 0x80047488
+	TIOCSLCKTRMIOS                   = 0x5457
+	TIOCSPGRP                        = 0x80047482
+	TIOCSPTLCK                       = 0x80047487
+	TIOCSRS485                       = 0xc0205442
+	TIOCSSERIAL                      = 0x541f
+	TIOCSSOFTCAR                     = 0x80047465
+	TIOCSTART                        = 0x2000746e
+	TIOCSTI                          = 0x80017472
+	TIOCSTOP                         = 0x2000746f
+	TIOCSWINSZ                       = 0x80087467
+	TIOCVHANGUP                      = 0x20005437
+	TOSTOP                           = 0x100
+	TUNATTACHFILTER                  = 0x801054d5
+	TUNDETACHFILTER                  = 0x801054d6
+	TUNGETFEATURES                   = 0x400454cf
+	TUNGETFILTER                     = 0x401054db
+	TUNGETIFF                        = 0x400454d2
+	TUNGETSNDBUF                     = 0x400454d3
+	TUNGETVNETBE                     = 0x400454df
+	TUNGETVNETHDRSZ                  = 0x400454d7
+	TUNGETVNETLE                     = 0x400454dd
+	TUNSETDEBUG                      = 0x800454c9
+	TUNSETGROUP                      = 0x800454ce
+	TUNSETIFF                        = 0x800454ca
+	TUNSETIFINDEX                    = 0x800454da
+	TUNSETLINK                       = 0x800454cd
+	TUNSETNOCSUM                     = 0x800454c8
+	TUNSETOFFLOAD                    = 0x800454d0
+	TUNSETOWNER                      = 0x800454cc
+	TUNSETPERSIST                    = 0x800454cb
+	TUNSETQUEUE                      = 0x800454d9
+	TUNSETSNDBUF                     = 0x800454d4
+	TUNSETTXFILTER                   = 0x800454d1
+	TUNSETVNETBE                     = 0x800454de
+	TUNSETVNETHDRSZ                  = 0x800454d8
+	TUNSETVNETLE                     = 0x800454dc
+	VDISCARD                         = 0xd
+	VDSUSP                           = 0xb
+	VEOF                             = 0x4
+	VEOL                             = 0x5
+	VEOL2                            = 0x6
+	VERASE                           = 0x2
+	VINTR                            = 0x0
+	VKILL                            = 0x3
+	VLNEXT                           = 0xf
+	VMIN                             = 0x4
+	VQUIT                            = 0x1
+	VREPRINT                         = 0xc
+	VSTART                           = 0x8
+	VSTOP                            = 0x9
+	VSUSP                            = 0xa
+	VSWTC                            = 0x7
+	VT0                              = 0x0
+	VT1                              = 0x4000
+	VTDLY                            = 0x4000
+	VTIME                            = 0x5
+	VWERASE                          = 0xe
+	WALL                             = 0x40000000
+	WCLONE                           = 0x80000000
+	WCONTINUED                       = 0x8
+	WEXITED                          = 0x4
+	WNOHANG                          = 0x1
+	WNOTHREAD                        = 0x20000000
+	WNOWAIT                          = 0x1000000
+	WORDSIZE                         = 0x40
+	WRAP                             = 0x20000
+	WSTOPPED                         = 0x2
+	WUNTRACED                        = 0x2
+	XCASE                            = 0x4
+	XTABS                            = 0x1800
+	__TIOCFLUSH                      = 0x80047410
+)
+
+// Errors
+const (
+	E2BIG           = syscall.Errno(0x7)
+	EACCES          = syscall.Errno(0xd)
+	EADDRINUSE      = syscall.Errno(0x30)
+	EADDRNOTAVAIL   = syscall.Errno(0x31)
+	EADV            = syscall.Errno(0x53)
+	EAFNOSUPPORT    = syscall.Errno(0x2f)
+	EAGAIN          = syscall.Errno(0xb)
+	EALREADY        = syscall.Errno(0x25)
+	EBADE           = syscall.Errno(0x66)
+	EBADF           = syscall.Errno(0x9)
+	EBADFD          = syscall.Errno(0x5d)
+	EBADMSG         = syscall.Errno(0x4c)
+	EBADR           = syscall.Errno(0x67)
+	EBADRQC         = syscall.Errno(0x6a)
+	EBADSLT         = syscall.Errno(0x6b)
+	EBFONT          = syscall.Errno(0x6d)
+	EBUSY           = syscall.Errno(0x10)
+	ECANCELED       = syscall.Errno(0x7f)
+	ECHILD          = syscall.Errno(0xa)
+	ECHRNG          = syscall.Errno(0x5e)
+	ECOMM           = syscall.Errno(0x55)
+	ECONNABORTED    = syscall.Errno(0x35)
+	ECONNREFUSED    = syscall.Errno(0x3d)
+	ECONNRESET      = syscall.Errno(0x36)
+	EDEADLK         = syscall.Errno(0x4e)
+	EDEADLOCK       = syscall.Errno(0x6c)
+	EDESTADDRREQ    = syscall.Errno(0x27)
+	EDOM            = syscall.Errno(0x21)
+	EDOTDOT         = syscall.Errno(0x58)
+	EDQUOT          = syscall.Errno(0x45)
+	EEXIST          = syscall.Errno(0x11)
+	EFAULT          = syscall.Errno(0xe)
+	EFBIG           = syscall.Errno(0x1b)
+	EHOSTDOWN       = syscall.Errno(0x40)
+	EHOSTUNREACH    = syscall.Errno(0x41)
+	EHWPOISON       = syscall.Errno(0x87)
+	EIDRM           = syscall.Errno(0x4d)
+	EILSEQ          = syscall.Errno(0x7a)
+	EINPROGRESS     = syscall.Errno(0x24)
+	EINTR           = syscall.Errno(0x4)
+	EINVAL          = syscall.Errno(0x16)
+	EIO             = syscall.Errno(0x5)
+	EISCONN         = syscall.Errno(0x38)
+	EISDIR          = syscall.Errno(0x15)
+	EISNAM          = syscall.Errno(0x78)
+	EKEYEXPIRED     = syscall.Errno(0x81)
+	EKEYREJECTED    = syscall.Errno(0x83)
+	EKEYREVOKED     = syscall.Errno(0x82)
+	EL2HLT          = syscall.Errno(0x65)
+	EL2NSYNC        = syscall.Errno(0x5f)
+	EL3HLT          = syscall.Errno(0x60)
+	EL3RST          = syscall.Errno(0x61)
+	ELIBACC         = syscall.Errno(0x72)
+	ELIBBAD         = syscall.Errno(0x70)
+	ELIBEXEC        = syscall.Errno(0x6e)
+	ELIBMAX         = syscall.Errno(0x7b)
+	ELIBSCN         = syscall.Errno(0x7c)
+	ELNRNG          = syscall.Errno(0x62)
+	ELOOP           = syscall.Errno(0x3e)
+	EMEDIUMTYPE     = syscall.Errno(0x7e)
+	EMFILE          = syscall.Errno(0x18)
+	EMLINK          = syscall.Errno(0x1f)
+	EMSGSIZE        = syscall.Errno(0x28)
+	EMULTIHOP       = syscall.Errno(0x57)
+	ENAMETOOLONG    = syscall.Errno(0x3f)
+	ENAVAIL         = syscall.Errno(0x77)
+	ENETDOWN        = syscall.Errno(0x32)
+	ENETRESET       = syscall.Errno(0x34)
+	ENETUNREACH     = syscall.Errno(0x33)
+	ENFILE          = syscall.Errno(0x17)
+	ENOANO          = syscall.Errno(0x69)
+	ENOBUFS         = syscall.Errno(0x37)
+	ENOCSI          = syscall.Errno(0x64)
+	ENODATA         = syscall.Errno(0x6f)
+	ENODEV          = syscall.Errno(0x13)
+	ENOENT          = syscall.Errno(0x2)
+	ENOEXEC         = syscall.Errno(0x8)
+	ENOKEY          = syscall.Errno(0x80)
+	ENOLCK          = syscall.Errno(0x4f)
+	ENOLINK         = syscall.Errno(0x52)
+	ENOMEDIUM       = syscall.Errno(0x7d)
+	ENOMEM          = syscall.Errno(0xc)
+	ENOMSG          = syscall.Errno(0x4b)
+	ENONET          = syscall.Errno(0x50)
+	ENOPKG          = syscall.Errno(0x71)
+	ENOPROTOOPT     = syscall.Errno(0x2a)
+	ENOSPC          = syscall.Errno(0x1c)
+	ENOSR           = syscall.Errno(0x4a)
+	ENOSTR          = syscall.Errno(0x48)
+	ENOSYS          = syscall.Errno(0x5a)
+	ENOTBLK         = syscall.Errno(0xf)
+	ENOTCONN        = syscall.Errno(0x39)
+	ENOTDIR         = syscall.Errno(0x14)
+	ENOTEMPTY       = syscall.Errno(0x42)
+	ENOTNAM         = syscall.Errno(0x76)
+	ENOTRECOVERABLE = syscall.Errno(0x85)
+	ENOTSOCK        = syscall.Errno(0x26)
+	ENOTSUP         = syscall.Errno(0x2d)
+	ENOTTY          = syscall.Errno(0x19)
+	ENOTUNIQ        = syscall.Errno(0x73)
+	ENXIO           = syscall.Errno(0x6)
+	EOPNOTSUPP      = syscall.Errno(0x2d)
+	EOVERFLOW       = syscall.Errno(0x5c)
+	EOWNERDEAD      = syscall.Errno(0x84)
+	EPERM           = syscall.Errno(0x1)
+	EPFNOSUPPORT    = syscall.Errno(0x2e)
+	EPIPE           = syscall.Errno(0x20)
+	EPROCLIM        = syscall.Errno(0x43)
+	EPROTO          = syscall.Errno(0x56)
+	EPROTONOSUPPORT = syscall.Errno(0x2b)
+	EPROTOTYPE      = syscall.Errno(0x29)
+	ERANGE          = syscall.Errno(0x22)
+	EREMCHG         = syscall.Errno(0x59)
+	EREMOTE         = syscall.Errno(0x47)
+	EREMOTEIO       = syscall.Errno(0x79)
+	ERESTART        = syscall.Errno(0x74)
+	ERFKILL         = syscall.Errno(0x86)
+	EROFS           = syscall.Errno(0x1e)
+	ERREMOTE        = syscall.Errno(0x51)
+	ESHUTDOWN       = syscall.Errno(0x3a)
+	ESOCKTNOSUPPORT = syscall.Errno(0x2c)
+	ESPIPE          = syscall.Errno(0x1d)
+	ESRCH           = syscall.Errno(0x3)
+	ESRMNT          = syscall.Errno(0x54)
+	ESTALE          = syscall.Errno(0x46)
+	ESTRPIPE        = syscall.Errno(0x5b)
+	ETIME           = syscall.Errno(0x49)
+	ETIMEDOUT       = syscall.Errno(0x3c)
+	ETOOMANYREFS    = syscall.Errno(0x3b)
+	ETXTBSY         = syscall.Errno(0x1a)
+	EUCLEAN         = syscall.Errno(0x75)
+	EUNATCH         = syscall.Errno(0x63)
+	EUSERS          = syscall.Errno(0x44)
+	EWOULDBLOCK     = syscall.Errno(0xb)
+	EXDEV           = syscall.Errno(0x12)
+	EXFULL          = syscall.Errno(0x68)
+)
+
+// Signals
+const (
+	SIGABRT   = syscall.Signal(0x6)
+	SIGALRM   = syscall.Signal(0xe)
+	SIGBUS    = syscall.Signal(0xa)
+	SIGCHLD   = syscall.Signal(0x14)
+	SIGCLD    = syscall.Signal(0x14)
+	SIGCONT   = syscall.Signal(0x13)
+	SIGEMT    = syscall.Signal(0x7)
+	SIGFPE    = syscall.Signal(0x8)
+	SIGHUP    = syscall.Signal(0x1)
+	SIGILL    = syscall.Signal(0x4)
+	SIGINT    = syscall.Signal(0x2)
+	SIGIO     = syscall.Signal(0x17)
+	SIGIOT    = syscall.Signal(0x6)
+	SIGKILL   = syscall.Signal(0x9)
+	SIGLOST   = syscall.Signal(0x1d)
+	SIGPIPE   = syscall.Signal(0xd)
+	SIGPOLL   = syscall.Signal(0x17)
+	SIGPROF   = syscall.Signal(0x1b)
+	SIGPWR    = syscall.Signal(0x1d)
+	SIGQUIT   = syscall.Signal(0x3)
+	SIGSEGV   = syscall.Signal(0xb)
+	SIGSTOP   = syscall.Signal(0x11)
+	SIGSYS    = syscall.Signal(0xc)
+	SIGTERM   = syscall.Signal(0xf)
+	SIGTRAP   = syscall.Signal(0x5)
+	SIGTSTP   = syscall.Signal(0x12)
+	SIGTTIN   = syscall.Signal(0x15)
+	SIGTTOU   = syscall.Signal(0x16)
+	SIGURG    = syscall.Signal(0x10)
+	SIGUSR1   = syscall.Signal(0x1e)
+	SIGUSR2   = syscall.Signal(0x1f)
+	SIGVTALRM = syscall.Signal(0x1a)
+	SIGWINCH  = syscall.Signal(0x1c)
+	SIGXCPU   = syscall.Signal(0x18)
+	SIGXFSZ   = syscall.Signal(0x19)
+)
+
+// Error table
+var errors = [...]string{
+	1:   "operation not permitted",
+	2:   "no such file or directory",
+	3:   "no such process",
+	4:   "interrupted system call",
+	5:   "input/output error",
+	6:   "no such device or address",
+	7:   "argument list too long",
+	8:   "exec format error",
+	9:   "bad file descriptor",
+	10:  "no child processes",
+	11:  "resource temporarily unavailable",
+	12:  "cannot allocate memory",
+	13:  "permission denied",
+	14:  "bad address",
+	15:  "block device required",
+	16:  "device or resource busy",
+	17:  "file exists",
+	18:  "invalid cross-device link",
+	19:  "no such device",
+	20:  "not a directory",
+	21:  "is a directory",
+	22:  "invalid argument",
+	23:  "too many open files in system",
+	24:  "too many open files",
+	25:  "inappropriate ioctl for device",
+	26:  "text file busy",
+	27:  "file too large",
+	28:  "no space left on device",
+	29:  "illegal seek",
+	30:  "read-only file system",
+	31:  "too many links",
+	32:  "broken pipe",
+	33:  "numerical argument out of domain",
+	34:  "numerical result out of range",
+	36:  "operation now in progress",
+	37:  "operation already in progress",
+	38:  "socket operation on non-socket",
+	39:  "destination address required",
+	40:  "message too long",
+	41:  "protocol wrong type for socket",
+	42:  "protocol not available",
+	43:  "protocol not supported",
+	44:  "socket type not supported",
+	45:  "operation not supported",
+	46:  "protocol family not supported",
+	47:  "address family not supported by protocol",
+	48:  "address already in use",
+	49:  "cannot assign requested address",
+	50:  "network is down",
+	51:  "network is unreachable",
+	52:  "network dropped connection on reset",
+	53:  "software caused connection abort",
+	54:  "connection reset by peer",
+	55:  "no buffer space available",
+	56:  "transport endpoint is already connected",
+	57:  "transport endpoint is not connected",
+	58:  "cannot send after transport endpoint shutdown",
+	59:  "too many references: cannot splice",
+	60:  "connection timed out",
+	61:  "connection refused",
+	62:  "too many levels of symbolic links",
+	63:  "file name too long",
+	64:  "host is down",
+	65:  "no route to host",
+	66:  "directory not empty",
+	67:  "too many processes",
+	68:  "too many users",
+	69:  "disk quota exceeded",
+	70:  "stale file handle",
+	71:  "object is remote",
+	72:  "device not a stream",
+	73:  "timer expired",
+	74:  "out of streams resources",
+	75:  "no message of desired type",
+	76:  "bad message",
+	77:  "identifier removed",
+	78:  "resource deadlock avoided",
+	79:  "no locks available",
+	80:  "machine is not on the network",
+	81:  "unknown error 81",
+	82:  "link has been severed",
+	83:  "advertise error",
+	84:  "srmount error",
+	85:  "communication error on send",
+	86:  "protocol error",
+	87:  "multihop attempted",
+	88:  "RFS specific error",
+	89:  "remote address changed",
+	90:  "function not implemented",
+	91:  "streams pipe error",
+	92:  "value too large for defined data type",
+	93:  "file descriptor in bad state",
+	94:  "channel number out of range",
+	95:  "level 2 not synchronized",
+	96:  "level 3 halted",
+	97:  "level 3 reset",
+	98:  "link number out of range",
+	99:  "protocol driver not attached",
+	100: "no CSI structure available",
+	101: "level 2 halted",
+	102: "invalid exchange",
+	103: "invalid request descriptor",
+	104: "exchange full",
+	105: "no anode",
+	106: "invalid request code",
+	107: "invalid slot",
+	108: "file locking deadlock error",
+	109: "bad font file format",
+	110: "cannot exec a shared library directly",
+	111: "no data available",
+	112: "accessing a corrupted shared library",
+	113: "package not installed",
+	114: "can not access a needed shared library",
+	115: "name not unique on network",
+	116: "interrupted system call should be restarted",
+	117: "structure needs cleaning",
+	118: "not a XENIX named type file",
+	119: "no XENIX semaphores available",
+	120: "is a named type file",
+	121: "remote I/O error",
+	122: "invalid or incomplete multibyte or wide character",
+	123: "attempting to link in too many shared libraries",
+	124: ".lib section in a.out corrupted",
+	125: "no medium found",
+	126: "wrong medium type",
+	127: "operation canceled",
+	128: "required key not available",
+	129: "key has expired",
+	130: "key has been revoked",
+	131: "key was rejected by service",
+	132: "owner died",
+	133: "state not recoverable",
+	134: "operation not possible due to RF-kill",
+	135: "memory page has hardware error",
+}
+
+// Signal table
+var signals = [...]string{
+	1:  "hangup",
+	2:  "interrupt",
+	3:  "quit",
+	4:  "illegal instruction",
+	5:  "trace/breakpoint trap",
+	6:  "aborted",
+	7:  "EMT trap",
+	8:  "floating point exception",
+	9:  "killed",
+	10: "bus error",
+	11: "segmentation fault",
+	12: "bad system call",
+	13: "broken pipe",
+	14: "alarm clock",
+	15: "terminated",
+	16: "urgent I/O condition",
+	17: "stopped (signal)",
+	18: "stopped",
+	19: "continued",
+	20: "child exited",
+	21: "stopped (tty input)",
+	22: "stopped (tty output)",
+	23: "I/O possible",
+	24: "CPU time limit exceeded",
+	25: "file size limit exceeded",
+	26: "virtual timer expired",
+	27: "profiling timer expired",
+	28: "window changed",
+	29: "resource lost",
+	30: "user defined signal 1",
+	31: "user defined signal 2",
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
index 1f7a7566950767963939bb26db5fe3eebc60c31f..80f6a1b0ad8bc7f737fc6304efcb10d2ffcfb074 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
@@ -64,7 +64,7 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int,
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -87,7 +87,7 @@ func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -109,7 +109,7 @@ func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func unlinkat(dirfd int, path string, flags int) (err error) {
+func Unlinkat(dirfd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
index b4e24fc0a0fbc422b054407fbfcfaf5ade058e83..078c8f05af870c3996524e0babd52f776372e7dd 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
@@ -64,7 +64,7 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int,
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -87,7 +87,7 @@ func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -109,7 +109,7 @@ func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func unlinkat(dirfd int, path string, flags int) (err error) {
+func Unlinkat(dirfd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
index 20bf33ce5ff3fe042a54684e537c0157a68e980a..76e5f7c0bbc9c12e6bcc25a86c275b5185216722 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
@@ -64,7 +64,7 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int,
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -87,7 +87,7 @@ func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -109,7 +109,7 @@ func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func unlinkat(dirfd int, path string, flags int) (err error) {
+func Unlinkat(dirfd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
index c7286db4851de92b7497a9978045861a7adf626d..72b79470a25fccef2af14069afa4adc86ac093f9 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
@@ -64,7 +64,7 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int,
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -87,7 +87,7 @@ func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -109,7 +109,7 @@ func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func unlinkat(dirfd int, path string, flags int) (err error) {
+func Unlinkat(dirfd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
index b709ed2f532bffb7ee33f646d5b13af05ba1179d..ba55509ea0e825e43ad80857ca695b05d20eadc6 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
@@ -64,7 +64,7 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int,
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -87,7 +87,7 @@ func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -109,7 +109,7 @@ func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func unlinkat(dirfd int, path string, flags int) (err error) {
+func Unlinkat(dirfd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
index 5cb1c56715622790ee90243cd313b86612b35ea4..2b1cc8473b640f4fdd947c978c87eb0d217468f9 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
@@ -64,7 +64,7 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int,
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -87,7 +87,7 @@ func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -109,7 +109,7 @@ func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func unlinkat(dirfd int, path string, flags int) (err error) {
+func Unlinkat(dirfd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index 873bb18f7850684d15c511a0151add3ff59615cd..25f39db9df0f55dd12ab62abe829dfdfa6256976 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -64,7 +64,7 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int,
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -87,7 +87,7 @@ func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -109,7 +109,7 @@ func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func unlinkat(dirfd int, path string, flags int) (err error) {
+func Unlinkat(dirfd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index bf08835c5c8b6853c753f7c33a876f4d8b7863d3..70702b516668ad6542c21e481d25559cf93ef557 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -64,7 +64,7 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int,
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -87,7 +87,7 @@ func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -109,7 +109,7 @@ func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func unlinkat(dirfd int, path string, flags int) (err error) {
+func Unlinkat(dirfd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
index dbaa53b9843d8403ca73c452915c7a35895a3a52..94b93d3d02cdaee340ae2df64230735e3343f7a7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
@@ -64,7 +64,7 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int,
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
@@ -87,7 +87,7 @@ func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -109,7 +109,7 @@ func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func unlinkat(dirfd int, path string, flags int) (err error) {
+func Unlinkat(dirfd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
new file mode 100644
index 0000000000000000000000000000000000000000..774b10ed8ff3a67099897dced26f1342e20b137d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
@@ -0,0 +1,1834 @@
+// mksyscall.pl syscall_linux.go syscall_linux_sparc64.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build sparc64,linux
+
+package unix
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var _ syscall.Errno
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(oldpath)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(newpath)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
+	use(unsafe.Pointer(_p0))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 unsafe.Pointer
+	if len(buf) > 0 {
+		_p1 = unsafe.Pointer(&buf[0])
+	} else {
+		_p1 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(oldpath)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(newpath)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unlinkat(dirfd int, path string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, times *[2]Timeval) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) {
+	_, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getcwd(buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
+	r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+	wpid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(arg)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(source)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(target)
+	if err != nil {
+		return
+	}
+	var _p2 *byte
+	_p2, err = BytePtrFromString(fstype)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	use(unsafe.Pointer(_p2))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Acct(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Adjtimex(buf *Timex) (state int, err error) {
+	r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0)
+	state = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chroot(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+	_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+	_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int) (fd int, err error) {
+	r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup3(oldfd int, newfd int, flags int) (err error) {
+	_, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate(size int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate1(flag int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+	_, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Exit(code int) {
+	Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
+	_, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+	_, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+	_, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fdatasync(fd int) (err error) {
+	_, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Flock(fd int, how int) (err error) {
+	_, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+	_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getdents(fd int, buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgid(pid int) (pgid int, err error) {
+	r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
+	pgid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+	r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
+	pid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+	r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
+	ppid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpriority(which int, who int) (prio int, err error) {
+	r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
+	prio = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrusage(who int, rusage *Rusage) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettid() (tid int) {
+	r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
+	tid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(attr)
+	if err != nil {
+		return
+	}
+	var _p2 unsafe.Pointer
+	if len(dest) > 0 {
+		_p2 = unsafe.Pointer(&dest[0])
+	} else {
+		_p2 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	sz = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(pathname)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask))
+	use(unsafe.Pointer(_p0))
+	watchdesc = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyInit1(flags int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) {
+	r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0)
+	success = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kill(pid int, sig syscall.Signal) (err error) {
+	_, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Klogctl(typ int, buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listxattr(path string, dest []byte) (sz int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 unsafe.Pointer
+	if len(dest) > 0 {
+		_p1 = unsafe.Pointer(&dest[0])
+	} else {
+		_p1 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
+	use(unsafe.Pointer(_p0))
+	sz = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdirat(dirfd int, path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
+	_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func PivotRoot(newroot string, putold string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(newroot)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(putold)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
+	_, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Removexattr(path string, attr string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(attr)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(oldpath)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(newpath)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setdomainname(p []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sethostname(p []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setsid() (pid int, err error) {
+	r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
+	pid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Settimeofday(tv *Timeval) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setns(fd int, nstype int) (err error) {
+	_, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpriority(which int, who int, prio int) (err error) {
+	_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(attr)
+	if err != nil {
+		return
+	}
+	var _p2 unsafe.Pointer
+	if len(data) > 0 {
+		_p2 = unsafe.Pointer(&data[0])
+	} else {
+		_p2 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sync() {
+	Syscall(SYS_SYNC, 0, 0, 0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sysinfo(info *Sysinfo_t) (err error) {
+	_, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
+	r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
+	n = int64(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
+	_, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Times(tms *Tms) (ticks uintptr, err error) {
+	r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0)
+	ticks = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(mask int) (oldmask int) {
+	r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0)
+	oldmask = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Uname(buf *Utsname) (err error) {
+	_, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unmount(target string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(target)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unshare(flags int) (err error) {
+	_, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ustat(dev int, ubuf *Ustat_t) (err error) {
+	_, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func exitThread(code int) (err error) {
+	_, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlen(fd int, p *byte, np int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writelen(fd int, p *byte, np int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Madvise(b []byte, advice int) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mprotect(b []byte, prot int) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlockall(flags int) (err error) {
+	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlockall() (err error) {
+	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(events) > 0 {
+		_p0 = unsafe.Pointer(&events[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup2(oldfd int, newfd int) (err error) {
+	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+	_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatfs(fd int, buf *Statfs_t) (err error) {
+	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+	_, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+	r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
+	egid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (euid int) {
+	r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
+	euid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+	r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
+	gid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrlimit(resource int, rlim *Rlimit) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+	r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyInit() (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lchown(path string, uid int, gid int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, n int) (err error) {
+	_, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lstat(path string, stat *Stat_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pause() (err error) {
+	_, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seek(fd int, offset int64, whence int) (off int64, err error) {
+	r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
+	off = int64(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
+	written = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsgid(gid int) (err error) {
+	_, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsuid(uid int) (err error) {
+	_, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresgid(rgid int, egid int, sgid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresuid(ruid int, euid int, suid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(fd int, how int) (err error) {
+	_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
+	r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
+	n = int64(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Stat(path string, stat *Stat_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
+	_, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+	r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
+	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+	_, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+	_, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(n int, list *_Gid_t) (nn int, err error) {
+	r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+	nn = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(n int, list *_Gid_t) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+	_, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+	_, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
+	r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset))
+	xaddr = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tv *Timeval) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Utime(path string, buf *Utimbuf) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe(p *[2]_C_int) (err error) {
+	_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe2(p *[2]_C_int, flags int) (err error) {
+	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
new file mode 100644
index 0000000000000000000000000000000000000000..46b5bee1db04e5ba4049092f17cefc235fb067c2
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -0,0 +1,348 @@
+// mksysnum_linux.pl /usr/include/sparc64-linux-gnu/asm/unistd.h
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build sparc64,linux
+
+package unix
+
+const (
+	SYS_RESTART_SYSCALL        = 0
+	SYS_EXIT                   = 1
+	SYS_FORK                   = 2
+	SYS_READ                   = 3
+	SYS_WRITE                  = 4
+	SYS_OPEN                   = 5
+	SYS_CLOSE                  = 6
+	SYS_WAIT4                  = 7
+	SYS_CREAT                  = 8
+	SYS_LINK                   = 9
+	SYS_UNLINK                 = 10
+	SYS_EXECV                  = 11
+	SYS_CHDIR                  = 12
+	SYS_CHOWN                  = 13
+	SYS_MKNOD                  = 14
+	SYS_CHMOD                  = 15
+	SYS_LCHOWN                 = 16
+	SYS_BRK                    = 17
+	SYS_PERFCTR                = 18
+	SYS_LSEEK                  = 19
+	SYS_GETPID                 = 20
+	SYS_CAPGET                 = 21
+	SYS_CAPSET                 = 22
+	SYS_SETUID                 = 23
+	SYS_GETUID                 = 24
+	SYS_VMSPLICE               = 25
+	SYS_PTRACE                 = 26
+	SYS_ALARM                  = 27
+	SYS_SIGALTSTACK            = 28
+	SYS_PAUSE                  = 29
+	SYS_UTIME                  = 30
+	SYS_ACCESS                 = 33
+	SYS_NICE                   = 34
+	SYS_SYNC                   = 36
+	SYS_KILL                   = 37
+	SYS_STAT                   = 38
+	SYS_SENDFILE               = 39
+	SYS_LSTAT                  = 40
+	SYS_DUP                    = 41
+	SYS_PIPE                   = 42
+	SYS_TIMES                  = 43
+	SYS_UMOUNT2                = 45
+	SYS_SETGID                 = 46
+	SYS_GETGID                 = 47
+	SYS_SIGNAL                 = 48
+	SYS_GETEUID                = 49
+	SYS_GETEGID                = 50
+	SYS_ACCT                   = 51
+	SYS_MEMORY_ORDERING        = 52
+	SYS_IOCTL                  = 54
+	SYS_REBOOT                 = 55
+	SYS_SYMLINK                = 57
+	SYS_READLINK               = 58
+	SYS_EXECVE                 = 59
+	SYS_UMASK                  = 60
+	SYS_CHROOT                 = 61
+	SYS_FSTAT                  = 62
+	SYS_FSTAT64                = 63
+	SYS_GETPAGESIZE            = 64
+	SYS_MSYNC                  = 65
+	SYS_VFORK                  = 66
+	SYS_PREAD64                = 67
+	SYS_PWRITE64               = 68
+	SYS_MMAP                   = 71
+	SYS_MUNMAP                 = 73
+	SYS_MPROTECT               = 74
+	SYS_MADVISE                = 75
+	SYS_VHANGUP                = 76
+	SYS_MINCORE                = 78
+	SYS_GETGROUPS              = 79
+	SYS_SETGROUPS              = 80
+	SYS_GETPGRP                = 81
+	SYS_SETITIMER              = 83
+	SYS_SWAPON                 = 85
+	SYS_GETITIMER              = 86
+	SYS_SETHOSTNAME            = 88
+	SYS_DUP2                   = 90
+	SYS_FCNTL                  = 92
+	SYS_SELECT                 = 93
+	SYS_FSYNC                  = 95
+	SYS_SETPRIORITY            = 96
+	SYS_SOCKET                 = 97
+	SYS_CONNECT                = 98
+	SYS_ACCEPT                 = 99
+	SYS_GETPRIORITY            = 100
+	SYS_RT_SIGRETURN           = 101
+	SYS_RT_SIGACTION           = 102
+	SYS_RT_SIGPROCMASK         = 103
+	SYS_RT_SIGPENDING          = 104
+	SYS_RT_SIGTIMEDWAIT        = 105
+	SYS_RT_SIGQUEUEINFO        = 106
+	SYS_RT_SIGSUSPEND          = 107
+	SYS_SETRESUID              = 108
+	SYS_GETRESUID              = 109
+	SYS_SETRESGID              = 110
+	SYS_GETRESGID              = 111
+	SYS_RECVMSG                = 113
+	SYS_SENDMSG                = 114
+	SYS_GETTIMEOFDAY           = 116
+	SYS_GETRUSAGE              = 117
+	SYS_GETSOCKOPT             = 118
+	SYS_GETCWD                 = 119
+	SYS_READV                  = 120
+	SYS_WRITEV                 = 121
+	SYS_SETTIMEOFDAY           = 122
+	SYS_FCHOWN                 = 123
+	SYS_FCHMOD                 = 124
+	SYS_RECVFROM               = 125
+	SYS_SETREUID               = 126
+	SYS_SETREGID               = 127
+	SYS_RENAME                 = 128
+	SYS_TRUNCATE               = 129
+	SYS_FTRUNCATE              = 130
+	SYS_FLOCK                  = 131
+	SYS_LSTAT64                = 132
+	SYS_SENDTO                 = 133
+	SYS_SHUTDOWN               = 134
+	SYS_SOCKETPAIR             = 135
+	SYS_MKDIR                  = 136
+	SYS_RMDIR                  = 137
+	SYS_UTIMES                 = 138
+	SYS_STAT64                 = 139
+	SYS_SENDFILE64             = 140
+	SYS_GETPEERNAME            = 141
+	SYS_FUTEX                  = 142
+	SYS_GETTID                 = 143
+	SYS_GETRLIMIT              = 144
+	SYS_SETRLIMIT              = 145
+	SYS_PIVOT_ROOT             = 146
+	SYS_PRCTL                  = 147
+	SYS_PCICONFIG_READ         = 148
+	SYS_PCICONFIG_WRITE        = 149
+	SYS_GETSOCKNAME            = 150
+	SYS_INOTIFY_INIT           = 151
+	SYS_INOTIFY_ADD_WATCH      = 152
+	SYS_POLL                   = 153
+	SYS_GETDENTS64             = 154
+	SYS_INOTIFY_RM_WATCH       = 156
+	SYS_STATFS                 = 157
+	SYS_FSTATFS                = 158
+	SYS_UMOUNT                 = 159
+	SYS_SCHED_SET_AFFINITY     = 160
+	SYS_SCHED_GET_AFFINITY     = 161
+	SYS_GETDOMAINNAME          = 162
+	SYS_SETDOMAINNAME          = 163
+	SYS_UTRAP_INSTALL          = 164
+	SYS_QUOTACTL               = 165
+	SYS_SET_TID_ADDRESS        = 166
+	SYS_MOUNT                  = 167
+	SYS_USTAT                  = 168
+	SYS_SETXATTR               = 169
+	SYS_LSETXATTR              = 170
+	SYS_FSETXATTR              = 171
+	SYS_GETXATTR               = 172
+	SYS_LGETXATTR              = 173
+	SYS_GETDENTS               = 174
+	SYS_SETSID                 = 175
+	SYS_FCHDIR                 = 176
+	SYS_FGETXATTR              = 177
+	SYS_LISTXATTR              = 178
+	SYS_LLISTXATTR             = 179
+	SYS_FLISTXATTR             = 180
+	SYS_REMOVEXATTR            = 181
+	SYS_LREMOVEXATTR           = 182
+	SYS_SIGPENDING             = 183
+	SYS_QUERY_MODULE           = 184
+	SYS_SETPGID                = 185
+	SYS_FREMOVEXATTR           = 186
+	SYS_TKILL                  = 187
+	SYS_EXIT_GROUP             = 188
+	SYS_UNAME                  = 189
+	SYS_INIT_MODULE            = 190
+	SYS_PERSONALITY            = 191
+	SYS_REMAP_FILE_PAGES       = 192
+	SYS_EPOLL_CREATE           = 193
+	SYS_EPOLL_CTL              = 194
+	SYS_EPOLL_WAIT             = 195
+	SYS_IOPRIO_SET             = 196
+	SYS_GETPPID                = 197
+	SYS_SIGACTION              = 198
+	SYS_SGETMASK               = 199
+	SYS_SSETMASK               = 200
+	SYS_SIGSUSPEND             = 201
+	SYS_OLDLSTAT               = 202
+	SYS_USELIB                 = 203
+	SYS_READDIR                = 204
+	SYS_READAHEAD              = 205
+	SYS_SOCKETCALL             = 206
+	SYS_SYSLOG                 = 207
+	SYS_LOOKUP_DCOOKIE         = 208
+	SYS_FADVISE64              = 209
+	SYS_FADVISE64_64           = 210
+	SYS_TGKILL                 = 211
+	SYS_WAITPID                = 212
+	SYS_SWAPOFF                = 213
+	SYS_SYSINFO                = 214
+	SYS_IPC                    = 215
+	SYS_SIGRETURN              = 216
+	SYS_CLONE                  = 217
+	SYS_IOPRIO_GET             = 218
+	SYS_ADJTIMEX               = 219
+	SYS_SIGPROCMASK            = 220
+	SYS_CREATE_MODULE          = 221
+	SYS_DELETE_MODULE          = 222
+	SYS_GET_KERNEL_SYMS        = 223
+	SYS_GETPGID                = 224
+	SYS_BDFLUSH                = 225
+	SYS_SYSFS                  = 226
+	SYS_AFS_SYSCALL            = 227
+	SYS_SETFSUID               = 228
+	SYS_SETFSGID               = 229
+	SYS__NEWSELECT             = 230
+	SYS_SPLICE                 = 232
+	SYS_STIME                  = 233
+	SYS_STATFS64               = 234
+	SYS_FSTATFS64              = 235
+	SYS__LLSEEK                = 236
+	SYS_MLOCK                  = 237
+	SYS_MUNLOCK                = 238
+	SYS_MLOCKALL               = 239
+	SYS_MUNLOCKALL             = 240
+	SYS_SCHED_SETPARAM         = 241
+	SYS_SCHED_GETPARAM         = 242
+	SYS_SCHED_SETSCHEDULER     = 243
+	SYS_SCHED_GETSCHEDULER     = 244
+	SYS_SCHED_YIELD            = 245
+	SYS_SCHED_GET_PRIORITY_MAX = 246
+	SYS_SCHED_GET_PRIORITY_MIN = 247
+	SYS_SCHED_RR_GET_INTERVAL  = 248
+	SYS_NANOSLEEP              = 249
+	SYS_MREMAP                 = 250
+	SYS__SYSCTL                = 251
+	SYS_GETSID                 = 252
+	SYS_FDATASYNC              = 253
+	SYS_NFSSERVCTL             = 254
+	SYS_SYNC_FILE_RANGE        = 255
+	SYS_CLOCK_SETTIME          = 256
+	SYS_CLOCK_GETTIME          = 257
+	SYS_CLOCK_GETRES           = 258
+	SYS_CLOCK_NANOSLEEP        = 259
+	SYS_SCHED_GETAFFINITY      = 260
+	SYS_SCHED_SETAFFINITY      = 261
+	SYS_TIMER_SETTIME          = 262
+	SYS_TIMER_GETTIME          = 263
+	SYS_TIMER_GETOVERRUN       = 264
+	SYS_TIMER_DELETE           = 265
+	SYS_TIMER_CREATE           = 266
+	SYS_IO_SETUP               = 268
+	SYS_IO_DESTROY             = 269
+	SYS_IO_SUBMIT              = 270
+	SYS_IO_CANCEL              = 271
+	SYS_IO_GETEVENTS           = 272
+	SYS_MQ_OPEN                = 273
+	SYS_MQ_UNLINK              = 274
+	SYS_MQ_TIMEDSEND           = 275
+	SYS_MQ_TIMEDRECEIVE        = 276
+	SYS_MQ_NOTIFY              = 277
+	SYS_MQ_GETSETATTR          = 278
+	SYS_WAITID                 = 279
+	SYS_TEE                    = 280
+	SYS_ADD_KEY                = 281
+	SYS_REQUEST_KEY            = 282
+	SYS_KEYCTL                 = 283
+	SYS_OPENAT                 = 284
+	SYS_MKDIRAT                = 285
+	SYS_MKNODAT                = 286
+	SYS_FCHOWNAT               = 287
+	SYS_FUTIMESAT              = 288
+	SYS_FSTATAT64              = 289
+	SYS_UNLINKAT               = 290
+	SYS_RENAMEAT               = 291
+	SYS_LINKAT                 = 292
+	SYS_SYMLINKAT              = 293
+	SYS_READLINKAT             = 294
+	SYS_FCHMODAT               = 295
+	SYS_FACCESSAT              = 296
+	SYS_PSELECT6               = 297
+	SYS_PPOLL                  = 298
+	SYS_UNSHARE                = 299
+	SYS_SET_ROBUST_LIST        = 300
+	SYS_GET_ROBUST_LIST        = 301
+	SYS_MIGRATE_PAGES          = 302
+	SYS_MBIND                  = 303
+	SYS_GET_MEMPOLICY          = 304
+	SYS_SET_MEMPOLICY          = 305
+	SYS_KEXEC_LOAD             = 306
+	SYS_MOVE_PAGES             = 307
+	SYS_GETCPU                 = 308
+	SYS_EPOLL_PWAIT            = 309
+	SYS_UTIMENSAT              = 310
+	SYS_SIGNALFD               = 311
+	SYS_TIMERFD_CREATE         = 312
+	SYS_EVENTFD                = 313
+	SYS_FALLOCATE              = 314
+	SYS_TIMERFD_SETTIME        = 315
+	SYS_TIMERFD_GETTIME        = 316
+	SYS_SIGNALFD4              = 317
+	SYS_EVENTFD2               = 318
+	SYS_EPOLL_CREATE1          = 319
+	SYS_DUP3                   = 320
+	SYS_PIPE2                  = 321
+	SYS_INOTIFY_INIT1          = 322
+	SYS_ACCEPT4                = 323
+	SYS_PREADV                 = 324
+	SYS_PWRITEV                = 325
+	SYS_RT_TGSIGQUEUEINFO      = 326
+	SYS_PERF_EVENT_OPEN        = 327
+	SYS_RECVMMSG               = 328
+	SYS_FANOTIFY_INIT          = 329
+	SYS_FANOTIFY_MARK          = 330
+	SYS_PRLIMIT64              = 331
+	SYS_NAME_TO_HANDLE_AT      = 332
+	SYS_OPEN_BY_HANDLE_AT      = 333
+	SYS_CLOCK_ADJTIME          = 334
+	SYS_SYNCFS                 = 335
+	SYS_SENDMMSG               = 336
+	SYS_SETNS                  = 337
+	SYS_PROCESS_VM_READV       = 338
+	SYS_PROCESS_VM_WRITEV      = 339
+	SYS_KERN_FEATURES          = 340
+	SYS_KCMP                   = 341
+	SYS_FINIT_MODULE           = 342
+	SYS_SCHED_SETATTR          = 343
+	SYS_SCHED_GETATTR          = 344
+	SYS_RENAMEAT2              = 345
+	SYS_SECCOMP                = 346
+	SYS_GETRANDOM              = 347
+	SYS_MEMFD_CREATE           = 348
+	SYS_BPF                    = 349
+	SYS_EXECVEAT               = 350
+	SYS_MEMBARRIER             = 351
+	SYS_USERFAULTFD            = 352
+	SYS_BIND                   = 353
+	SYS_LISTEN                 = 354
+	SYS_SETSOCKOPT             = 355
+	SYS_MLOCK2                 = 356
+	SYS_COPY_FILE_RANGE        = 357
+	SYS_PREADV2                = 358
+	SYS_PWRITEV2               = 359
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 817ac9c29a02f91e2a5c43ef2af8ef5b738b0dc2..35f11bd1bcbb3da688005969c8879af91132c275 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -1,6 +1,6 @@
 // +build arm,linux
 // Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_linux.go
+// cgo -godefs types_linux.go | go run mkpost.go
 
 package unix
 
@@ -155,6 +155,15 @@ type Flock_t struct {
 	Pad_cgo_1 [4]byte
 }
 
+const (
+	FADV_NORMAL     = 0x0
+	FADV_RANDOM     = 0x1
+	FADV_SEQUENTIAL = 0x2
+	FADV_WILLNEED   = 0x3
+	FADV_DONTNEED   = 0x4
+	FADV_NOREUSE    = 0x5
+)
+
 type RawSockaddrInet4 struct {
 	Family uint16
 	Port   uint16
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d18b704afa65bbb19529fa1c2a244cc464a112b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -0,0 +1,640 @@
+// +build sparc64,linux
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go | go run mkpost.go
+
+package unix
+
+const (
+	sizeofPtr      = 0x8
+	sizeofShort    = 0x2
+	sizeofInt      = 0x4
+	sizeofLong     = 0x8
+	sizeofLongLong = 0x8
+	PathMax        = 0x1000
+)
+
+type (
+	_C_short     int16
+	_C_int       int32
+	_C_long      int64
+	_C_long_long int64
+)
+
+type Timespec struct {
+	Sec  int64
+	Nsec int64
+}
+
+type Timeval struct {
+	Sec       int64
+	Usec      int32
+	Pad_cgo_0 [4]byte
+}
+
+type Timex struct {
+	Modes     uint32
+	Pad_cgo_0 [4]byte
+	Offset    int64
+	Freq      int64
+	Maxerror  int64
+	Esterror  int64
+	Status    int32
+	Pad_cgo_1 [4]byte
+	Constant  int64
+	Precision int64
+	Tolerance int64
+	Time      Timeval
+	Tick      int64
+	Ppsfreq   int64
+	Jitter    int64
+	Shift     int32
+	Pad_cgo_2 [4]byte
+	Stabil    int64
+	Jitcnt    int64
+	Calcnt    int64
+	Errcnt    int64
+	Stbcnt    int64
+	Tai       int32
+	Pad_cgo_3 [44]byte
+}
+
+type Time_t int64
+
+type Tms struct {
+	Utime  int64
+	Stime  int64
+	Cutime int64
+	Cstime int64
+}
+
+type Utimbuf struct {
+	Actime  int64
+	Modtime int64
+}
+
+type Rusage struct {
+	Utime    Timeval
+	Stime    Timeval
+	Maxrss   int64
+	Ixrss    int64
+	Idrss    int64
+	Isrss    int64
+	Minflt   int64
+	Majflt   int64
+	Nswap    int64
+	Inblock  int64
+	Oublock  int64
+	Msgsnd   int64
+	Msgrcv   int64
+	Nsignals int64
+	Nvcsw    int64
+	Nivcsw   int64
+}
+
+type Rlimit struct {
+	Cur uint64
+	Max uint64
+}
+
+type _Gid_t uint32
+
+type Stat_t struct {
+	Dev                uint64
+	X__pad1            uint16
+	Pad_cgo_0          [6]byte
+	Ino                uint64
+	Mode               uint32
+	Nlink              uint32
+	Uid                uint32
+	Gid                uint32
+	Rdev               uint64
+	X__pad2            uint16
+	Pad_cgo_1          [6]byte
+	Size               int64
+	Blksize            int64
+	Blocks             int64
+	Atim               Timespec
+	Mtim               Timespec
+	Ctim               Timespec
+	X__glibc_reserved4 uint64
+	X__glibc_reserved5 uint64
+}
+
+type Statfs_t struct {
+	Type    int64
+	Bsize   int64
+	Blocks  uint64
+	Bfree   uint64
+	Bavail  uint64
+	Files   uint64
+	Ffree   uint64
+	Fsid    Fsid
+	Namelen int64
+	Frsize  int64
+	Flags   int64
+	Spare   [4]int64
+}
+
+type Dirent struct {
+	Ino       uint64
+	Off       int64
+	Reclen    uint16
+	Type      uint8
+	Name      [256]int8
+	Pad_cgo_0 [5]byte
+}
+
+type Fsid struct {
+	X__val [2]int32
+}
+
+type Flock_t struct {
+	Type              int16
+	Whence            int16
+	Pad_cgo_0         [4]byte
+	Start             int64
+	Len               int64
+	Pid               int32
+	X__glibc_reserved int16
+	Pad_cgo_1         [2]byte
+}
+
+const (
+	FADV_NORMAL     = 0x0
+	FADV_RANDOM     = 0x1
+	FADV_SEQUENTIAL = 0x2
+	FADV_WILLNEED   = 0x3
+	FADV_DONTNEED   = 0x4
+	FADV_NOREUSE    = 0x5
+)
+
+type RawSockaddrInet4 struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	Zero   [8]uint8
+}
+
+type RawSockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+	Family uint16
+	Path   [108]int8
+}
+
+type RawSockaddrLinklayer struct {
+	Family   uint16
+	Protocol uint16
+	Ifindex  int32
+	Hatype   uint16
+	Pkttype  uint8
+	Halen    uint8
+	Addr     [8]uint8
+}
+
+type RawSockaddrNetlink struct {
+	Family uint16
+	Pad    uint16
+	Pid    uint32
+	Groups uint32
+}
+
+type RawSockaddrHCI struct {
+	Family  uint16
+	Dev     uint16
+	Channel uint16
+}
+
+type RawSockaddr struct {
+	Family uint16
+	Data   [14]int8
+}
+
+type RawSockaddrAny struct {
+	Addr RawSockaddr
+	Pad  [96]int8
+}
+
+type _Socklen uint32
+
+type Linger struct {
+	Onoff  int32
+	Linger int32
+}
+
+type Iovec struct {
+	Base *byte
+	Len  uint64
+}
+
+type IPMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type IPMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type IPv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type Msghdr struct {
+	Name       *byte
+	Namelen    uint32
+	Pad_cgo_0  [4]byte
+	Iov        *Iovec
+	Iovlen     uint64
+	Control    *byte
+	Controllen uint64
+	Flags      int32
+	Pad_cgo_1  [4]byte
+}
+
+type Cmsghdr struct {
+	Len   uint64
+	Level int32
+	Type  int32
+}
+
+type Inet4Pktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type Inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type IPv6MTUInfo struct {
+	Addr RawSockaddrInet6
+	Mtu  uint32
+}
+
+type ICMPv6Filter struct {
+	Data [8]uint32
+}
+
+type Ucred struct {
+	Pid int32
+	Uid uint32
+	Gid uint32
+}
+
+type TCPInfo struct {
+	State          uint8
+	Ca_state       uint8
+	Retransmits    uint8
+	Probes         uint8
+	Backoff        uint8
+	Options        uint8
+	Pad_cgo_0      [2]byte
+	Rto            uint32
+	Ato            uint32
+	Snd_mss        uint32
+	Rcv_mss        uint32
+	Unacked        uint32
+	Sacked         uint32
+	Lost           uint32
+	Retrans        uint32
+	Fackets        uint32
+	Last_data_sent uint32
+	Last_ack_sent  uint32
+	Last_data_recv uint32
+	Last_ack_recv  uint32
+	Pmtu           uint32
+	Rcv_ssthresh   uint32
+	Rtt            uint32
+	Rttvar         uint32
+	Snd_ssthresh   uint32
+	Snd_cwnd       uint32
+	Advmss         uint32
+	Reordering     uint32
+	Rcv_rtt        uint32
+	Rcv_space      uint32
+	Total_retrans  uint32
+}
+
+const (
+	SizeofSockaddrInet4     = 0x10
+	SizeofSockaddrInet6     = 0x1c
+	SizeofSockaddrAny       = 0x70
+	SizeofSockaddrUnix      = 0x6e
+	SizeofSockaddrLinklayer = 0x14
+	SizeofSockaddrNetlink   = 0xc
+	SizeofSockaddrHCI       = 0x6
+	SizeofLinger            = 0x8
+	SizeofIPMreq            = 0x8
+	SizeofIPMreqn           = 0xc
+	SizeofIPv6Mreq          = 0x14
+	SizeofMsghdr            = 0x38
+	SizeofCmsghdr           = 0x10
+	SizeofInet4Pktinfo      = 0xc
+	SizeofInet6Pktinfo      = 0x14
+	SizeofIPv6MTUInfo       = 0x20
+	SizeofICMPv6Filter      = 0x20
+	SizeofUcred             = 0xc
+	SizeofTCPInfo           = 0x68
+)
+
+const (
+	IFA_UNSPEC          = 0x0
+	IFA_ADDRESS         = 0x1
+	IFA_LOCAL           = 0x2
+	IFA_LABEL           = 0x3
+	IFA_BROADCAST       = 0x4
+	IFA_ANYCAST         = 0x5
+	IFA_CACHEINFO       = 0x6
+	IFA_MULTICAST       = 0x7
+	IFLA_UNSPEC         = 0x0
+	IFLA_ADDRESS        = 0x1
+	IFLA_BROADCAST      = 0x2
+	IFLA_IFNAME         = 0x3
+	IFLA_MTU            = 0x4
+	IFLA_LINK           = 0x5
+	IFLA_QDISC          = 0x6
+	IFLA_STATS          = 0x7
+	IFLA_COST           = 0x8
+	IFLA_PRIORITY       = 0x9
+	IFLA_MASTER         = 0xa
+	IFLA_WIRELESS       = 0xb
+	IFLA_PROTINFO       = 0xc
+	IFLA_TXQLEN         = 0xd
+	IFLA_MAP            = 0xe
+	IFLA_WEIGHT         = 0xf
+	IFLA_OPERSTATE      = 0x10
+	IFLA_LINKMODE       = 0x11
+	IFLA_LINKINFO       = 0x12
+	IFLA_NET_NS_PID     = 0x13
+	IFLA_IFALIAS        = 0x14
+	IFLA_MAX            = 0x2a
+	RT_SCOPE_UNIVERSE   = 0x0
+	RT_SCOPE_SITE       = 0xc8
+	RT_SCOPE_LINK       = 0xfd
+	RT_SCOPE_HOST       = 0xfe
+	RT_SCOPE_NOWHERE    = 0xff
+	RT_TABLE_UNSPEC     = 0x0
+	RT_TABLE_COMPAT     = 0xfc
+	RT_TABLE_DEFAULT    = 0xfd
+	RT_TABLE_MAIN       = 0xfe
+	RT_TABLE_LOCAL      = 0xff
+	RT_TABLE_MAX        = 0xffffffff
+	RTA_UNSPEC          = 0x0
+	RTA_DST             = 0x1
+	RTA_SRC             = 0x2
+	RTA_IIF             = 0x3
+	RTA_OIF             = 0x4
+	RTA_GATEWAY         = 0x5
+	RTA_PRIORITY        = 0x6
+	RTA_PREFSRC         = 0x7
+	RTA_METRICS         = 0x8
+	RTA_MULTIPATH       = 0x9
+	RTA_FLOW            = 0xb
+	RTA_CACHEINFO       = 0xc
+	RTA_TABLE           = 0xf
+	RTN_UNSPEC          = 0x0
+	RTN_UNICAST         = 0x1
+	RTN_LOCAL           = 0x2
+	RTN_BROADCAST       = 0x3
+	RTN_ANYCAST         = 0x4
+	RTN_MULTICAST       = 0x5
+	RTN_BLACKHOLE       = 0x6
+	RTN_UNREACHABLE     = 0x7
+	RTN_PROHIBIT        = 0x8
+	RTN_THROW           = 0x9
+	RTN_NAT             = 0xa
+	RTN_XRESOLVE        = 0xb
+	RTNLGRP_NONE        = 0x0
+	RTNLGRP_LINK        = 0x1
+	RTNLGRP_NOTIFY      = 0x2
+	RTNLGRP_NEIGH       = 0x3
+	RTNLGRP_TC          = 0x4
+	RTNLGRP_IPV4_IFADDR = 0x5
+	RTNLGRP_IPV4_MROUTE = 0x6
+	RTNLGRP_IPV4_ROUTE  = 0x7
+	RTNLGRP_IPV4_RULE   = 0x8
+	RTNLGRP_IPV6_IFADDR = 0x9
+	RTNLGRP_IPV6_MROUTE = 0xa
+	RTNLGRP_IPV6_ROUTE  = 0xb
+	RTNLGRP_IPV6_IFINFO = 0xc
+	RTNLGRP_IPV6_PREFIX = 0x12
+	RTNLGRP_IPV6_RULE   = 0x13
+	RTNLGRP_ND_USEROPT  = 0x14
+	SizeofNlMsghdr      = 0x10
+	SizeofNlMsgerr      = 0x14
+	SizeofRtGenmsg      = 0x1
+	SizeofNlAttr        = 0x4
+	SizeofRtAttr        = 0x4
+	SizeofIfInfomsg     = 0x10
+	SizeofIfAddrmsg     = 0x8
+	SizeofRtMsg         = 0xc
+	SizeofRtNexthop     = 0x8
+)
+
+type NlMsghdr struct {
+	Len   uint32
+	Type  uint16
+	Flags uint16
+	Seq   uint32
+	Pid   uint32
+}
+
+type NlMsgerr struct {
+	Error int32
+	Msg   NlMsghdr
+}
+
+type RtGenmsg struct {
+	Family uint8
+}
+
+type NlAttr struct {
+	Len  uint16
+	Type uint16
+}
+
+type RtAttr struct {
+	Len  uint16
+	Type uint16
+}
+
+type IfInfomsg struct {
+	Family     uint8
+	X__ifi_pad uint8
+	Type       uint16
+	Index      int32
+	Flags      uint32
+	Change     uint32
+}
+
+type IfAddrmsg struct {
+	Family    uint8
+	Prefixlen uint8
+	Flags     uint8
+	Scope     uint8
+	Index     uint32
+}
+
+type RtMsg struct {
+	Family   uint8
+	Dst_len  uint8
+	Src_len  uint8
+	Tos      uint8
+	Table    uint8
+	Protocol uint8
+	Scope    uint8
+	Type     uint8
+	Flags    uint32
+}
+
+type RtNexthop struct {
+	Len     uint16
+	Flags   uint8
+	Hops    uint8
+	Ifindex int32
+}
+
+const (
+	SizeofSockFilter = 0x8
+	SizeofSockFprog  = 0x10
+)
+
+type SockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
+
+type SockFprog struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *SockFilter
+}
+
+type InotifyEvent struct {
+	Wd     int32
+	Mask   uint32
+	Cookie uint32
+	Len    uint32
+}
+
+const SizeofInotifyEvent = 0x10
+
+type PtraceRegs struct {
+	Regs   [16]uint64
+	Tstate uint64
+	Tpc    uint64
+	Tnpc   uint64
+	Y      uint32
+	Magic  uint32
+}
+
+type ptracePsw struct {
+}
+
+type ptraceFpregs struct {
+}
+
+type ptracePer struct {
+}
+
+type FdSet struct {
+	Bits [16]int64
+}
+
+type Sysinfo_t struct {
+	Uptime    int64
+	Loads     [3]uint64
+	Totalram  uint64
+	Freeram   uint64
+	Sharedram uint64
+	Bufferram uint64
+	Totalswap uint64
+	Freeswap  uint64
+	Procs     uint16
+	Pad       uint16
+	Pad_cgo_0 [4]byte
+	Totalhigh uint64
+	Freehigh  uint64
+	Unit      uint32
+	X_f       [0]int8
+	Pad_cgo_1 [4]byte
+}
+
+type Utsname struct {
+	Sysname    [65]int8
+	Nodename   [65]int8
+	Release    [65]int8
+	Version    [65]int8
+	Machine    [65]int8
+	Domainname [65]int8
+}
+
+type Ustat_t struct {
+	Tfree     int32
+	Pad_cgo_0 [4]byte
+	Tinode    uint64
+	Fname     [6]int8
+	Fpack     [6]int8
+	Pad_cgo_1 [4]byte
+}
+
+type EpollEvent struct {
+	Events  uint32
+	X_padFd int32
+	Fd      int32
+	Pad     int32
+}
+
+const (
+	AT_FDCWD            = -0x64
+	AT_REMOVEDIR        = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_SYMLINK_NOFOLLOW = 0x100
+)
+
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLIN    = 0x1
+	POLLPRI   = 0x2
+	POLLOUT   = 0x4
+	POLLRDHUP = 0x800
+	POLLERR   = 0x8
+	POLLHUP   = 0x10
+	POLLNVAL  = 0x20
+)
+
+type Sigset_t struct {
+	X__val [16]uint64
+}
+
+const _SC_PAGESIZE = 0x1e
+
+type Termios struct {
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Line   uint8
+	Cc     [19]uint8
+	Ispeed uint32
+	Ospeed uint32
+}
diff --git a/vendor/golang.org/x/text/internal/gen/gen.go b/vendor/golang.org/x/text/internal/gen/gen.go
index 9eb2987bd54b4009e669a5a2bdbeff67207ea4c7..84c699faa96f27b686684a3794647c5482bf0f05 100644
--- a/vendor/golang.org/x/text/internal/gen/gen.go
+++ b/vendor/golang.org/x/text/internal/gen/gen.go
@@ -85,7 +85,11 @@ func CLDRVersion() string {
 
 // IsLocal reports whether data files are available locally.
 func IsLocal() bool {
-	if _, err := os.Stat(localReadmeFile()); err != nil {
+	dir, err := localReadmeFile()
+	if err != nil {
+		return false
+	}
+	if _, err = os.Stat(dir); err != nil {
 		return false
 	}
 	return true
@@ -130,19 +134,22 @@ var (
 
 const permissions = 0755
 
-func localReadmeFile() string {
+func localReadmeFile() (string, error) {
 	p, err := build.Import("golang.org/x/text", "", build.FindOnly)
 	if err != nil {
-		log.Fatalf("Could not locate package: %v", err)
+		return "", fmt.Errorf("Could not locate package: %v", err)
 	}
-	return filepath.Join(p.Dir, "DATA", "README")
+	return filepath.Join(p.Dir, "DATA", "README"), nil
 }
 
 func getLocalDir() string {
 	dirMutex.Lock()
 	defer dirMutex.Unlock()
 
-	readme := localReadmeFile()
+	readme, err := localReadmeFile()
+	if err != nil {
+		log.Fatal(err)
+	}
 	dir := filepath.Dir(readme)
 	if _, err := os.Stat(readme); err != nil {
 		if err := os.MkdirAll(dir, permissions); err != nil {
diff --git a/vendor/golang.org/x/text/internal/ucd/ucd.go b/vendor/golang.org/x/text/internal/ucd/ucd.go
index 60b27d51bec36a70629a59df6e12bcd9d9adecac..309e8d8b16e6f8451b021b52440f154913d67cb7 100644
--- a/vendor/golang.org/x/text/internal/ucd/ucd.go
+++ b/vendor/golang.org/x/text/internal/ucd/ucd.go
@@ -13,7 +13,6 @@ import (
 	"bufio"
 	"bytes"
 	"errors"
-	"fmt"
 	"io"
 	"log"
 	"regexp"
@@ -112,7 +111,6 @@ func (p *Parser) setError(err error) {
 
 func (p *Parser) getField(i int) []byte {
 	if i >= len(p.field) {
-		p.setError(fmt.Errorf("ucd: index of field %d out of bounds", i))
 		return nil
 	}
 	return p.field[i]
diff --git a/vendor/golang.org/x/text/unicode/cldr/base.go b/vendor/golang.org/x/text/unicode/cldr/base.go
index 21821791ec0ab93d03324997e6cd533206476fe1..2382f4d6da1afb4d3d9c8692cfe7078f032810b0 100644
--- a/vendor/golang.org/x/text/unicode/cldr/base.go
+++ b/vendor/golang.org/x/text/unicode/cldr/base.go
@@ -2,16 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package cldr provides a parser for LDML and related XML formats.
-// This package is inteded to be used by the table generation tools
-// for the various internationalization-related packages.
-// As the XML types are generated from the CLDR DTD, and as the CLDR standard
-// is periodically amended, this package may change considerably over time.
-// This mostly means that data may appear and disappear between versions.
-// That is, old code should keep compiling for newer versions, but data
-// may have moved or changed.
-// CLDR version 22 is the first version supported by this package.
-// Older versions may not work.
 package cldr
 
 import (
diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go
index ea3fe139ef57786bac9215a488ff3b78d4491508..2197f8ac268e3bcbbbb8792beb1ff61a6842f648 100644
--- a/vendor/golang.org/x/text/unicode/cldr/cldr.go
+++ b/vendor/golang.org/x/text/unicode/cldr/cldr.go
@@ -5,7 +5,7 @@
 //go:generate go run makexml.go -output xml.go
 
 // Package cldr provides a parser for LDML and related XML formats.
-// This package is inteded to be used by the table generation tools
+// This package is intended to be used by the table generation tools
 // for the various internationalization-related packages.
 // As the XML types are generated from the CLDR DTD, and as the CLDR standard
 // is periodically amended, this package may change considerably over time.
diff --git a/vendor/golang.org/x/text/unicode/cldr/xml.go b/vendor/golang.org/x/text/unicode/cldr/xml.go
index 0a6e515c9ed79ab2902aa1703c92996a750e9bc6..a1550ed95ff6b352ea71a89012bdd2e78eea43bd 100644
--- a/vendor/golang.org/x/text/unicode/cldr/xml.go
+++ b/vendor/golang.org/x/text/unicode/cldr/xml.go
@@ -185,6 +185,11 @@ type SupplementalData struct {
 			Day         string `xml:"day,attr"`
 			Territories string `xml:"territories,attr"`
 		} `xml:"weekendEnd"`
+		WeekOfPreference []*struct {
+			Common
+			Locales  string `xml:"locales,attr"`
+			Ordering string `xml:"ordering,attr"`
+		} `xml:"weekOfPreference"`
 	} `xml:"weekData"`
 	TimeData *struct {
 		Common
@@ -731,6 +736,7 @@ type LDML struct {
 						Count string `xml:"count,attr"`
 					} `xml:"relativeTimePattern"`
 				} `xml:"relativeTime"`
+				RelativePeriod []*Common `xml:"relativePeriod"`
 			} `xml:"field"`
 		} `xml:"fields"`
 		TimeZoneNames *TimeZoneNames `xml:"timeZoneNames"`
@@ -801,6 +807,14 @@ type LDML struct {
 			Noexpr  []*Common `xml:"noexpr"`
 		} `xml:"messages"`
 	} `xml:"posix"`
+	CharacterLabels *struct {
+		Common
+		CharacterLabelPattern []*struct {
+			Common
+			Count string `xml:"count,attr"`
+		} `xml:"characterLabelPattern"`
+		CharacterLabel []*Common `xml:"characterLabel"`
+	} `xml:"characterLabels"`
 	Segmentations *struct {
 		Common
 		Segmentation []*struct {
@@ -1096,7 +1110,8 @@ type Calendar struct {
 			Common
 			DateFormatItem []*struct {
 				Common
-				Id string `xml:"id,attr"`
+				Id    string `xml:"id,attr"`
+				Count string `xml:"count,attr"`
 			} `xml:"dateFormatItem"`
 		} `xml:"availableFormats"`
 		AppendItems []*struct {
@@ -1135,6 +1150,7 @@ type Calendar struct {
 					Count string `xml:"count,attr"`
 				} `xml:"relativeTimePattern"`
 			} `xml:"relativeTime"`
+			RelativePeriod []*Common `xml:"relativePeriod"`
 		} `xml:"field"`
 	} `xml:"fields"`
 }
@@ -1437,4 +1453,4 @@ type Numbers struct {
 }
 
 // Version is the version of CLDR from which the XML definitions are generated.
-const Version = "29"
+const Version = "30"
diff --git a/vendor/golang.org/x/text/unicode/norm/readwriter.go b/vendor/golang.org/x/text/unicode/norm/readwriter.go
index 4fa0e04b21f4a3f7400d94fb808ffeffd3cff957..d926ee903e56f210f6d71ddda9d12b6999ee7f5d 100644
--- a/vendor/golang.org/x/text/unicode/norm/readwriter.go
+++ b/vendor/golang.org/x/text/unicode/norm/readwriter.go
@@ -112,7 +112,6 @@ func (r *normReader) Read(p []byte) (int, error) {
 			}
 		}
 	}
-	panic("should not reach here")
 }
 
 // Reader returns a new reader that implements Read
diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go
index 806e02d646fe56beeb8e99f8ac58d1c6633b8c3a..f6e15be35da9ade73f53a19de5fb03cbcb0ca0ae 100644
--- a/vendor/google.golang.org/api/googleapi/googleapi.go
+++ b/vendor/google.golang.org/api/googleapi/googleapi.go
@@ -50,7 +50,7 @@ const (
 	// UserAgent is the header string used to identify this package.
 	UserAgent = "google-api-go-client/" + Version
 
-	// The default chunk size to use for resumable uplods if not specified by the user.
+	// The default chunk size to use for resumable uploads if not specified by the user.
 	DefaultUploadChunkSize = 8 * 1024 * 1024
 
 	// The minimum chunk size that can be used for resumable uploads.  All
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
index 69bdf2760b16271c6e96f8947b3221c8e0d4e919..598d1a54a7b4415752f850d1a3f0615b46483795 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-api.json
+++ b/vendor/google.golang.org/api/storage/v1/storage-api.json
@@ -1,11 +1,11 @@
 {
  "kind": "discovery#restDescription",
- "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/HhsBn9wwz5D-d92BzzyinZOo-SY\"",
+ "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/G3kZz5Dv92Y-2NZwaNrcr5jwm4A\"",
  "discoveryVersion": "v1",
  "id": "storage:v1",
  "name": "storage",
  "version": "v1",
- "revision": "20160923",
+ "revision": "20161019",
  "title": "Cloud Storage JSON API",
  "description": "Stores and retrieves potentially large, immutable data objects.",
  "ownerDomain": "google.com",
@@ -177,9 +177,13 @@
           "type": "object",
           "description": "The action to take.",
           "properties": {
+           "storageClass": {
+            "type": "string",
+            "description": "Target storage class. Required iff the type of the action is SetStorageClass."
+           },
            "type": {
             "type": "string",
-            "description": "Type of the action. Currently, only Delete is supported."
+            "description": "Type of the action. Currently, only Delete and SetStorageClass are supported."
            }
           }
          },
@@ -201,6 +205,13 @@
             "type": "boolean",
             "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects."
            },
+           "matchesStorageClass": {
+            "type": "array",
+            "description": "Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.",
+            "items": {
+             "type": "string"
+            }
+           },
            "numNewerVersions": {
             "type": "integer",
             "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.",
@@ -270,7 +281,7 @@
     },
     "storageClass": {
      "type": "string",
-     "description": "The bucket's storage class. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to STANDARD. For more information, see storage classes."
+     "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes."
     },
     "timeCreated": {
      "type": "string",
@@ -1261,7 +1272,7 @@
      "id": "storage.buckets.patch",
      "path": "b/{bucket}",
      "httpMethod": "PATCH",
-     "description": "Updates a bucket. This method supports patch semantics.",
+     "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. This method supports patch semantics.",
      "parameters": {
       "bucket": {
        "type": "string",
@@ -1353,7 +1364,7 @@
      "id": "storage.buckets.update",
      "path": "b/{bucket}",
      "httpMethod": "PUT",
-     "description": "Updates a bucket.",
+     "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.",
      "parameters": {
       "bucket": {
        "type": "string",
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index a7b69824d26ce11fac18c3c57516af8ae43b7498..4c458af1808fc0a227b4a84dbe96baada2711e20 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -210,10 +210,13 @@ type Bucket struct {
 	// SelfLink: The URI of this bucket.
 	SelfLink string `json:"selfLink,omitempty"`
 
-	// StorageClass: The bucket's storage class. This defines how objects in
-	// the bucket are stored and determines the SLA and the cost of storage.
-	// Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY.
-	// Defaults to STANDARD. For more information, see storage classes.
+	// StorageClass: The bucket's default storage class, used whenever no
+	// storageClass is specified for a newly-created object. This defines
+	// how objects in the bucket are stored and determines the SLA and the
+	// cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD,
+	// NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value
+	// is not specified when the bucket is created, it will default to
+	// STANDARD. For more information, see storage classes.
 	StorageClass string `json:"storageClass,omitempty"`
 
 	// TimeCreated: The creation time of the bucket in RFC 3339 format.
@@ -362,10 +365,15 @@ func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) {
 
 // BucketLifecycleRuleAction: The action to take.
 type BucketLifecycleRuleAction struct {
-	// Type: Type of the action. Currently, only Delete is supported.
+	// StorageClass: Target storage class. Required iff the type of the
+	// action is SetStorageClass.
+	StorageClass string `json:"storageClass,omitempty"`
+
+	// Type: Type of the action. Currently, only Delete and SetStorageClass
+	// are supported.
 	Type string `json:"type,omitempty"`
 
-	// ForceSendFields is a list of field names (e.g. "Type") to
+	// ForceSendFields is a list of field names (e.g. "StorageClass") to
 	// unconditionally include in API requests. By default, fields with
 	// empty values are omitted from API requests. However, any non-pointer,
 	// non-interface field appearing in ForceSendFields will be sent to the
@@ -373,10 +381,10 @@ type BucketLifecycleRuleAction struct {
 	// used to include empty fields in Patch requests.
 	ForceSendFields []string `json:"-"`
 
-	// NullFields is a list of field names (e.g. "Type") to include in API
-	// requests with the JSON null value. By default, fields with empty
-	// values are omitted from API requests. However, any field with an
-	// empty value appearing in NullFields will be sent to the server as
+	// NullFields is a list of field names (e.g. "StorageClass") to include
+	// in API requests with the JSON null value. By default, fields with
+	// empty values are omitted from API requests. However, any field with
+	// an empty value appearing in NullFields will be sent to the server as
 	// null. It is an error if a field in this list has a non-empty value.
 	// This may be used to include null fields in Patch requests.
 	NullFields []string `json:"-"`
@@ -405,6 +413,12 @@ type BucketLifecycleRuleCondition struct {
 	// matches archived objects.
 	IsLive bool `json:"isLive,omitempty"`
 
+	// MatchesStorageClass: Objects having any of the storage classes
+	// specified by this condition will be matched. Values include
+	// MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and
+	// DURABLE_REDUCED_AVAILABILITY.
+	MatchesStorageClass []string `json:"matchesStorageClass,omitempty"`
+
 	// NumNewerVersions: Relevant only for versioned objects. If the value
 	// is N, this condition is satisfied when there are at least N versions
 	// (including the live version) newer than this version of the object.
@@ -2842,7 +2856,9 @@ type BucketsPatchCall struct {
 	ctx_       context.Context
 }
 
-// Patch: Updates a bucket. This method supports patch semantics.
+// Patch: Updates a bucket. Changes to the bucket will be readable
+// immediately after writing, but configuration changes may take time to
+// propagate. This method supports patch semantics.
 func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall {
 	c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.bucket = bucket
@@ -2992,7 +3008,7 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
 	}
 	return ret, nil
 	// {
-	//   "description": "Updates a bucket. This method supports patch semantics.",
+	//   "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. This method supports patch semantics.",
 	//   "httpMethod": "PATCH",
 	//   "id": "storage.buckets.patch",
 	//   "parameterOrder": [
@@ -3096,7 +3112,9 @@ type BucketsUpdateCall struct {
 	ctx_       context.Context
 }
 
-// Update: Updates a bucket.
+// Update: Updates a bucket. Changes to the bucket will be readable
+// immediately after writing, but configuration changes may take time to
+// propagate.
 func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall {
 	c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.bucket = bucket
@@ -3246,7 +3264,7 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
 	}
 	return ret, nil
 	// {
-	//   "description": "Updates a bucket.",
+	//   "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.",
 	//   "httpMethod": "PUT",
 	//   "id": "storage.buckets.update",
 	//   "parameterOrder": [
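For reference, a minimal sketch of how a caller might exercise the new lifecycle fields in the generated storage/v1 client, assuming the usual generated wrappers (Bucket.Name, Bucket.Lifecycle, BucketLifecycle.Rule) that this hunk does not show; the helper name is illustrative:

    package example

    import storage "google.golang.org/api/storage/v1"

    // lifecycleDowngrade is an illustrative helper (not part of this change): it
    // builds a bucket whose lifecycle rule moves REGIONAL/STANDARD objects to
    // NEARLINE via the new SetStorageClass action and MatchesStorageClass condition.
    func lifecycleDowngrade(name string) *storage.Bucket {
        return &storage.Bucket{
            Name: name,
            Lifecycle: &storage.BucketLifecycle{
                Rule: []*storage.BucketLifecycleRule{{
                    Action: &storage.BucketLifecycleRuleAction{
                        Type:         "SetStorageClass",
                        StorageClass: "NEARLINE",
                    },
                    Condition: &storage.BucketLifecycleRuleCondition{
                        MatchesStorageClass: []string{"REGIONAL", "STANDARD"},
                    },
                }},
            },
        }
    }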
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
index 475cf2e32e80d044b86a111ac6382443d3851d88..d4f808442b7a735816ab36a987a705352d70eac5 100644
--- a/vendor/google.golang.org/appengine/appengine.go
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -28,7 +28,8 @@ import (
 // See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
 // for details on how to do your own health checking.
 //
-// Main is not yet supported on App Engine Standard.
+// On App Engine Standard it ensures the server has started and is prepared to
+// receive requests.
 //
 // Main never returns.
 //
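With Main now supported on App Engine Standard as well as Flexible, a single entry point covers both environments. A minimal hedged sketch of the documented usage (the route and handler body are illustrative):

    package main

    import (
        "fmt"
        "net/http"

        "google.golang.org/appengine"
    )

    func main() {
        // Register handlers with the default mux, then hand control to the
        // App Engine runtime; Main never returns.
        http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintln(w, "ok")
        })
        appengine.Main()
    }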
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
index e9c56d4ce8c431acc387259f933f6c481456a5aa..09562c462b6adaf9d7891a1aa224d14ddd3a56fd 100644
--- a/vendor/google.golang.org/appengine/internal/api.go
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -32,7 +32,8 @@ import (
 )
 
 const (
-	apiPath = "/rpc_http"
+	apiPath             = "/rpc_http"
+	defaultTicketSuffix = "/default.20150612t184001.0"
 )
 
 var (
@@ -269,8 +270,13 @@ func WithContext(parent netcontext.Context, req *http.Request) netcontext.Contex
 	return withContext(parent, c)
 }
 
-func getDefaultTicket() string {
+// DefaultTicket returns a ticket used for background context or dev_appserver.
+func DefaultTicket() string {
 	defaultTicketOnce.Do(func() {
+		if IsDevAppServer() {
+			defaultTicket = "testapp" + defaultTicketSuffix
+			return
+		}
 		appID := partitionlessAppID()
 		escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
 		majVersion := VersionID(nil)
@@ -291,7 +297,7 @@ func BackgroundContext() netcontext.Context {
 	}
 
 	// Compute background security ticket.
-	ticket := getDefaultTicket()
+	ticket := DefaultTicket()
 
 	ctxs.bg = &context{
 		req: &http.Request{
@@ -485,9 +491,15 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message)
 	}
 
 	ticket := c.req.Header.Get(ticketHeader)
-	// Fall back to use background ticket when the request ticket is not available in Flex.
+	// Use a test ticket under test environment.
+	if ticket == "" {
+		if appid := ctx.Value(&appIDOverrideKey); appid != nil {
+			ticket = appid.(string) + defaultTicketSuffix
+		}
+	}
+	// Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
 	if ticket == "" {
-		ticket = getDefaultTicket()
+		ticket = DefaultTicket()
 	}
 	req := &remotepb.Request{
 		ServiceName: &service,
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
index 2db33a774bc65fc3e8c6ad8863e795097418fd04..8c3eecec8d2c72afe2763eaae0092bff2a797585 100644
--- a/vendor/google.golang.org/appengine/internal/api_common.go
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -5,6 +5,8 @@
 package internal
 
 import (
+	"os"
+
 	"github.com/golang/protobuf/proto"
 	netcontext "golang.org/x/net/context"
 )
@@ -84,3 +86,31 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{
 func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
 	return withNamespace(ctx, namespace)
 }
+
+// SetTestEnv sets the env variables for testing background ticket in Flex.
+func SetTestEnv() func() {
+	var environ = []struct {
+		key, value string
+	}{
+		{"GAE_LONG_APP_ID", "my-app-id"},
+		{"GAE_MINOR_VERSION", "067924799508853122"},
+		{"GAE_MODULE_INSTANCE", "0"},
+		{"GAE_MODULE_NAME", "default"},
+		{"GAE_MODULE_VERSION", "20150612t184001"},
+	}
+
+	for _, v := range environ {
+		old := os.Getenv(v.key)
+		os.Setenv(v.key, v.value)
+		v.value = old
+	}
+	return func() { // Restore old environment after the test completes.
+		for _, v := range environ {
+			if v.value == "" {
+				os.Unsetenv(v.key)
+				continue
+			}
+			os.Setenv(v.key, v.value)
+		}
+	}
+}
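SetTestEnv returns a restore closure, so a caller inside this module would typically write `defer internal.SetTestEnv()()`. A standalone sketch of the same save-and-restore idiom with an illustrative helper (the name and the variable set are placeholders, not part of this change); previous values are kept by index so the restore closure can see them:

    package example

    import "os"

    // setTestEnv is an illustrative stand-in for the vendored SetTestEnv: it
    // overrides a fixed set of variables and returns a func that restores
    // whatever was there before.
    func setTestEnv() func() {
        vars := []struct{ key, value string }{
            {"GAE_MODULE_NAME", "default"},
            {"GAE_MODULE_VERSION", "20150612t184001"},
        }
        old := make([]string, len(vars))
        for i, v := range vars {
            old[i] = os.Getenv(v.key) // remember the previous value
            os.Setenv(v.key, v.value) // install the test value
        }
        return func() {
            for i, v := range vars {
                if old[i] == "" {
                    os.Unsetenv(v.key) // the variable was not set before
                    continue
                }
                os.Setenv(v.key, old[i])
            }
        }
    }

A test would typically call `defer setTestEnv()()` so the restore runs when the test completes.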
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index 660658bed30e45e9a42df47807a40f1aed5713c3..110a8cf4253120ec640d22c488710f065d9fdb78 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -16,7 +16,7 @@ $ go get google.golang.org/grpc
 Prerequisites
 -------------
 
-This requires Go 1.5 or later .
+This requires Go 1.5 or later.
 
 Constraints
 -----------
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
index 52f4f10fc2553f416777fcd0a0f5f8903eb51387..c99024ee302e747783082e761d7aaf05006d702c 100644
--- a/vendor/google.golang.org/grpc/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -58,7 +58,7 @@ func setDefaults(bc *BackoffConfig) {
 	}
 }
 
-func (bc BackoffConfig) backoff(retries int) (t time.Duration) {
+func (bc BackoffConfig) backoff(retries int) time.Duration {
 	if retries == 0 {
 		return bc.baseDelay
 	}
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
index e217a2077c3108c918839b917a4b2b6f7ebebae0..9d943fbadae12ee3397ca1a8ec12adfc64bb5de0 100644
--- a/vendor/google.golang.org/grpc/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer.go
@@ -38,6 +38,7 @@ import (
 	"sync"
 
 	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/naming"
@@ -315,7 +316,7 @@ func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Ad
 	if !opts.BlockingWait {
 		if len(rr.addrs) == 0 {
 			rr.mu.Unlock()
-			err = fmt.Errorf("there is no address available")
+			err = Errorf(codes.Unavailable, "there is no address available")
 			return
 		}
 		// Returns the next addr on rr.addrs for failfast RPCs.
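Because the balancer now returns a status-coded error instead of a plain fmt error, callers can detect the no-address case programmatically. A small sketch (the helper name is illustrative) using grpc.Code, the error-code accessor available in this version of the package:

    package example

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/codes"
    )

    // isUnavailable reports whether err is the kind of error the balancer now
    // returns when a failfast RPC finds no address, so the caller can back off
    // or retry instead of treating it as fatal.
    func isUnavailable(err error) bool {
        return grpc.Code(err) == codes.Unavailable
    }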
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
index 788b3d92811731bfaf8d3d9aeaa45505f78ed260..772c817edd3d01870560cc285253f1d35ef7a853 100644
--- a/vendor/google.golang.org/grpc/call.go
+++ b/vendor/google.golang.org/grpc/call.go
@@ -49,9 +49,8 @@ import (
 // On error, it returns the error and indicates whether the call should be retried.
 //
 // TODO(zhaoq): Check whether the received message sequence is valid.
-func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
+func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
 	// Try to acquire header metadata from the server if there is any.
-	var err error
 	defer func() {
 		if err != nil {
 			if _, ok := err.(transport.ConnectionError); !ok {
@@ -61,7 +60,7 @@ func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, s
 	}()
 	c.headerMD, err = stream.Header()
 	if err != nil {
-		return err
+		return
 	}
 	p := &parser{r: stream}
 	for {
@@ -69,7 +68,7 @@ func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, s
 			if err == io.EOF {
 				break
 			}
-			return err
+			return
 		}
 	}
 	c.trailerMD = stream.Trailer()
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 11dce44fd3de6cca516a35aed52143d24e12936d..61674729a77baf64efad92ce912aaa858c1c309b 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -684,7 +684,11 @@ func (ac *addrConn) resetTransport(closeTransport bool) error {
 		}
 		ctx, cancel := context.WithTimeout(ac.ctx, timeout)
 		connectTime := time.Now()
-		newTransport, err := transport.NewClientTransport(ctx, ac.addr.Addr, ac.dopts.copts)
+		sinfo := transport.TargetInfo{
+			Addr:     ac.addr.Addr,
+			Metadata: ac.addr.Metadata,
+		}
+		newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts)
 		if err != nil {
 			cancel()
 
@@ -803,7 +807,7 @@ func (ac *addrConn) transportMonitor() {
 }
 
 // wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or
-// iv) transport is in TransientFailure and there's no balancer/failfast is true.
+// iv) transport is in TransientFailure and there is a balancer or failfast is true.
 func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) {
 	for {
 		ac.mu.Lock()
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index debbd79aede0661843c0b048f91dbfb14d7dd9c9..e0bb187ef9758cae49b5fe9b550bf7f164c3dcc5 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -89,10 +89,12 @@ type service struct {
 type Server struct {
 	opts options
 
-	mu    sync.Mutex // guards following
-	lis   map[net.Listener]bool
-	conns map[io.Closer]bool
-	drain bool
+	mu     sync.Mutex // guards following
+	lis    map[net.Listener]bool
+	conns  map[io.Closer]bool
+	drain  bool
+	ctx    context.Context
+	cancel context.CancelFunc
 	// A CondVar to let GracefulStop() blocks until all the pending RPCs are finished
 	// and all the transport goes away.
 	cv     *sync.Cond
@@ -203,6 +205,7 @@ func NewServer(opt ...ServerOption) *Server {
 		m:     make(map[string]*service),
 	}
 	s.cv = sync.NewCond(&s.mu)
+	s.ctx, s.cancel = context.WithCancel(context.Background())
 	if EnableTracing {
 		_, file, line, _ := runtime.Caller(1)
 		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
@@ -324,7 +327,7 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
 // Serve accepts incoming connections on the listener lis, creating a new
 // ServerTransport and service goroutine for each. The service goroutines
 // read gRPC requests and then call the registered handlers to reply to them.
-// Serve returns when lis.Accept fails. lis will be closed when
+// Serve returns when lis.Accept fails with fatal errors.  lis will be closed when
 // this method returns.
 func (s *Server) Serve(lis net.Listener) error {
 	s.mu.Lock()
@@ -344,14 +347,38 @@ func (s *Server) Serve(lis net.Listener) error {
 		}
 		s.mu.Unlock()
 	}()
+
+	var tempDelay time.Duration // how long to sleep on accept failure
+
 	for {
 		rawConn, err := lis.Accept()
 		if err != nil {
+			if ne, ok := err.(interface {
+				Temporary() bool
+			}); ok && ne.Temporary() {
+				if tempDelay == 0 {
+					tempDelay = 5 * time.Millisecond
+				} else {
+					tempDelay *= 2
+				}
+				if max := 1 * time.Second; tempDelay > max {
+					tempDelay = max
+				}
+				s.mu.Lock()
+				s.printf("Accept error: %v; retrying in %v", err, tempDelay)
+				s.mu.Unlock()
+				select {
+				case <-time.After(tempDelay):
+				case <-s.ctx.Done():
+				}
+				continue
+			}
 			s.mu.Lock()
 			s.printf("done serving; Accept = %v", err)
 			s.mu.Unlock()
 			return err
 		}
+		tempDelay = 0
 		// Start a new goroutine to deal with rawConn
 		// so we don't stall this Accept loop goroutine.
 		go s.handleRawConn(rawConn)
@@ -500,7 +527,7 @@ func (s *Server) removeConn(c io.Closer) {
 	defer s.mu.Unlock()
 	if s.conns != nil {
 		delete(s.conns, c)
-		s.cv.Signal()
+		s.cv.Broadcast()
 	}
 }
 
@@ -801,7 +828,7 @@ func (s *Server) Stop() {
 	st := s.conns
 	s.conns = nil
 	// interrupt GracefulStop if Stop and GracefulStop are called concurrently.
-	s.cv.Signal()
+	s.cv.Broadcast()
 	s.mu.Unlock()
 
 	for lis := range listeners {
@@ -812,6 +839,7 @@ func (s *Server) Stop() {
 	}
 
 	s.mu.Lock()
+	s.cancel()
 	if s.events != nil {
 		s.events.Finish()
 		s.events = nil
@@ -824,16 +852,19 @@ func (s *Server) Stop() {
 func (s *Server) GracefulStop() {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	if s.drain == true || s.conns == nil {
+	if s.conns == nil {
 		return
 	}
-	s.drain = true
 	for lis := range s.lis {
 		lis.Close()
 	}
 	s.lis = nil
-	for c := range s.conns {
-		c.(transport.ServerTransport).Drain()
+	s.cancel()
+	if !s.drain {
+		for c := range s.conns {
+			c.(transport.ServerTransport).Drain()
+		}
+		s.drain = true
 	}
 	for len(s.conns) != 0 {
 		s.cv.Wait()
@@ -865,12 +896,26 @@ func (s *Server) testingCloseConns() {
 	s.mu.Unlock()
 }
 
-// SendHeader sends header metadata. It may be called at most once from a unary
-// RPC handler. The ctx is the RPC handler's Context or one derived from it.
-func SendHeader(ctx context.Context, md metadata.MD) error {
+// SetHeader sets the header metadata.
+// When called multiple times, all the provided metadata will be merged.
+// All the metadata will be sent out when one of the following happens:
+//  - grpc.SendHeader() is called;
+//  - The first response is sent out;
+//  - An RPC status is sent out (error or success).
+func SetHeader(ctx context.Context, md metadata.MD) error {
 	if md.Len() == 0 {
 		return nil
 	}
+	stream, ok := transport.StreamFromContext(ctx)
+	if !ok {
+		return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	return stream.SetHeader(md)
+}
+
+// SendHeader sends header metadata. It may be called at most once.
+// The provided md and headers set by SetHeader() will be sent.
+func SendHeader(ctx context.Context, md metadata.MD) error {
 	stream, ok := transport.StreamFromContext(ctx)
 	if !ok {
 		return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
@@ -887,7 +932,6 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
 
 // SetTrailer sets the trailer metadata that will be sent when an RPC returns.
 // When called more than once, all the provided metadata will be merged.
-// The ctx is the RPC handler's Context or one derived from it.
 func SetTrailer(ctx context.Context, md metadata.MD) error {
 	if md.Len() == 0 {
 		return nil
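Serve now retries temporary Accept errors with capped exponential backoff rather than returning immediately. A generic sketch of the same pattern outside gRPC (the listener and handler are placeholders):

    package example

    import (
        "net"
        "time"
    )

    // acceptLoop mirrors the retry behaviour added to Serve: temporary errors
    // (e.g. "too many open files") back off from 5ms up to 1s; anything else
    // ends the loop.
    func acceptLoop(lis net.Listener, handle func(net.Conn)) error {
        var delay time.Duration
        for {
            conn, err := lis.Accept()
            if err != nil {
                if ne, ok := err.(net.Error); ok && ne.Temporary() {
                    if delay == 0 {
                        delay = 5 * time.Millisecond
                    } else {
                        delay *= 2
                    }
                    if delay > time.Second {
                        delay = time.Second
                    }
                    time.Sleep(delay)
                    continue
                }
                return err
            }
            delay = 0
            go handle(conn)
        }
    }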
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 68d777b50985d5050f01bd0f89bd5ceb9d6bef6a..46810544f19f7207519f86ddd911d6ef2e15f215 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -410,9 +410,16 @@ func (cs *clientStream) finish(err error) {
 
 // ServerStream defines the interface a server stream has to satisfy.
 type ServerStream interface {
-	// SendHeader sends the header metadata. It should not be called
-	// after SendProto. It fails if called multiple times or if
-	// called after SendProto.
+	// SetHeader sets the header metadata. It may be called multiple times.
+	// When called multiple times, all the provided metadata will be merged.
+	// All the metadata will be sent out when one of the following happens:
+	//  - ServerStream.SendHeader() is called;
+	//  - The first response is sent out;
+	//  - An RPC status is sent out (error or success).
+	SetHeader(metadata.MD) error
+	// SendHeader sends the header metadata.
+	// The provided md and headers set by SetHeader() will be sent.
+	// It fails if called multiple times.
 	SendHeader(metadata.MD) error
 	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
 	// When called more than once, all the provided metadata will be merged.
@@ -441,6 +448,13 @@ func (ss *serverStream) Context() context.Context {
 	return ss.s.Context()
 }
 
+func (ss *serverStream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	return ss.s.SetHeader(md)
+}
+
 func (ss *serverStream) SendHeader(md metadata.MD) error {
 	return ss.t.WriteHeader(ss.s, md)
 }
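The new ServerStream.SetHeader stages header metadata that is flushed with the first response or an explicit SendHeader. A hedged sketch of a handler helper; the metadata keys are illustrative, and any generated stream type would embed grpc.ServerStream:

    package example

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/metadata"
    )

    // annotate shows the intended call order for the new API: SetHeader stages
    // metadata without sending it (it goes out with the first response or an
    // explicit SendHeader), while SetTrailer rides along with the final status.
    func annotate(ss grpc.ServerStream) error {
        if err := ss.SetHeader(metadata.Pairs("region", "us-east1")); err != nil {
            return err
        }
        // ... send responses on the concrete generated stream type here ...
        ss.SetTrailer(metadata.Pairs("elements", "0"))
        return nil
    }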
diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go
index 3c185541a54bd10080599418930b62c2ae7a78e6..2b0f68016af537501d178f8615168496261e283d 100644
--- a/vendor/google.golang.org/grpc/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/transport/http2_client.go
@@ -57,6 +57,7 @@ import (
 type http2Client struct {
 	target    string // server name/addr
 	userAgent string
+	md        interface{}
 	conn      net.Conn             // underlying communication channel
 	authInfo  credentials.AuthInfo // auth info about the connection
 	nextID    uint32               // the next stream ID to be used
@@ -107,7 +108,7 @@ type http2Client struct {
 	prevGoAwayID uint32
 }
 
-func dial(fn func(context.Context, string) (net.Conn, error), ctx context.Context, addr string) (net.Conn, error) {
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
 	if fn != nil {
 		return fn(ctx, addr)
 	}
@@ -145,9 +146,9 @@ func isTemporary(err error) bool {
 // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
 // and starts to receive messages on it. Non-nil error returns if construction
 // fails.
-func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ ClientTransport, err error) {
+func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (_ ClientTransport, err error) {
 	scheme := "http"
-	conn, err := dial(opts.Dialer, ctx, addr)
+	conn, err := dial(ctx, opts.Dialer, addr.Addr)
 	if err != nil {
 		return nil, connectionErrorf(true, err, "transport: %v", err)
 	}
@@ -160,7 +161,7 @@ func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ Cl
 	var authInfo credentials.AuthInfo
 	if creds := opts.TransportCredentials; creds != nil {
 		scheme = "https"
-		conn, authInfo, err = creds.ClientHandshake(ctx, addr, conn)
+		conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn)
 		if err != nil {
 			// Credentials handshake errors are typically considered permanent
 			// to avoid retrying on e.g. bad certificates.
@@ -174,8 +175,9 @@ func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ Cl
 	}
 	var buf bytes.Buffer
 	t := &http2Client{
-		target:    addr,
+		target:    addr.Addr,
 		userAgent: ua,
+		md:        addr.Metadata,
 		conn:      conn,
 		authInfo:  authInfo,
 		// The client initiated stream id is odd starting from 1.
@@ -400,6 +402,16 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 			}
 		}
 	}
+	if md, ok := t.md.(*metadata.MD); ok {
+		for k, v := range *md {
+			if isReservedHeader(k) {
+				continue
+			}
+			for _, entry := range v {
+				t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+			}
+		}
+	}
 	first := true
 	// Sends the headers in a single batch even when they span multiple frames.
 	for !endHeaders {
@@ -790,6 +802,9 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
 }
 
 func (t *http2Client) handlePing(f *http2.PingFrame) {
+	if f.IsAck() { // Do nothing.
+		return
+	}
 	pingAck := &ping{ack: true}
 	copy(pingAck.data[:], f.Data[:])
 	t.controlBuf.put(pingAck)
diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go
index f753c4f1ead60e3c2567e99309948611d83245ad..a62fb7c228f14216fa1ff27ffac7cc5dde755352 100644
--- a/vendor/google.golang.org/grpc/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/transport/http2_server.go
@@ -405,6 +405,9 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
 }
 
 func (t *http2Server) handlePing(f *http2.PingFrame) {
+	if f.IsAck() { // Do nothing.
+		return
+	}
 	pingAck := &ping{ack: true}
 	copy(pingAck.data[:], f.Data[:])
 	t.controlBuf.put(pingAck)
@@ -462,6 +465,14 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
 		return ErrIllegalHeaderWrite
 	}
 	s.headerOk = true
+	if md.Len() > 0 {
+		if s.header.Len() > 0 {
+			s.header = metadata.Join(s.header, md)
+		} else {
+			s.header = md
+		}
+	}
+	md = s.header
 	s.mu.Unlock()
 	if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
 		return err
@@ -493,7 +504,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
 // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
 // OK is adopted.
 func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
-	var headersSent bool
+	var headersSent, hasHeader bool
 	s.mu.Lock()
 	if s.state == streamDone {
 		s.mu.Unlock()
@@ -502,7 +513,16 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
 	if s.headerOk {
 		headersSent = true
 	}
+	if s.header.Len() > 0 {
+		hasHeader = true
+	}
 	s.mu.Unlock()
+
+	if !headersSent && hasHeader {
+		t.WriteHeader(s, nil)
+		headersSent = true
+	}
+
 	if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
 		return err
 	}
@@ -548,29 +568,10 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
 	}
 	if !s.headerOk {
 		writeHeaderFrame = true
-		s.headerOk = true
 	}
 	s.mu.Unlock()
 	if writeHeaderFrame {
-		if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
-			return err
-		}
-		t.hBuf.Reset()
-		t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
-		t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
-		if s.sendCompress != "" {
-			t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
-		}
-		p := http2.HeadersFrameParam{
-			StreamID:      s.id,
-			BlockFragment: t.hBuf.Bytes(),
-			EndHeaders:    true,
-		}
-		if err := t.framer.writeHeaders(false, p); err != nil {
-			t.Close()
-			return connectionErrorf(true, err, "transport: %v", err)
-		}
-		t.writableChan <- 0
+		t.WriteHeader(s, nil)
 	}
 	r := bytes.NewBuffer(data)
 	for {
diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go
index 3d6b6a6d511656c94e5c8d903d9e35806429fdd5..413f7493b44833934ff3ebd2383d1d02dc630ace 100644
--- a/vendor/google.golang.org/grpc/transport/transport.go
+++ b/vendor/google.golang.org/grpc/transport/transport.go
@@ -286,9 +286,27 @@ func (s *Stream) StatusDesc() string {
 	return s.statusDesc
 }
 
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+func (s *Stream) SetHeader(md metadata.MD) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.headerOk || s.state == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	if md.Len() == 0 {
+		return nil
+	}
+	s.header = metadata.Join(s.header, md)
+	return nil
+}
+
 // SetTrailer sets the trailer metadata which will be sent with the RPC status
 // by the server. This can be called multiple times. Server side only.
 func (s *Stream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	s.trailer = metadata.Join(s.trailer, md)
@@ -343,7 +361,7 @@ func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authI
 	return newHTTP2Server(conn, maxStreams, authInfo)
 }
 
-// ConnectOptions covers all relevant options for dialing a server.
+// ConnectOptions covers all relevant options for communicating with the server.
 type ConnectOptions struct {
 	// UserAgent is the application user agent.
 	UserAgent string
@@ -355,9 +373,15 @@ type ConnectOptions struct {
 	TransportCredentials credentials.TransportCredentials
 }
 
+// TargetInfo contains the information of the target such as network address and metadata.
+type TargetInfo struct {
+	Addr     string
+	Metadata interface{}
+}
+
 // NewClientTransport establishes the transport with the required ConnectOptions
 // and returns it to the caller.
-func NewClientTransport(ctx context.Context, target string, opts ConnectOptions) (ClientTransport, error) {
+func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions) (ClientTransport, error) {
 	return newHTTP2Client(ctx, target, opts)
 }
 
diff --git a/vendor/vendor.json b/vendor/vendor.json
index bbe3b5f64820bbf372344971c7889ae51d1f3a17..526d10b0d4936e45e8be3b8ff81c77e61c6abebe 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -21,176 +21,182 @@
 		{
 			"checksumSHA1": "ZLRh6zW4/DnVsGpgtt+ZiIaEFKc=",
 			"path": "cloud.google.com/go/compute/metadata",
-			"revision": "5af4269f950e91e917bab77f1138139023c868c2",
-			"revisionTime": "2016-10-07T18:02:40Z"
+			"revision": "7ee19e74f7fd11897ae3b7c4782d8205e810faa7",
+			"revisionTime": "2016-10-31T01:45:47Z"
 		},
 		{
 			"checksumSHA1": "hiJXjkFEGy+sDFf6O58Ocdy9Rnk=",
 			"path": "cloud.google.com/go/internal",
-			"revision": "5af4269f950e91e917bab77f1138139023c868c2",
-			"revisionTime": "2016-10-07T18:02:40Z"
+			"revision": "7ee19e74f7fd11897ae3b7c4782d8205e810faa7",
+			"revisionTime": "2016-10-31T01:45:47Z"
 		},
 		{
-			"checksumSHA1": "2KjLp82zorcecUxEeoUqIWZtf1E=",
+			"checksumSHA1": "W2xJ0+fvugRhRi1PMi64bYofBbU=",
+			"path": "cloud.google.com/go/internal/optional",
+			"revision": "7ee19e74f7fd11897ae3b7c4782d8205e810faa7",
+			"revisionTime": "2016-10-31T01:45:47Z"
+		},
+		{
+			"checksumSHA1": "0l3lg9DRQ16WhfwQhhT3Q/dQdMU=",
 			"path": "cloud.google.com/go/storage",
-			"revision": "5af4269f950e91e917bab77f1138139023c868c2",
-			"revisionTime": "2016-10-07T18:02:40Z"
+			"revision": "7ee19e74f7fd11897ae3b7c4782d8205e810faa7",
+			"revisionTime": "2016-10-31T01:45:47Z"
 		},
 		{
-			"checksumSHA1": "dq1GRxE/Qsvp1pawHFQ3vaFoYX4=",
+			"checksumSHA1": "NeKH+twA+3z7EzaKQQdN5FIhJP4=",
 			"path": "github.com/aws/aws-sdk-go/aws",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=",
 			"path": "github.com/aws/aws-sdk-go/aws/awserr",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "+q4vdl3l1Wom8K1wfIpJ4jlFsbY=",
 			"path": "github.com/aws/aws-sdk-go/aws/awsutil",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "/232RBWA3KnT7U+wciPS2+wmvR0=",
 			"path": "github.com/aws/aws-sdk-go/aws/client",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=",
 			"path": "github.com/aws/aws-sdk-go/aws/client/metadata",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "c1N3Loy3AS9zD+m5CzpPNAED39U=",
 			"path": "github.com/aws/aws-sdk-go/aws/corehandlers",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
-			"checksumSHA1": "dNZNaOPfBPnzE2CBnfhXXZ9g9jU=",
+			"checksumSHA1": "zu5C95rmCZff6NYZb62lEaT5ibE=",
 			"path": "github.com/aws/aws-sdk-go/aws/credentials",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "KQiUK/zr3mqnAXD7x/X55/iNme0=",
 			"path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=",
 			"path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "4Ipx+5xN0gso+cENC2MHMWmQlR4=",
 			"path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "DwhFsNluCFEwqzyp3hbJR3q2Wqs=",
 			"path": "github.com/aws/aws-sdk-go/aws/defaults",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "8E0fEBUJY/1lJOyVxzTxMGQGInk=",
 			"path": "github.com/aws/aws-sdk-go/aws/ec2metadata",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "5Ac22YMTBmrX/CXaEIXzWljr8UY=",
 			"path": "github.com/aws/aws-sdk-go/aws/request",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
-			"checksumSHA1": "BXuya7NrVg2BtHOM4ED5zAwDkg4=",
+			"checksumSHA1": "eOo6evLMAxQfo7Qkc5/h5euN1Sw=",
 			"path": "github.com/aws/aws-sdk-go/aws/session",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
-			"checksumSHA1": "OgyO1NRszPKURsDpob+zxUR157A=",
+			"checksumSHA1": "diXvBs1LRC0RJ9WK6sllWKdzC04=",
 			"path": "github.com/aws/aws-sdk-go/aws/signer/v4",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
-			"checksumSHA1": "Bm6UrYb2QCzpYseLwwgw6aetgRc=",
+			"checksumSHA1": "Esab5F8KswqkTdB4TtjSvZgs56k=",
 			"path": "github.com/aws/aws-sdk-go/private/endpoints",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=",
 			"path": "github.com/aws/aws-sdk-go/private/protocol",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
-			"checksumSHA1": "isoix7lTx4qIq2zI2xFADtti5SI=",
+			"checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=",
 			"path": "github.com/aws/aws-sdk-go/private/protocol/query",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "5xzix1R8prUyWxgLnzUQoxTsfik=",
 			"path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "TW/7U+/8ormL7acf6z2rv2hDD+s=",
 			"path": "github.com/aws/aws-sdk-go/private/protocol/rest",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
-			"checksumSHA1": "Y6Db2GGfGD9LPpcJIPj8vXE8BbQ=",
+			"checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=",
 			"path": "github.com/aws/aws-sdk-go/private/protocol/restxml",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "eUEkjyMPAuekKBE4ou+nM9tXEas=",
 			"path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
 			"checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=",
 			"path": "github.com/aws/aws-sdk-go/private/waiter",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
-			"checksumSHA1": "bTI27l3ylL0rGFLnFGkaBTnG1lE=",
+			"checksumSHA1": "qePzsxMg/6Y4mf02KUl8mnJJWFE=",
 			"path": "github.com/aws/aws-sdk-go/service/s3",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
-			"checksumSHA1": "jqpZ6GIYgpVv5yihtUcLtpBq3iw=",
+			"checksumSHA1": "eOOeajRwz6FG6QF+F4LQvbm3Ilk=",
 			"path": "github.com/aws/aws-sdk-go/service/sts",
-			"revision": "c0d7d3282e4c14991a4814de7eae4774e388de61",
-			"revisionTime": "2016-10-07T22:43:33Z"
+			"revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
+			"revisionTime": "2016-10-27T22:14:16Z"
 		},
 		{
-			"checksumSHA1": "Lf3uUXTkKK5DJ37BxQvxO1Fq+K8=",
+			"checksumSHA1": "dvabztWVQX8f6oMLRyv4dLH+TGY=",
 			"path": "github.com/davecgh/go-spew/spew",
-			"revision": "6d212800a42e8ab5c146b8ace3490ee17e5225f9",
-			"revisionTime": "2016-09-07T16:21:46Z"
+			"revision": "346938d642f2ec3594ed81d874461961cd0faa76",
+			"revisionTime": "2016-10-29T20:57:26Z"
 		},
 		{
 			"checksumSHA1": "KCWVxG+J8SxHGlGiUghe0KBGsa8=",
@@ -199,10 +205,10 @@
 			"revisionTime": "2016-08-07T23:55:29Z"
 		},
 		{
-			"checksumSHA1": "e2o/P8ZZ8Iz+um6/nyLUEg7S2H8=",
+			"checksumSHA1": "hveFTNQ9YEyYRs6SWuXM+XU9qRI=",
 			"path": "github.com/fsnotify/fsnotify",
-			"revision": "944cff21b3baf3ced9a880365682152ba577d348",
-			"revisionTime": "2016-10-05T04:06:20Z"
+			"revision": "fd9ec7deca8bf46ecd2a795baaacf2b3a9be1197",
+			"revisionTime": "2016-10-26T20:31:22Z"
 		},
 		{
 			"checksumSHA1": "cVyhKIRI2gQrgpn5qrBeAqErmWM=",
@@ -211,22 +217,22 @@
 			"revisionTime": "2016-08-27T06:11:18Z"
 		},
 		{
-			"checksumSHA1": "jEXpLrWXoQvH/zk1lW5Si0swr6Y=",
+			"checksumSHA1": "++NbuL6/cpFR712EXGnWusURqC0=",
 			"path": "github.com/go-sql-driver/mysql",
-			"revision": "0b58b37b664c21f3010e836f1b931e1d0b0b0685",
-			"revisionTime": "2016-08-02T11:38:42Z"
+			"revision": "2a6c6079c7eff49a7e9d641e109d922f124a3e4c",
+			"revisionTime": "2016-10-26T16:05:54Z"
 		},
 		{
-			"checksumSHA1": "qjr2SKQanbmna221z0Ce2n0hnDE=",
+			"checksumSHA1": "SVXOQdpDBh0ihdZ5aIflgdA+Rpw=",
 			"path": "github.com/golang/protobuf/proto",
-			"revision": "df1d3ca07d2d07bba352d5b73c4313b4e2a6203e",
-			"revisionTime": "2016-09-27T20:09:49Z"
+			"revision": "98fa357170587e470c5f27d3c3ea0947b71eb455",
+			"revisionTime": "2016-10-12T20:53:35Z"
 		},
 		{
-			"checksumSHA1": "WsQM7/GEq4355WFQ4FMjutGnvNU=",
+			"checksumSHA1": "TSS9EqiOoJcVUMNb46zVngVrAO8=",
 			"path": "github.com/google/go-github/github",
-			"revision": "3e246d29992beb5a5d2fc253adebea86997c45b0",
-			"revisionTime": "2016-09-22T19:31:51Z"
+			"revision": "f7fcf6f52ff94adf1cc0ded41e7768d2ad729972",
+			"revisionTime": "2016-10-28T15:10:40Z"
 		},
 		{
 			"checksumSHA1": "yyAzHoiVLu+xywYI2BDyRq6sOqE=",
@@ -234,6 +240,12 @@
 			"revision": "9235644dd9e52eeae6fa48efd539fdc351a0af53",
 			"revisionTime": "2016-03-11T01:20:12Z"
 		},
+		{
+			"checksumSHA1": "aOMWp7Ut7soV3u/pf5DgBOuZ+3E=",
+			"path": "github.com/googleapis/gax-go",
+			"revision": "4f7da601ca02aa546c5ef911a0b6595fdadf4e18",
+			"revisionTime": "2016-10-18T00:53:01Z"
+		},
 		{
 			"checksumSHA1": "g/V4qrXjUGG9B+e3hB+4NAYJ5Gs=",
 			"path": "github.com/gorilla/context",
@@ -241,16 +253,16 @@
 			"revisionTime": "2016-08-17T18:46:32Z"
 		},
 		{
-			"checksumSHA1": "bjWdAqudYNGaTnoj7Se1pEKqWF8=",
+			"checksumSHA1": "h6Nxp2ashaaMJYWE//+X83Hpi4A=",
 			"path": "github.com/gorilla/csrf",
-			"revision": "bbe668740d1d0d07e117e840a4b704e06e719da7",
-			"revisionTime": "2016-10-02T18:00:12Z"
+			"revision": "fdae182b1882857ae6a246467084c30af79be824",
+			"revisionTime": "2016-10-23T17:09:07Z"
 		},
 		{
-			"checksumSHA1": "6/9VW/AyJyjRXLu+nbhqGYvOO2k=",
+			"checksumSHA1": "wu3jCY1Ny2tPdsj36W6YH1Bm2No=",
 			"path": "github.com/gorilla/handlers",
-			"revision": "a5775781a543af3c6b9f5baf10995e4d14168950",
-			"revisionTime": "2016-08-16T18:47:29Z"
+			"revision": "e1b2144f2167de0e1042d1d35e5cba5119d4fb5d",
+			"revisionTime": "2016-10-28T13:32:15Z"
 		},
 		{
 			"checksumSHA1": "urMd7A9QPAJYY0GZJL9qBhlUmD8=",
@@ -297,74 +309,74 @@
 		{
 			"checksumSHA1": "8OPDk+bKyRGJoKcS4QNw9F7dpE8=",
 			"path": "github.com/hashicorp/hcl",
-			"revision": "6f5bfed9a0a22222fbe4e731ae3481730ba41e93",
-			"revisionTime": "2016-10-08T07:35:57Z"
+			"revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
+			"revisionTime": "2016-10-28T23:32:40Z"
 		},
 		{
 			"checksumSHA1": "XQmjDva9JCGGkIecOgwtBEMCJhU=",
 			"path": "github.com/hashicorp/hcl/hcl/ast",
-			"revision": "6f5bfed9a0a22222fbe4e731ae3481730ba41e93",
-			"revisionTime": "2016-10-08T07:35:57Z"
+			"revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
+			"revisionTime": "2016-10-28T23:32:40Z"
 		},
 		{
-			"checksumSHA1": "un4pN4yL5bl6LL3CgWacFbIeHVg=",
+			"checksumSHA1": "croNloscHsjX87X+4/cKOURf1EY=",
 			"path": "github.com/hashicorp/hcl/hcl/parser",
-			"revision": "6f5bfed9a0a22222fbe4e731ae3481730ba41e93",
-			"revisionTime": "2016-10-08T07:35:57Z"
+			"revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
+			"revisionTime": "2016-10-28T23:32:40Z"
 		},
 		{
 			"checksumSHA1": "lgR7PSAZ0RtvAc9OCtCnNsF/x8g=",
 			"path": "github.com/hashicorp/hcl/hcl/scanner",
-			"revision": "6f5bfed9a0a22222fbe4e731ae3481730ba41e93",
-			"revisionTime": "2016-10-08T07:35:57Z"
+			"revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
+			"revisionTime": "2016-10-28T23:32:40Z"
 		},
 		{
 			"checksumSHA1": "JlZmnzqdmFFyb1+2afLyR3BOE/8=",
 			"path": "github.com/hashicorp/hcl/hcl/strconv",
-			"revision": "6f5bfed9a0a22222fbe4e731ae3481730ba41e93",
-			"revisionTime": "2016-10-08T07:35:57Z"
+			"revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
+			"revisionTime": "2016-10-28T23:32:40Z"
 		},
 		{
 			"checksumSHA1": "c6yprzj06ASwCo18TtbbNNBHljA=",
 			"path": "github.com/hashicorp/hcl/hcl/token",
-			"revision": "6f5bfed9a0a22222fbe4e731ae3481730ba41e93",
-			"revisionTime": "2016-10-08T07:35:57Z"
+			"revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
+			"revisionTime": "2016-10-28T23:32:40Z"
 		},
 		{
-			"checksumSHA1": "fpQQdjFUZOoslYuFNKZMSO0N0ik=",
+			"checksumSHA1": "138aCV5n8n7tkGYMsMVQQnnLq+0=",
 			"path": "github.com/hashicorp/hcl/json/parser",
-			"revision": "6f5bfed9a0a22222fbe4e731ae3481730ba41e93",
-			"revisionTime": "2016-10-08T07:35:57Z"
+			"revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
+			"revisionTime": "2016-10-28T23:32:40Z"
 		},
 		{
 			"checksumSHA1": "YdvFsNOMSWMLnY6fcliWQa0O5Fw=",
 			"path": "github.com/hashicorp/hcl/json/scanner",
-			"revision": "6f5bfed9a0a22222fbe4e731ae3481730ba41e93",
-			"revisionTime": "2016-10-08T07:35:57Z"
+			"revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
+			"revisionTime": "2016-10-28T23:32:40Z"
 		},
 		{
 			"checksumSHA1": "fNlXQCQEnb+B3k5UDL/r15xtSJY=",
 			"path": "github.com/hashicorp/hcl/json/token",
-			"revision": "6f5bfed9a0a22222fbe4e731ae3481730ba41e93",
-			"revisionTime": "2016-10-08T07:35:57Z"
+			"revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
+			"revisionTime": "2016-10-28T23:32:40Z"
 		},
 		{
-			"checksumSHA1": "LaOQaDi4rc0QVgWtzDrQqa7hJgs=",
+			"checksumSHA1": "2fkVZIzvxIGBLhSiVnkTgGiqpQ4=",
 			"path": "github.com/hashicorp/vault/api",
-			"revision": "80281c16fcaab7aa2ad0a67816c0bdc307476a80",
-			"revisionTime": "2016-10-07T15:06:01Z"
+			"revision": "e78065ec4ea0f022789e60f0c10c8a996f6d23d4",
+			"revisionTime": "2016-10-30T17:09:56Z"
 		},
 		{
 			"checksumSHA1": "ft77GtqeZEeCXioGpF/s6DlGm/U=",
 			"path": "github.com/hashicorp/vault/helper/compressutil",
-			"revision": "80281c16fcaab7aa2ad0a67816c0bdc307476a80",
-			"revisionTime": "2016-10-07T15:06:01Z"
+			"revision": "e78065ec4ea0f022789e60f0c10c8a996f6d23d4",
+			"revisionTime": "2016-10-30T17:09:56Z"
 		},
 		{
 			"checksumSHA1": "yUiSTPf0QUuL2r/81sjuytqBoeQ=",
 			"path": "github.com/hashicorp/vault/helper/jsonutil",
-			"revision": "80281c16fcaab7aa2ad0a67816c0bdc307476a80",
-			"revisionTime": "2016-10-07T15:06:01Z"
+			"revision": "e78065ec4ea0f022789e60f0c10c8a996f6d23d4",
+			"revisionTime": "2016-10-30T17:09:56Z"
 		},
 		{
 			"checksumSHA1": "0ZrwvB6KoGPj2PoDNSEJwxQ6Mog=",
@@ -385,10 +397,10 @@
 			"revisionTime": "2016-09-08T09:36:58Z"
 		},
 		{
-			"checksumSHA1": "0YgW0+rmiIzLlvhIKlQ0s0olPg8=",
+			"checksumSHA1": "9FJUwn3EIgASVki+p8IHgWVC5vQ=",
 			"path": "github.com/mattn/go-sqlite3",
-			"revision": "e5a3c16c5c1d80b24f633e68aecd6b0702786d3d",
-			"revisionTime": "2016-10-04T16:01:36Z"
+			"revision": "86681de00adef4f8040947b7d35f97000fc5a230",
+			"revisionTime": "2016-10-28T14:22:18Z"
 		},
 		{
 			"checksumSHA1": "AXacfEchaUqT5RGmPmMXsOWRhv8=",
@@ -397,10 +409,10 @@
 			"revisionTime": "2016-06-21T17:42:43Z"
 		},
 		{
-			"checksumSHA1": "HzhmHrGdk67cMJ2xt5ToPxhwLWk=",
+			"checksumSHA1": "UuXgD2dDojfS8AViUEe15gLIWZE=",
 			"path": "github.com/mitchellh/mapstructure",
-			"revision": "a6ef2f080c66d0a2e94e97cf74f80f772855da63",
-			"revisionTime": "2016-10-06T23:39:02Z"
+			"revision": "f3009df150dadf309fdee4a54ed65c124afad715",
+			"revisionTime": "2016-10-20T16:18:36Z"
 		},
 		{
 			"checksumSHA1": "8Y05Pz7onrQPcVWW6JStSsYRh6E=",
@@ -423,8 +435,8 @@
 		{
 			"checksumSHA1": "ynJSWoF6v+3zMnh9R0QmmG6iGV8=",
 			"path": "github.com/pkg/errors",
-			"revision": "839d9e913e063e28dfd0e6c7b7512793e0a48be9",
-			"revisionTime": "2016-10-02T05:25:12Z"
+			"revision": "248dadf4e9068a0b3e79f02ed0a610d935de5302",
+			"revisionTime": "2016-10-29T09:36:37Z"
 		},
 		{
 			"checksumSHA1": "k9SlQdp/DTB72G/u4aNecX/fFIg=",
@@ -487,16 +499,16 @@
 			"revisionTime": "2016-03-01T12:00:06Z"
 		},
 		{
-			"checksumSHA1": "UVxLV6wxC8kPxlhShSfwWchKYJQ=",
+			"checksumSHA1": "GxPD7A0NjMDom1xte0mghkpzr0E=",
 			"path": "github.com/spf13/pflag",
-			"revision": "b83537d79690b75cac5e021b036ae16792bf0f20",
-			"revisionTime": "2016-10-06T16:53:40Z"
+			"revision": "5ccb023bc27df288a957c5e994cd44fd19619465",
+			"revisionTime": "2016-10-24T13:13:51Z"
 		},
 		{
-			"checksumSHA1": "mgd16Kh5W/YEw7lEovBxxnoOZ4A=",
+			"checksumSHA1": "802GjFNHMmnFXEIkQ137ucUUacI=",
 			"path": "github.com/spf13/viper",
-			"revision": "ec4eb2fa8549869ae7a2accd4fcc83d1c0555c15",
-			"revisionTime": "2016-10-08T08:00:18Z"
+			"revision": "651d9d916abc3c3d6a91a12549495caba5edffd2",
+			"revisionTime": "2016-10-29T21:33:52Z"
 		},
 		{
 			"checksumSHA1": "Q2V7Zs3diLmLfmfbiuLpSxETSuY=",
@@ -523,346 +535,346 @@
 			"revisionTime": "2016-09-23T17:06:11Z"
 		},
 		{
-			"checksumSHA1": "h+pFYiRHBogczS8/F1NoN3Ata44=",
+			"checksumSHA1": "dwOedwBJ1EIK9+S3t108Bx054Y8=",
 			"path": "golang.org/x/crypto/curve25519",
-			"revision": "d172538b2cfce0c13cee31e647d0367aa8cd2486",
-			"revisionTime": "2015-06-02T14:54:44Z"
+			"revision": "b2fa06b6af4b7c9bfeb8569ab7b17f04550717bf",
+			"revisionTime": "2016-10-28T17:07:08Z"
 		},
 		{
 			"checksumSHA1": "wGb//LjBPNxYHqk+dcLo7BjPXK8=",
 			"path": "golang.org/x/crypto/ed25519",
-			"revision": "d172538b2cfce0c13cee31e647d0367aa8cd2486",
-			"revisionTime": "2015-06-02T14:54:44Z"
+			"revision": "b2fa06b6af4b7c9bfeb8569ab7b17f04550717bf",
+			"revisionTime": "2016-10-28T17:07:08Z"
 		},
 		{
 			"checksumSHA1": "LXFcVx8I587SnWmKycSDEq9yvK8=",
 			"path": "golang.org/x/crypto/ed25519/internal/edwards25519",
-			"revision": "d172538b2cfce0c13cee31e647d0367aa8cd2486",
-			"revisionTime": "2015-06-02T14:54:44Z"
+			"revision": "b2fa06b6af4b7c9bfeb8569ab7b17f04550717bf",
+			"revisionTime": "2016-10-28T17:07:08Z"
 		},
 		{
-			"checksumSHA1": "1LydpuiE3oBdkbYvSdKKwe9lsLs=",
+			"checksumSHA1": "LlElMHeTC34ng8eHzjvtUhAgrr8=",
 			"path": "golang.org/x/crypto/ssh",
-			"revision": "d172538b2cfce0c13cee31e647d0367aa8cd2486",
-			"revisionTime": "2015-06-02T14:54:44Z"
+			"revision": "b2fa06b6af4b7c9bfeb8569ab7b17f04550717bf",
+			"revisionTime": "2016-10-28T17:07:08Z"
 		},
 		{
 			"checksumSHA1": "SJ3Ma3Ozavxpbh1usZWBCnzMKIc=",
 			"path": "golang.org/x/crypto/ssh/agent",
-			"revision": "d172538b2cfce0c13cee31e647d0367aa8cd2486",
-			"revisionTime": "2015-06-02T14:54:44Z"
+			"revision": "b2fa06b6af4b7c9bfeb8569ab7b17f04550717bf",
+			"revisionTime": "2016-10-28T17:07:08Z"
 		},
 		{
-			"checksumSHA1": "9jjO5GjLa0XF/nfWihF02RoH4qc=",
+			"checksumSHA1": "4hQNaJUg67lF/QcO0NKzUeqlaew=",
 			"path": "golang.org/x/net/context",
-			"revision": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6",
-			"revisionTime": "2016-10-05T16:48:53Z"
+			"revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
+			"revisionTime": "2016-10-30T07:22:20Z"
 		},
 		{
 			"checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=",
 			"path": "golang.org/x/net/context/ctxhttp",
-			"revision": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6",
-			"revisionTime": "2016-10-05T16:48:53Z"
+			"revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
+			"revisionTime": "2016-10-30T07:22:20Z"
 		},
 		{
-			"checksumSHA1": "TUt1YBoSgtklP3MKWuOJwXBrkRw=",
+			"checksumSHA1": "9h3b5EBNNgpvZbn6B+AbdRAmcIg=",
 			"path": "golang.org/x/net/http2",
-			"revision": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6",
-			"revisionTime": "2016-10-05T16:48:53Z"
+			"revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
+			"revisionTime": "2016-10-30T07:22:20Z"
 		},
 		{
 			"checksumSHA1": "HzuGD7AwgC0p1az1WAQnEFnEk98=",
 			"path": "golang.org/x/net/http2/hpack",
-			"revision": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6",
-			"revisionTime": "2016-10-05T16:48:53Z"
+			"revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
+			"revisionTime": "2016-10-30T07:22:20Z"
 		},
 		{
 			"checksumSHA1": "GIGmSrYACByf5JDIP9ByBZksY80=",
 			"path": "golang.org/x/net/idna",
-			"revision": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6",
-			"revisionTime": "2016-10-05T16:48:53Z"
+			"revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
+			"revisionTime": "2016-10-30T07:22:20Z"
 		},
 		{
 			"checksumSHA1": "/k7k6eJDkxXx6K9Zpo/OwNm58XM=",
 			"path": "golang.org/x/net/internal/timeseries",
-			"revision": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6",
-			"revisionTime": "2016-10-05T16:48:53Z"
+			"revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
+			"revisionTime": "2016-10-30T07:22:20Z"
 		},
 		{
 			"checksumSHA1": "3xyuaSNmClqG4YWC7g0isQIbUTc=",
 			"path": "golang.org/x/net/lex/httplex",
-			"revision": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6",
-			"revisionTime": "2016-10-05T16:48:53Z"
+			"revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
+			"revisionTime": "2016-10-30T07:22:20Z"
 		},
 		{
 			"checksumSHA1": "4MMbG0LI3ghvWooRn36RmDrFIB0=",
 			"path": "golang.org/x/net/trace",
-			"revision": "f4b625ec9b21d620bb5ce57f2dfc3e08ca97fce6",
-			"revisionTime": "2016-10-05T16:48:53Z"
+			"revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
+			"revisionTime": "2016-10-30T07:22:20Z"
 		},
 		{
 			"checksumSHA1": "XH7CgbL5Z8COUc+MKrYqS3FFosY=",
 			"path": "golang.org/x/oauth2",
-			"revision": "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5",
-			"revisionTime": "2016-10-06T21:47:20Z"
+			"revision": "25b4fb1468cb89700c7c060cb99f30581a61f5e3",
+			"revisionTime": "2016-10-25T17:59:40Z"
 		},
 		{
 			"checksumSHA1": "Yokz/Wl4zeuOZG2ev8LuaLtMotE=",
 			"path": "golang.org/x/oauth2/github",
-			"revision": "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5",
-			"revisionTime": "2016-10-06T21:47:20Z"
+			"revision": "25b4fb1468cb89700c7c060cb99f30581a61f5e3",
+			"revisionTime": "2016-10-25T17:59:40Z"
 		},
 		{
 			"checksumSHA1": "92TBjKPPMEcAfNqc2xWF8fSfZMg=",
 			"path": "golang.org/x/oauth2/google",
-			"revision": "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5",
-			"revisionTime": "2016-10-06T21:47:20Z"
+			"revision": "25b4fb1468cb89700c7c060cb99f30581a61f5e3",
+			"revisionTime": "2016-10-25T17:59:40Z"
 		},
 		{
 			"checksumSHA1": "D3v/aqfB9swlaZcSksCoF+lbOqo=",
 			"path": "golang.org/x/oauth2/internal",
-			"revision": "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5",
-			"revisionTime": "2016-10-06T21:47:20Z"
+			"revision": "25b4fb1468cb89700c7c060cb99f30581a61f5e3",
+			"revisionTime": "2016-10-25T17:59:40Z"
 		},
 		{
 			"checksumSHA1": "huVltYnXdRFDJLgp/ZP9IALzG7g=",
 			"path": "golang.org/x/oauth2/jws",
-			"revision": "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5",
-			"revisionTime": "2016-10-06T21:47:20Z"
+			"revision": "25b4fb1468cb89700c7c060cb99f30581a61f5e3",
+			"revisionTime": "2016-10-25T17:59:40Z"
 		},
 		{
 			"checksumSHA1": "McqNj0/805YfYQJQGomeB0s+EcU=",
 			"path": "golang.org/x/oauth2/jwt",
-			"revision": "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5",
-			"revisionTime": "2016-10-06T21:47:20Z"
+			"revision": "25b4fb1468cb89700c7c060cb99f30581a61f5e3",
+			"revisionTime": "2016-10-25T17:59:40Z"
 		},
 		{
-			"checksumSHA1": "u4rtURsE9Cn7TK4OPwH8axBMK6M=",
+			"checksumSHA1": "aVgPDgwY3/t4J/JOw9H3FVMHqh0=",
 			"path": "golang.org/x/sys/unix",
-			"revision": "8d1157a435470616f975ff9bb013bea8d0962067",
-			"revisionTime": "2016-10-06T02:47:49Z"
+			"revision": "c200b10b5d5e122be351b67af224adc6128af5bf",
+			"revisionTime": "2016-10-22T18:22:21Z"
 		},
 		{
-			"checksumSHA1": "ZgBUmCCorTWurKNFm5Vb/U5t0xw=",
+			"checksumSHA1": "B3d1je1cb+pDWpniE8CCuC6xJQI=",
 			"path": "golang.org/x/text/internal/gen",
-			"revision": "ede1cb9f9f2f84c3bace9ca113fd740fc916cdd0",
-			"revisionTime": "2016-09-29T15:02:56Z"
+			"revision": "a8b38433e35b65ba247bb267317037dee1b70cea",
+			"revisionTime": "2016-10-19T13:35:53Z"
 		},
 		{
 			"checksumSHA1": "47nwiUyVBY2RKoEGXmCSvusY4Js=",
 			"path": "golang.org/x/text/internal/triegen",
-			"revision": "ede1cb9f9f2f84c3bace9ca113fd740fc916cdd0",
-			"revisionTime": "2016-09-29T15:02:56Z"
+			"revision": "a8b38433e35b65ba247bb267317037dee1b70cea",
+			"revisionTime": "2016-10-19T13:35:53Z"
 		},
 		{
-			"checksumSHA1": "H/0MDIWRnNYJPmfNCKmWm5aIGcA=",
+			"checksumSHA1": "Yd5wMObzagIfCiKLpZbtBIrOUA4=",
 			"path": "golang.org/x/text/internal/ucd",
-			"revision": "ede1cb9f9f2f84c3bace9ca113fd740fc916cdd0",
-			"revisionTime": "2016-09-29T15:02:56Z"
+			"revision": "a8b38433e35b65ba247bb267317037dee1b70cea",
+			"revisionTime": "2016-10-19T13:35:53Z"
 		},
 		{
 			"checksumSHA1": "ziMb9+ANGRJSSIuxYdRbA+cDRBQ=",
 			"path": "golang.org/x/text/transform",
-			"revision": "ede1cb9f9f2f84c3bace9ca113fd740fc916cdd0",
-			"revisionTime": "2016-09-29T15:02:56Z"
+			"revision": "a8b38433e35b65ba247bb267317037dee1b70cea",
+			"revisionTime": "2016-10-19T13:35:53Z"
 		},
 		{
-			"checksumSHA1": "n94g6qdzv0fgQFGelH4/HXOthl0=",
+			"checksumSHA1": "i14IZXKECObKRUNvTr7xivSL1IU=",
 			"path": "golang.org/x/text/unicode/cldr",
-			"revision": "ede1cb9f9f2f84c3bace9ca113fd740fc916cdd0",
-			"revisionTime": "2016-09-29T15:02:56Z"
+			"revision": "a8b38433e35b65ba247bb267317037dee1b70cea",
+			"revisionTime": "2016-10-19T13:35:53Z"
 		},
 		{
-			"checksumSHA1": "Aj3JSVO324FCjEAGm4ZwmC79bbo=",
+			"checksumSHA1": "Vircurgvsnt4k26havmxPM67PUA=",
 			"path": "golang.org/x/text/unicode/norm",
-			"revision": "ede1cb9f9f2f84c3bace9ca113fd740fc916cdd0",
-			"revisionTime": "2016-09-29T15:02:56Z"
+			"revision": "a8b38433e35b65ba247bb267317037dee1b70cea",
+			"revisionTime": "2016-10-19T13:35:53Z"
 		},
 		{
 			"checksumSHA1": "TFmTijDJtWhpUwG86ER0n2Xp+/U=",
 			"path": "google.golang.org/api/gensupport",
-			"revision": "3cf64a039723963488f603d140d0aec154fdcd20",
-			"revisionTime": "2016-10-06T16:45:29Z"
+			"revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
+			"revisionTime": "2016-10-27T16:31:49Z"
 		},
 		{
-			"checksumSHA1": "ia5kPSNOC45E23oF6A00m976BHY=",
+			"checksumSHA1": "okBeNPclS/nvIkPEH/tZa3cxDnM=",
 			"path": "google.golang.org/api/googleapi",
-			"revision": "3cf64a039723963488f603d140d0aec154fdcd20",
-			"revisionTime": "2016-10-06T16:45:29Z"
+			"revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
+			"revisionTime": "2016-10-27T16:31:49Z"
 		},
 		{
 			"checksumSHA1": "1K0JxrUfDqAB3MyRiU1LKjfHyf4=",
 			"path": "google.golang.org/api/googleapi/internal/uritemplates",
-			"revision": "3cf64a039723963488f603d140d0aec154fdcd20",
-			"revisionTime": "2016-10-06T16:45:29Z"
+			"revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
+			"revisionTime": "2016-10-27T16:31:49Z"
 		},
 		{
 			"checksumSHA1": "kFHmY+4xvvjiLwhkuFg7ZIaGHmE=",
 			"path": "google.golang.org/api/internal",
-			"revision": "3cf64a039723963488f603d140d0aec154fdcd20",
-			"revisionTime": "2016-10-06T16:45:29Z"
+			"revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
+			"revisionTime": "2016-10-27T16:31:49Z"
 		},
 		{
 			"checksumSHA1": "slcGOTGSdukEPPSN81Q5WZGmhog=",
 			"path": "google.golang.org/api/iterator",
-			"revision": "3cf64a039723963488f603d140d0aec154fdcd20",
-			"revisionTime": "2016-10-06T16:45:29Z"
+			"revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
+			"revisionTime": "2016-10-27T16:31:49Z"
 		},
 		{
 			"checksumSHA1": "2veoulDfsbD5iUCMhnXAnPNHDNM=",
 			"path": "google.golang.org/api/oauth2/v2",
-			"revision": "3cf64a039723963488f603d140d0aec154fdcd20",
-			"revisionTime": "2016-10-06T16:45:29Z"
+			"revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
+			"revisionTime": "2016-10-27T16:31:49Z"
 		},
 		{
 			"checksumSHA1": "Lc4yBmQAP9J3f8XYa9nH/D0FVhE=",
 			"path": "google.golang.org/api/option",
-			"revision": "3cf64a039723963488f603d140d0aec154fdcd20",
-			"revisionTime": "2016-10-06T16:45:29Z"
+			"revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
+			"revisionTime": "2016-10-27T16:31:49Z"
 		},
 		{
-			"checksumSHA1": "q6pXPo7akhYZw2DoZJNfpwjb1q0=",
+			"checksumSHA1": "GLBNLgjn9wWpwkPPmjZaYk5auAI=",
 			"path": "google.golang.org/api/storage/v1",
-			"revision": "3cf64a039723963488f603d140d0aec154fdcd20",
-			"revisionTime": "2016-10-06T16:45:29Z"
+			"revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
+			"revisionTime": "2016-10-27T16:31:49Z"
 		},
 		{
 			"checksumSHA1": "Ms24PFj1glBAb69TtGuq9qeqcYA=",
 			"path": "google.golang.org/api/transport",
-			"revision": "3cf64a039723963488f603d140d0aec154fdcd20",
-			"revisionTime": "2016-10-06T16:45:29Z"
+			"revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
+			"revisionTime": "2016-10-27T16:31:49Z"
 		},
 		{
-			"checksumSHA1": "OUlby05bSL50Wk1/hbu92mpKmvk=",
+			"checksumSHA1": "BYNXzv50n5HWU4OpKBw9JlrKIRI=",
 			"path": "google.golang.org/appengine",
-			"revision": "3f4dbbc0ec153a39878fd524ece9f39732bd4998",
-			"revisionTime": "2016-10-06T20:09:41Z"
+			"revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
+			"revisionTime": "2016-10-25T16:43:32Z"
 		},
 		{
-			"checksumSHA1": "Iq/i8Bsr/xrLXTT14mBcyLYOyrY=",
+			"checksumSHA1": "vIZ71Qe81RHec1vNHpKG+CSx/es=",
 			"path": "google.golang.org/appengine/internal",
-			"revision": "3f4dbbc0ec153a39878fd524ece9f39732bd4998",
-			"revisionTime": "2016-10-06T20:09:41Z"
+			"revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
+			"revisionTime": "2016-10-25T16:43:32Z"
 		},
 		{
 			"checksumSHA1": "x6Thdfyasqd68dWZWqzWWeIfAfI=",
 			"path": "google.golang.org/appengine/internal/app_identity",
-			"revision": "3f4dbbc0ec153a39878fd524ece9f39732bd4998",
-			"revisionTime": "2016-10-06T20:09:41Z"
+			"revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
+			"revisionTime": "2016-10-25T16:43:32Z"
 		},
 		{
 			"checksumSHA1": "TsNO8P0xUlLNyh3Ic/tzSp/fDWM=",
 			"path": "google.golang.org/appengine/internal/base",
-			"revision": "3f4dbbc0ec153a39878fd524ece9f39732bd4998",
-			"revisionTime": "2016-10-06T20:09:41Z"
+			"revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
+			"revisionTime": "2016-10-25T16:43:32Z"
 		},
 		{
 			"checksumSHA1": "5QsV5oLGSfKZqTCVXP6NRz5T4Tw=",
 			"path": "google.golang.org/appengine/internal/datastore",
-			"revision": "3f4dbbc0ec153a39878fd524ece9f39732bd4998",
-			"revisionTime": "2016-10-06T20:09:41Z"
+			"revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
+			"revisionTime": "2016-10-25T16:43:32Z"
 		},
 		{
 			"checksumSHA1": "Gep2T9zmVYV8qZfK2gu3zrmG6QE=",
 			"path": "google.golang.org/appengine/internal/log",
-			"revision": "3f4dbbc0ec153a39878fd524ece9f39732bd4998",
-			"revisionTime": "2016-10-06T20:09:41Z"
+			"revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
+			"revisionTime": "2016-10-25T16:43:32Z"
 		},
 		{
 			"checksumSHA1": "eLZVX1EHLclFtQnjDIszsdyWRHo=",
 			"path": "google.golang.org/appengine/internal/modules",
-			"revision": "3f4dbbc0ec153a39878fd524ece9f39732bd4998",
-			"revisionTime": "2016-10-06T20:09:41Z"
+			"revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
+			"revisionTime": "2016-10-25T16:43:32Z"
 		},
 		{
 			"checksumSHA1": "a1XY7rz3BieOVqVI2Et6rKiwQCk=",
 			"path": "google.golang.org/appengine/internal/remote_api",
-			"revision": "3f4dbbc0ec153a39878fd524ece9f39732bd4998",
-			"revisionTime": "2016-10-06T20:09:41Z"
+			"revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
+			"revisionTime": "2016-10-25T16:43:32Z"
 		},
 		{
 			"checksumSHA1": "VA88sOHmVuIslrbHaWx9yEvjGjM=",
 			"path": "google.golang.org/appengine/internal/socket",
-			"revision": "3f4dbbc0ec153a39878fd524ece9f39732bd4998",
-			"revisionTime": "2016-10-06T20:09:41Z"
+			"revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
+			"revisionTime": "2016-10-25T16:43:32Z"
 		},
 		{
 			"checksumSHA1": "QtAbHtHmDzcf6vOV9eqlCpKgjiw=",
 			"path": "google.golang.org/appengine/internal/urlfetch",
-			"revision": "3f4dbbc0ec153a39878fd524ece9f39732bd4998",
-			"revisionTime": "2016-10-06T20:09:41Z"
+			"revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
+			"revisionTime": "2016-10-25T16:43:32Z"
 		},
 		{
 			"checksumSHA1": "MharNMGnQusRPdmBYXDxz2cCHPU=",
 			"path": "google.golang.org/appengine/socket",
-			"revision": "3f4dbbc0ec153a39878fd524ece9f39732bd4998",
-			"revisionTime": "2016-10-06T20:09:41Z"
+			"revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
+			"revisionTime": "2016-10-25T16:43:32Z"
 		},
 		{
 			"checksumSHA1": "akOV9pYnCbcPA8wJUutSQVibdyg=",
 			"path": "google.golang.org/appengine/urlfetch",
-			"revision": "3f4dbbc0ec153a39878fd524ece9f39732bd4998",
-			"revisionTime": "2016-10-06T20:09:41Z"
+			"revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
+			"revisionTime": "2016-10-25T16:43:32Z"
 		},
 		{
-			"checksumSHA1": "N0GmUbip6LljIQkixSZnQ7a76Fs=",
+			"checksumSHA1": "r2jmzn/o6RN6PT9FRRtBeAfgNEk=",
 			"path": "google.golang.org/grpc",
-			"revision": "2131fedea9b3fe419b5c06200a674cdbc7bf986d",
-			"revisionTime": "2016-10-08T00:11:57Z"
+			"revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
+			"revisionTime": "2016-10-26T23:20:24Z"
 		},
 		{
 			"checksumSHA1": "08icuA15HRkdYCt6H+Cs90RPQsY=",
 			"path": "google.golang.org/grpc/codes",
-			"revision": "2131fedea9b3fe419b5c06200a674cdbc7bf986d",
-			"revisionTime": "2016-10-08T00:11:57Z"
+			"revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
+			"revisionTime": "2016-10-26T23:20:24Z"
 		},
 		{
 			"checksumSHA1": "Vd1MU+Ojs7GeS6jE52vlxtXvIrI=",
 			"path": "google.golang.org/grpc/credentials",
-			"revision": "2131fedea9b3fe419b5c06200a674cdbc7bf986d",
-			"revisionTime": "2016-10-08T00:11:57Z"
+			"revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
+			"revisionTime": "2016-10-26T23:20:24Z"
 		},
 		{
 			"checksumSHA1": "5R1jXc9mAXky9tPEuWMikTrwCgE=",
 			"path": "google.golang.org/grpc/credentials/oauth",
-			"revision": "2131fedea9b3fe419b5c06200a674cdbc7bf986d",
-			"revisionTime": "2016-10-08T00:11:57Z"
+			"revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
+			"revisionTime": "2016-10-26T23:20:24Z"
 		},
 		{
 			"checksumSHA1": "3Lt5hNAG8qJAYSsNghR5uA1zQns=",
 			"path": "google.golang.org/grpc/grpclog",
-			"revision": "2131fedea9b3fe419b5c06200a674cdbc7bf986d",
-			"revisionTime": "2016-10-08T00:11:57Z"
+			"revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
+			"revisionTime": "2016-10-26T23:20:24Z"
 		},
 		{
 			"checksumSHA1": "T3Q0p8kzvXFnRkMaK/G8mCv6mc0=",
 			"path": "google.golang.org/grpc/internal",
-			"revision": "2131fedea9b3fe419b5c06200a674cdbc7bf986d",
-			"revisionTime": "2016-10-08T00:11:57Z"
+			"revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
+			"revisionTime": "2016-10-26T23:20:24Z"
 		},
 		{
 			"checksumSHA1": "P64GkSdsTZ8Nxop5HYqZJ6e+iHs=",
 			"path": "google.golang.org/grpc/metadata",
-			"revision": "2131fedea9b3fe419b5c06200a674cdbc7bf986d",
-			"revisionTime": "2016-10-08T00:11:57Z"
+			"revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
+			"revisionTime": "2016-10-26T23:20:24Z"
 		},
 		{
 			"checksumSHA1": "4GSUFhOQ0kdFlBH4D5OTeKy78z0=",
 			"path": "google.golang.org/grpc/naming",
-			"revision": "2131fedea9b3fe419b5c06200a674cdbc7bf986d",
-			"revisionTime": "2016-10-08T00:11:57Z"
+			"revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
+			"revisionTime": "2016-10-26T23:20:24Z"
 		},
 		{
 			"checksumSHA1": "3RRoLeH6X2//7tVClOVzxW2bY+E=",
 			"path": "google.golang.org/grpc/peer",
-			"revision": "2131fedea9b3fe419b5c06200a674cdbc7bf986d",
-			"revisionTime": "2016-10-08T00:11:57Z"
+			"revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
+			"revisionTime": "2016-10-26T23:20:24Z"
 		},
 		{
-			"checksumSHA1": "dzbx9oVtSfVgNE3lTl+r5xOXxcw=",
+			"checksumSHA1": "HQJrtiTtr5eiRsXQLut2R1Q9kuY=",
 			"path": "google.golang.org/grpc/transport",
-			"revision": "2131fedea9b3fe419b5c06200a674cdbc7bf986d",
-			"revisionTime": "2016-10-08T00:11:57Z"
+			"revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
+			"revisionTime": "2016-10-26T23:20:24Z"
 		},
 		{
 			"checksumSHA1": "1D8GzeoFGUs5FZOoyC2DpQg8c5Y=",