Mirror of https://github.com/sourcegraph/sourcegraph.git, synced 2026-02-06 19:21:50 +00:00
Unexport some externally irrelevant symbols from uploadstore (#63647)
These symbols aren't used outside of the package, so they are unexported for a cleaner API surface.

Test plan: the Go compiler doesn't complain.
parent 9435fde3a2
commit 2e392e0e89
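For context on the renames below: Go exports an identifier if and only if its first letter is upper-case, so lowering ForEachString to forEachString (and NewExtraCloser to newExtraCloser) removes them from the package's public API without changing behavior. A hypothetical illustration, not upstream code:

package uploadstore

// ForEachString would be visible to any package that imports uploadstore.
func ForEachString(values []string) {}

// forEachString is only visible inside package uploadstore itself.
func forEachString(values []string) {}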
@@ -123,7 +123,7 @@ func (s *gcsStore) Get(ctx context.Context, key string) (_ io.ReadCloser, err error) {
 		return nil, errors.Wrap(err, "failed to get object")
 	}
 
-	return NewExtraCloser(rc, done), nil
+	return newExtraCloser(rc, done), nil
 }
 
 func (s *gcsStore) Upload(ctx context.Context, key string, r io.Reader) (_ int64, err error) {
@@ -229,7 +229,7 @@ func (s *gcsStore) create(ctx context.Context, bucket gcsBucketHandle) error {
 }
 
 func (s *gcsStore) deleteSources(ctx context.Context, bucket gcsBucketHandle, sources []string) error {
-	return ForEachString(sources, func(index int, source string) error {
+	return forEachString(sources, func(index int, source string) error {
 		if err := bucket.Object(source).Delete(ctx); err != nil {
 			return errors.Wrap(err, "failed to delete source object")
 		}
@@ -6,12 +6,12 @@ import (
 	"github.com/sourcegraph/conc/pool"
 )
 
-// ForEachString invokes the given callback once for each of the
+// forEachString invokes the given callback once for each of the
 // given string values. The callback function will receive the index as well
 // as the string value as parameters. Callbacks will be invoked in a number
 // of concurrent routines proportional to the maximum number of CPUs that
 // can be executing simultaneously.
-func ForEachString(values []string, f func(index int, value string) error) error {
+func forEachString(values []string, f func(index int, value string) error) error {
 	p := pool.New().
 		WithErrors().
 		WithMaxGoroutines(runtime.GOMAXPROCS(0))
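The hunk above cuts off after the pool construction. A minimal sketch of how such a helper can be completed and used with the conc/pool API; the loop body and the example caller are assumptions, not the upstream implementation:

package main

import (
	"fmt"
	"runtime"

	"github.com/sourcegraph/conc/pool"
)

// forEachString mirrors the helper shown in the hunk above; everything past
// the pool construction is an assumption based on the conc/pool API.
func forEachString(values []string, f func(index int, value string) error) error {
	p := pool.New().
		WithErrors().
		WithMaxGoroutines(runtime.GOMAXPROCS(0))

	for i, v := range values {
		i, v := i, v // capture loop variables for the goroutine
		p.Go(func() error { return f(i, v) })
	}

	// Wait blocks until all callbacks finish and returns their combined errors.
	return p.Wait()
}

func main() {
	err := forEachString([]string{"a", "b", "c"}, func(index int, value string) error {
		fmt.Println(index, value)
		return nil
	})
	fmt.Println("err:", err)
}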
@@ -13,8 +13,8 @@ func (c *closeWrapper) Close() error {
 	return nil
 }
 
-// NewExtraCloser returns wraps a ReadCloser with an extra close function
+// newExtraCloser returns wraps a ReadCloser with an extra close function
 // that will be called after the underlying ReadCloser has been closed.
-func NewExtraCloser(rc io.ReadCloser, close func()) io.ReadCloser {
+func newExtraCloser(rc io.ReadCloser, close func()) io.ReadCloser {
 	return &closeWrapper{ReadCloser: rc, close: close}
 }
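Only newExtraCloser's body and the tail of Close appear in the diff. A sketch of the wrapper type they imply, assuming a closeWrapper struct that embeds the wrapped io.ReadCloser and stores the extra hook; the Close body here is an assumption consistent with the trailing return nil visible in the hunk:

package main

import (
	"fmt"
	"io"
	"strings"
)

// closeWrapper embeds the wrapped ReadCloser and keeps the extra close hook.
type closeWrapper struct {
	io.ReadCloser
	close func()
}

// Close closes the underlying ReadCloser first, then runs the extra hook,
// matching the ordering described in the comment above (assumed body).
func (c *closeWrapper) Close() error {
	if err := c.ReadCloser.Close(); err != nil {
		return err
	}

	c.close()
	return nil
}

func newExtraCloser(rc io.ReadCloser, close func()) io.ReadCloser {
	return &closeWrapper{ReadCloser: rc, close: close}
}

func main() {
	rc := newExtraCloser(io.NopCloser(strings.NewReader("payload")), func() {
		fmt.Println("extra close hook ran")
	})
	_, _ = io.ReadAll(rc)
	_ = rc.Close() // prints "extra close hook ran"
}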
@@ -128,7 +128,7 @@ func (s *s3Store) List(ctx context.Context, prefix string) (_ *iterator.Iterator
 }
 
 func (s *s3Store) Get(ctx context.Context, key string) (_ io.ReadCloser, err error) {
-	ctx, _, endObservation := s.operations.Get.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
+	ctx, traceLogger, endObservation := s.operations.Get.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
 		attribute.String("key", key),
 	}})
 	done := func() { endObservation(1, observation.Args{}) }
@@ -144,7 +144,7 @@ func (s *s3Store) Get(ctx context.Context, key string) (_ io.ReadCloser, err error) {
 			}
 
 			byteOffset += n
-			s.operations.Get.Logger.Warn("Transient error while reading payload", log.String("key", key), log.Error(err))
+			traceLogger.Warn("Transient error while reading payload", log.String("key", key), log.Error(err))
 
 			if n == 0 {
 				zeroReads++
@@ -158,7 +158,7 @@ func (s *s3Store) Get(ctx context.Context, key string) (_ io.ReadCloser, err error) {
 		}
 	})
 
-	return NewExtraCloser(io.NopCloser(reader), done), nil
+	return newExtraCloser(io.NopCloser(reader), done), nil
 }
 
 // ioCopyHook is a pointer to io.Copy. This function is replaced in unit tests so that we can
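The truncated ioCopyHook comment describes a common test seam: a package-level variable holding io.Copy that unit tests can swap out. A hypothetical illustration of the pattern (the variable name comes from the diff; everything else is assumed):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"strings"
)

// ioCopyHook defaults to io.Copy; tests can replace it to inject errors.
var ioCopyHook = io.Copy

// copyPayload is a hypothetical caller that routes all copies through the hook.
func copyPayload(dst io.Writer, src io.Reader) (int64, error) {
	return ioCopyHook(dst, src)
}

func main() {
	var buf bytes.Buffer
	n, err := copyPayload(&buf, strings.NewReader("payload"))
	fmt.Println(n, err, buf.String())

	// A test would temporarily swap the hook to simulate a failed copy.
	ioCopyHook = func(io.Writer, io.Reader) (int64, error) {
		return 0, errors.New("injected copy failure")
	}
	_, err = copyPayload(&buf, strings.NewReader("payload"))
	fmt.Println(err)
}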
@@ -243,7 +243,7 @@ func (s *s3Store) Compose(ctx context.Context, destination string, sources ...st
 	var m sync.Mutex
 	etags := map[int]*string{}
 
-	if err := ForEachString(sources, func(index int, source string) error {
+	if err := forEachString(sources, func(index int, source string) error {
 		partNumber := index + 1
 
 		copyResult, err := s.client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
@@ -374,7 +374,7 @@ func (s *s3Store) create(ctx context.Context) error {
 }
 
 func (s *s3Store) deleteSources(ctx context.Context, bucket string, sources []string) error {
-	return ForEachString(sources, func(index int, source string) error {
+	return forEachString(sources, func(index int, source string) error {
 		if _, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{
 			Bucket: aws.String(bucket),
 			Key:    aws.String(source),