Repository: jszwec/s3fs
Branch: main
Commit: 7b413ce12de4
Files: 12
Total size: 54.9 KB
Directory structure:
gitextract_kmeqcja3/
├── .github/
│ └── workflows/
│ └── go.yml
├── .gitignore
├── LICENSE
├── README.md
├── dir.go
├── file.go
├── fs.go
├── fs_test.go
├── go.mod
├── go.sum
└── test/
├── localstack/
│ └── docker-compose.yml
└── minio/
└── docker-compose.yml
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/workflows/go.yml
================================================
name: Go
on:
push:
branches: [ main, v1, v2 ]
pull_request:
branches: [ main, v1, v2 ]
jobs:
localstack:
runs-on: ubuntu-latest
services:
minio:
image: localstack/localstack:0.14.0
ports:
- "4566:4566"
- "4571:4571"
env:
SERVICES: s3
steps:
- uses: actions/checkout@v2
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.21
- name: Wait for localstack
run: 'for i in {1..20}; do sleep 3 && curl --silent --fail http://localhost:4566/health | grep "\"s3\": \"available\"" > /dev/null && break; done'
- name: Test
run: go test -v -endpoint='http://localhost:4566' -cover
minio:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.21
- name: Test
env:
SERVER_ENDPOINT: http://localhost:9000
ACCESS_KEY: minioadmin
SECRET_KEY: minioadmin
MINIO_ACCESS_KEY: minioadmin
MINIO_SECRET_KEY: minioadmin
S3FS_TEST_AWS_ACCESS_KEY_ID: minioadmin
S3FS_TEST_AWS_SECRET_ACCESS_KEY: minioadmin
run: |
wget -O /tmp/minio -q https://dl.minio.io/server/minio/release/linux-amd64/minio
chmod +x /tmp/minio
/tmp/minio server /tmp/data &
go test -v -endpoint='http://localhost:9000' -cover
================================================
FILE: .gitignore
================================================
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2021 Jacek Szwec
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
# s3fs [](https://pkg.go.dev/github.com/jszwec/s3fs) 
Package s3fs provides an S3 implementation of the Go 1.16 [filesystem](https://tip.golang.org/pkg/io/fs/#FS) interface.
Since S3 is a flat structure, s3fs simulates directories by using
prefixes and the "/" delimiter. ModTime on directories is always the zero value.
# SDK Versions
```github.com/jszwec/s3fs``` uses aws sdk v1
```github.com/jszwec/s3fs/v2``` uses aws sdk v2
# Example (SDK v1)
```go
const bucket = "my-bucket"
s, err := session.NewSession()
if err != nil {
log.Fatal(err)
}
s3fs := s3fs.New(s3.New(s), bucket)
// print out all files in s3 bucket.
_ = fs.WalkDir(s3fs, ".", func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.IsDir() {
fmt.Println("dir:", path)
return nil
}
fmt.Println("file:", path)
return nil
})
```
# Installation
```
go get github.com/jszwec/s3fs
```
# Requirements
* Go1.16+
================================================
FILE: dir.go
================================================
package s3fs
import (
"context"
"errors"
"io"
"io/fs"
"path"
"sort"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3"
)
// Compile-time check that *dir satisfies fs.ReadDirFile.
var _ fs.ReadDirFile = (*dir)(nil)

// dir is a simulated S3 directory. Entries are produced lazily by paging
// through ListObjects responses with "/" as the delimiter.
type dir struct {
	fileInfo
	s3cl   Client
	bucket string
	// marker is the pagination marker for the next ListObjects call.
	marker *string
	// done is set once the listing has been fully consumed.
	done bool
	// buf holds entries fetched from S3 but not yet returned by ReadDir.
	buf []fs.DirEntry
	// dirs tracks simulated sub-directories (from CommonPrefixes); the
	// value records whether the entry was already merged into buf.
	dirs map[dirEntry]bool
}
// Stat returns the directory's own file info.
func (d *dir) Stat() (fs.FileInfo, error) {
	fi := &d.fileInfo
	return fi, nil
}
// Read always fails: a directory cannot be read as a byte stream.
func (d *dir) Read([]byte) (int, error) {
	err := &fs.PathError{
		Op:   "read",
		Path: d.name,
		Err:  errors.New("is a directory"),
	}
	return 0, err
}
// Close is a no-op; a directory listing holds no resources.
func (d *dir) Close() error { return nil }
// ReadDir implements fs.ReadDirFile.
//
// For n <= 0 it drains the whole listing and returns it in one slice
// (never nil, per the fs docs). For n > 0 it returns up to n entries and
// io.EOF once the listing is exhausted.
func (d *dir) ReadDir(n int) (des []fs.DirEntry, err error) {
	if n <= 0 {
		switch err := d.readAll(); {
		case err == nil:
		case errors.Is(err, io.EOF):
			// Listing was already exhausted before this call.
			return []fs.DirEntry{}, nil
		default:
			return nil, err
		}
		// Hand the whole buffer to the caller and drop our reference.
		des, d.buf = d.buf, nil
		return des, nil
	}
loop:
	// Fetch pages until we have n buffered entries or run out.
	for len(d.buf) < n {
		switch err := d.readNext(); {
		case err == nil:
			continue
		case errors.Is(err, io.EOF):
			break loop
		default:
			return nil, err
		}
	}
	// Full-slice expression caps the returned slice so a caller's append
	// cannot stomp the entries we keep buffered.
	offset := min(n, len(d.buf))
	des, d.buf = d.buf[:offset:offset], d.buf[offset:]
	if d.done && len(d.buf) == 0 {
		err = io.EOF
	}
	return des, err
}
// readAll drains all remaining listing pages into d.buf. It returns nil
// when this call consumed the final page, io.EOF when the listing had
// already been exhausted before the call, and any other error verbatim.
func (d *dir) readAll() error {
	if d.done {
		return io.EOF
	}
	for {
		err := d.readNext()
		if err == nil {
			continue
		}
		if errors.Is(err, io.EOF) {
			return nil
		}
		return err
	}
}
// readNext fetches one ListObjects page for this directory's prefix and
// appends the resulting entries to d.buf. It returns io.EOF when the
// listing is (or already was) exhausted.
func (d *dir) readNext() error {
	if d.done {
		return io.EOF
	}
	// Build the prefix: the root ('.') lists with an empty prefix, any
	// other directory lists with a trailing "/".
	name := strings.TrimRight(d.name, "/")
	switch {
	case name == ".":
		name = ""
	default:
		name += "/"
	}
	out, err := d.s3cl.ListObjects(
		context.Background(),
		&s3.ListObjectsInput{
			Bucket:    &d.bucket,
			Delimiter: ptr("/"),
			Prefix:    &name,
			Marker:    d.marker,
		})
	if err != nil {
		return err
	}
	// A non-root prefix with no objects and no sub-prefixes means the
	// simulated directory does not exist.
	if d.name != "." && len(out.CommonPrefixes)+len(out.Contents) == 0 {
		return &fs.PathError{
			Op:   "readdir",
			Path: strings.TrimSuffix(name, "/"),
			Err:  fs.ErrNotExist,
		}
	}
	d.marker = out.NextMarker
	// NOTE(review): a nil IsTruncated leaves done=false; presumably the
	// SDK always populates it — if a backend omitted it this loop would
	// never terminate. Confirm against the S3-compatible backends used.
	d.done = out.IsTruncated != nil && !(*out.IsTruncated)
	if d.dirs == nil {
		d.dirs = make(map[dirEntry]bool)
	}
	// CommonPrefixes become simulated sub-directories; they are staged in
	// d.dirs (not buf) so mergeDirFiles can place them in sorted order.
	for _, p := range out.CommonPrefixes {
		if p.Prefix == nil {
			continue
		}
		de := dirEntry{
			fileInfo: fileInfo{
				name: path.Base(*p.Prefix),
				mode: fs.ModeDir,
			},
		}
		if _, ok := d.dirs[de]; !ok {
			d.dirs[de] = false
		}
	}
	// Contents are regular files; they arrive from S3 already sorted.
	for _, o := range out.Contents {
		if o.Key == nil {
			continue
		}
		d.buf = append(d.buf, dirEntry{
			fileInfo: fileInfo{
				name:    path.Base(*o.Key),
				size:    derefInt64(o.Size),
				modTime: derefTime(o.LastModified),
			},
		})
	}
	d.mergeDirFiles()
	if d.done {
		return io.EOF
	}
	return nil
}
// mergeDirFiles folds pending simulated directories (d.dirs) into d.buf,
// keeping buf sorted by name. A directory whose sorted position falls
// beyond the current page is deferred until a later page (or until the
// listing is done), so entries are emitted in global name order.
func (d *dir) mergeDirFiles() {
	if d.buf == nil {
		// according to fs docs ReadDir should never return nil slice,
		// so we set it here.
		d.buf = []fs.DirEntry{}
	}
	// we need a current len for sort.Search that doesn't change; otherwise
	// we could not append to the same slice.
	l := len(d.buf)
	for de, used := range d.dirs {
		if used {
			continue
		}
		i := sort.Search(l, func(i int) bool {
			return d.buf[i].Name() >= de.Name()
		})
		// Sorts after everything fetched so far and more pages remain:
		// keep it pending rather than placing it too early.
		if i == l && !d.done {
			continue
		}
		d.buf = append(d.buf, de)
		d.dirs[de] = true
	}
	sort.Slice(d.buf, func(i, j int) bool {
		return d.buf[i].Name() < d.buf[j].Name()
	})
}
// dirEntry adapts fileInfo to the fs.DirEntry interface.
type dirEntry struct {
	fileInfo
}

// Type returns the type bits of the entry's mode.
func (de dirEntry) Type() fs.FileMode { return de.Mode().Type() }

// Info returns the entry's full FileInfo; it never fails.
func (de dirEntry) Info() (fs.FileInfo, error) { return de.fileInfo, nil }
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// derefInt64 returns *n, or 0 when n is nil.
func derefInt64(n *int64) int64 {
	if n == nil {
		return 0
	}
	return *n
}
// derefTime returns *t, or the zero Time when t is nil.
func derefTime(t *time.Time) time.Time {
	if t == nil {
		return time.Time{}
	}
	return *t
}
================================================
FILE: file.go
================================================
package s3fs
import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"net/http"
"path"
"time"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
"github.com/aws/aws-sdk-go-v2/service/s3"
)
// Compile-time interface checks.
var (
	_ fs.File     = (*file)(nil)
	_ fs.FileInfo = (*fileInfo)(nil)
	_ io.Seeker   = (*file)(nil)
)

// file is an open S3 object implementing fs.File and io.Seeker.
type file struct {
	cl     Client
	bucket string
	name   string
	// The current object body stream; replaced on Seek.
	io.ReadCloser
	// stat returns the file's metadata; chosen at open time (getStatFunc).
	stat func() (fs.FileInfo, error)
	// offset counts bytes read so far, maintained for Seek.
	offset int64
	// eTag of the object at open time; sent as If-Match when Seek
	// reopens the object so concurrent modification is detected.
	eTag string
}
// openFile fetches the object stored under name and wraps it in a *file.
// The returned file caches Stat metadata when the GetObject response
// carries it, avoiding an extra HeadObject round trip.
func openFile(cl Client, bucket string, name string) (fs.File, error) {
	out, err := cl.GetObject(context.Background(), &s3.GetObjectInput{
		Key:    &name,
		Bucket: &bucket,
	})
	if err != nil {
		return nil, err
	}
	// ETag can be absent from the response (e.g. some S3-compatible
	// backends). Seek already reports a descriptive error for an empty
	// eTag, so fall back to "" instead of dereferencing a nil pointer,
	// which would panic here.
	var eTag string
	if out.ETag != nil {
		eTag = *out.ETag
	}
	statFunc := getStatFunc(cl, bucket, name, *out)
	return &file{
		cl:         cl,
		bucket:     bucket,
		name:       name,
		ReadCloser: out.Body,
		stat:       statFunc,
		offset:     0,
		eTag:       eTag,
	}, nil
}
// getStatFunc picks the Stat implementation for an opened file. When the
// GetObject response already carries size and modification time, Stat is
// answered from that cached metadata; otherwise it falls back to a
// HeadObject-based stat call.
func getStatFunc(cl Client, bucket string, name string, s3ObjOutput s3.GetObjectOutput) func() (fs.FileInfo, error) {
	if s3ObjOutput.ContentLength == nil || s3ObjOutput.LastModified == nil {
		// Metadata is incomplete; Stat must ask S3.
		return func() (fs.FileInfo, error) {
			return stat(cl, bucket, name)
		}
	}
	// if we got all the information from GetObjectOutput
	// then we can cache fileinfo instead of making
	// another call in case Stat is called.
	fi := fileInfo{
		name:    path.Base(name),
		size:    *s3ObjOutput.ContentLength,
		modTime: *s3ObjOutput.LastModified,
	}
	return func() (fs.FileInfo, error) {
		return &fi, nil
	}
}
// Read delegates to the current object body and advances the tracked
// offset by the number of bytes consumed.
func (f *file) Read(p []byte) (int, error) {
	read, err := f.ReadCloser.Read(p)
	f.offset += int64(read)
	return read, err
}
// Seek implements io.Seeker by closing the current body and reopening the
// object at the new position with a Range request. The open-time ETag is
// sent as If-Match so a concurrently modified object is detected; in that
// case an error wrapping fs.ErrNotExist is returned.
func (f *file) Seek(offset int64, whence int) (int64, error) {
	newOffset := f.offset
	stat, err := f.Stat()
	if err != nil {
		return 0, err
	}
	size := stat.Size()
	switch whence {
	case io.SeekStart:
		newOffset = offset
	case io.SeekCurrent:
		newOffset += offset
	case io.SeekEnd:
		newOffset = size + offset
	default:
		return 0, errors.New("s3fs.file.Seek: invalid whence")
	}
	// If the position has not moved, there is no need to make a new query
	if f.offset == newOffset {
		return newOffset, nil
	}
	if newOffset < 0 {
		return 0, errors.New("s3fs.file.Seek: seeked to a negative position")
	}
	// Without an ETag we cannot detect concurrent modification, so refuse.
	if f.eTag == "" {
		return 0, errors.New("s3fs.file.Seek: cannot seek. remote file has no etag")
	}
	if err := f.Close(); err != nil {
		return f.offset, err
	}
	// At or past the end: no request needed, reads just return io.EOF.
	if newOffset >= size {
		f.ReadCloser = io.NopCloser(eofReader{})
		f.offset = newOffset
		return f.offset, nil
	}
	rawObject, err := f.cl.GetObject(
		context.Background(),
		&s3.GetObjectInput{
			Bucket:  &f.bucket,
			Key:     &f.name,
			Range:   ptr(fmt.Sprintf("bytes=%d-", newOffset)),
			IfMatch: &f.eTag,
		})
	if err != nil {
		// 412 Precondition Failed: the object changed since open.
		if e := new(awshttp.ResponseError); errors.As(err, &e) {
			if e.HTTPStatusCode() == http.StatusPreconditionFailed {
				return 0, fmt.Errorf("s3fs.file.Seek: file has changed while seeking: %w", fs.ErrNotExist)
			}
		}
		return 0, err
	}
	f.offset = newOffset
	f.ReadCloser = rawObject.Body
	return f.offset, nil
}
func (f file) Stat() (fs.FileInfo, error) { return f.stat() }
// fileInfo describes an S3 object or a simulated directory for io/fs.
type fileInfo struct {
	name    string
	size    int64
	mode    fs.FileMode
	modTime time.Time
}

// Name returns the base name of the entry.
func (fi fileInfo) Name() string { return path.Base(fi.name) }

// Size returns the object size in bytes (0 for simulated directories).
func (fi fileInfo) Size() int64 { return fi.size }

// Mode returns the mode bits (fs.ModeDir for directories, 0 for files).
func (fi fileInfo) Mode() fs.FileMode { return fi.mode }

// ModTime returns the last-modified time; always zero for directories.
func (fi fileInfo) ModTime() time.Time { return fi.modTime }

// IsDir reports whether the entry is a (simulated) directory.
func (fi fileInfo) IsDir() bool { return fi.mode.IsDir() }

// Sys always returns nil; there is no underlying data source value.
func (fi fileInfo) Sys() interface{} { return nil }
// eofReader is an io.Reader that is permanently at end-of-file. It backs
// a file whose offset was seeked to or beyond the end of the object.
type eofReader struct{}

// Read always reports end-of-file without consuming anything.
func (eofReader) Read([]byte) (int, error) {
	return 0, io.EOF
}
// ptr returns a pointer to a copy of v. It exists because the aws SDK
// input structs take pointer-typed fields.
func ptr[T any](v T) *T {
	p := new(T)
	*p = v
	return p
}
================================================
FILE: fs.go
================================================
// Package s3fs provides a S3 implementation for Go1.16 filesystem interface.
package s3fs
import (
"context"
"errors"
"io/fs"
"github.com/aws/aws-sdk-go-v2/aws/transport/http"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
// Compile-time checks that *S3FS implements the fs interfaces it claims.
var (
	_ fs.FS        = (*S3FS)(nil)
	_ fs.StatFS    = (*S3FS)(nil)
	_ fs.ReadDirFS = (*S3FS)(nil)
)

// errNotDir is returned internally when a path resolves to a regular
// file where a directory was required.
var errNotDir = errors.New("not a dir")
// Option is a function that provides optional features to S3FS.
type Option func(*S3FS)

// WithReadSeeker enables Seek functionality on files opened with this fs.
//
// BUG(WilliamFrei): Seeking on S3 requires reopening the file at the specified
// position. This can cause problems if the file changed between opening
// and calling Seek. In that case, fs.ErrNotExist error is returned, which
// has to be handled by the caller.
func WithReadSeeker(fsys *S3FS) { fsys.readSeeker = true }
// Client wraps the s3 client methods that this package is using.
// This interface may change in the future and should not be relied on by
// packages using it.
type Client interface {
	// HeadObject is used by Stat to fetch object metadata.
	HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error)
	// ListObjects is used to enumerate simulated directories.
	ListObjects(ctx context.Context, params *s3.ListObjectsInput, optFns ...func(*s3.Options)) (*s3.ListObjectsOutput, error)
	// GetObject is used to open objects for reading and seeking.
	GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error)
}
// S3FS is a S3 filesystem implementation.
//
// S3 has a flat structure instead of a hierarchy. S3FS simulates directories
// by using prefixes and delims ("/"). Because directories are simulated, ModTime
// is always a default Time value (IsZero returns true).
type S3FS struct {
	cl     Client
	bucket string
	// readSeeker makes Open return files that implement io.Seeker
	// (see WithReadSeeker).
	readSeeker bool
}
// New returns a new filesystem that works on the specified bucket.
func New(cl Client, bucket string, opts ...Option) *S3FS {
	fsys := S3FS{
		cl:     cl,
		bucket: bucket,
	}
	for _, o := range opts {
		o(&fsys)
	}
	return &fsys
}
// Open implements fs.FS.
//
// A name is first tried as an object; if the object does not exist, it is
// retried as a simulated directory before reporting fs.ErrNotExist.
func (f *S3FS) Open(name string) (fs.File, error) {
	if !fs.ValidPath(name) {
		return nil, &fs.PathError{
			Op:   "open",
			Path: name,
			Err:  fs.ErrInvalid,
		}
	}
	// The root is always a directory; no object lookup needed.
	if name == "." {
		return openDir(f.cl, f.bucket, name)
	}
	file, err := openFile(f.cl, f.bucket, name)
	if err != nil {
		if isNotFoundErr(err) {
			// Not an object — maybe it is a directory prefix.
			switch d, err := openDir(f.cl, f.bucket, name); {
			case err == nil:
				return d, nil
			case !isNotFoundErr(err) && !errors.Is(err, errNotDir) && !errors.Is(err, fs.ErrNotExist):
				// A genuine failure (network, auth, ...), not "absent".
				return nil, err
			}
			return nil, &fs.PathError{
				Op:   "open",
				Path: name,
				Err:  fs.ErrNotExist,
			}
		}
		return nil, &fs.PathError{
			Op:   "open",
			Path: name,
			Err:  err,
		}
	}
	// Unless seeking was opted in, hide the Seek method.
	if !f.readSeeker {
		file = fileNoSeek{file}
	}
	return file, nil
}
// Stat implements fs.StatFS.
func (f *S3FS) Stat(name string) (fs.FileInfo, error) {
	fi, err := stat(f.cl, f.bucket, name)
	if err == nil {
		return fi, nil
	}
	return nil, &fs.PathError{
		Op:   "stat",
		Path: name,
		Err:  err,
	}
}
// ReadDir implements fs.ReadDirFS.
func (f *S3FS) ReadDir(name string) ([]fs.DirEntry, error) {
	d, err := openDir(f.cl, f.bucket, name)
	if err == nil {
		return d.ReadDir(-1)
	}
	return nil, &fs.PathError{
		Op:   "readdir",
		Path: name,
		Err:  err,
	}
}
// stat resolves name within bucket to either a file's metadata
// (via HeadObject) or a simulated directory (via a one-key ListObjects
// probe of the name's prefix). It returns fs.ErrNotExist when neither
// an object nor a prefix matches.
func stat(s3cl Client, bucket, name string) (fs.FileInfo, error) {
	if !fs.ValidPath(name) {
		return nil, fs.ErrInvalid
	}
	// The root always exists and is a directory.
	if name == "." {
		return &dir{
			s3cl:   s3cl,
			bucket: bucket,
			fileInfo: fileInfo{
				name: ".",
				mode: fs.ModeDir,
			},
		}, nil
	}
	head, err := s3cl.HeadObject(
		context.Background(),
		&s3.HeadObjectInput{
			Bucket: &bucket,
			Key:    &name,
		})
	if err != nil {
		if !isNotFoundErr(err) {
			return nil, err
		}
		// Object absent: fall through to the directory probe below.
	} else {
		return &fileInfo{
			name:    name,
			size:    derefInt64(head.ContentLength),
			mode:    0,
			modTime: derefTime(head.LastModified),
		}, nil
	}
	// Probe for at least one key under "name/" to detect a directory.
	out, err := s3cl.ListObjects(
		context.Background(),
		&s3.ListObjectsInput{
			Bucket:    &bucket,
			Delimiter: ptr("/"),
			Prefix:    ptr(name + "/"),
			MaxKeys:   ptr[int32](1),
		})
	if err != nil {
		return nil, err
	}
	if len(out.CommonPrefixes) > 0 || len(out.Contents) > 0 {
		return &dir{
			s3cl:   s3cl,
			bucket: bucket,
			fileInfo: fileInfo{
				name: name,
				mode: fs.ModeDir,
			},
		}, nil
	}
	return nil, fs.ErrNotExist
}
// openDir stats name and returns it as a directory handle. It fails with
// errNotDir when the path exists but is a regular file.
func openDir(s3cl Client, bucket, name string) (fs.ReadDirFile, error) {
	fi, err := stat(s3cl, bucket, name)
	if err != nil {
		return nil, err
	}
	d, ok := fi.(fs.ReadDirFile)
	if !ok {
		return nil, errNotDir
	}
	return d, nil
}
// isNotFoundErr reports whether err means the requested object is absent.
func isNotFoundErr(err error) bool {
	var nsk *types.NoSuchKey
	if errors.As(err, &nsk) {
		return true
	}
	// localstack workaround: some backends surface a bare HTTP 404
	// instead of a typed NoSuchKey error.
	var re *http.ResponseError
	return errors.As(err, &re) && re.HTTPStatusCode() == 404
}
type fileNoSeek struct{ fs.File }
================================================
FILE: fs_test.go
================================================
package s3fs_test
import (
"bytes"
"context"
"crypto/tls"
"errors"
"flag"
"io"
"io/fs"
"net/http"
"os"
"reflect"
"strings"
"sync/atomic"
"testing"
"testing/fstest"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/jszwec/s3fs/v2"
)
// Command-line flags selecting the S3-compatible endpoint under test.
var (
	endpoint   = flag.String("endpoint", "http://localhost:4566", "s3 endpoint")
	bucket     = flag.String("bucket", "test-github.com-jszwec-s3fs", "bucket name")
	skipVerify = flag.Bool("skip-verify", true, "http insecure skip verify")
)

// Credentials and region, overridable through environment variables.
var (
	accessKeyID = envDefault("S3FS_TEST_AWS_ACCESS_KEY_ID", "1")
	secretKey   = envDefault("S3FS_TEST_AWS_SECRET_ACCESS_KEY", "1")
	region      = envDefault("S3FS_TEST_AWS_REGION", "us-east-1")
)
// TestMain parses the flags declared above before running the suite.
func TestMain(m *testing.M) {
	flag.Parse()
	os.Exit(m.Run())
}
// TestSeeker exercises the io.Seeker behavior of files opened with the
// WithReadSeeker option against a live S3-compatible endpoint, comparing
// observable behavior with bytes.Reader where applicable.
func TestSeeker(t *testing.T) {
	s3cl, cl := newClient(t)
	const testFile = "file.txt"
	content := []byte("content")
	createBucket(t, s3cl, *bucket)
	cleanBucket(t, s3cl, *bucket)
	writeFile(t, s3cl, *bucket, testFile, content)
	t.Cleanup(func() {
		cleanBucket(t, s3cl, *bucket)
		t.Log("test stats:")
		t.Log("ListObjects calls:", atomic.LoadInt64(&listC))
		t.Log("GetObject calls:", atomic.LoadInt64(&getC))
	})
	// Without WithReadSeeker, opened files must not expose Seek.
	t.Run("'s3fs.New' does not implement Seeker", func(t *testing.T) {
		testFs := s3fs.New(cl, *bucket)
		data, err := testFs.Open(testFile)
		if err != nil {
			t.Fatal(err)
		}
		_, ok := data.(io.Seeker)
		if ok {
			t.Fatalf("Expected 'data' to not implement the Seeker interface")
		}
	})
	// Replacing the object between open and Seek must surface
	// fs.ErrNotExist (the If-Match precondition fails).
	t.Run("seek throws error if file changed", func(t *testing.T) {
		const otherTestFile = "otherFile.txt"
		originalContent := []byte("con")
		changedContent := []byte("tent")
		writeFile(t, s3cl, *bucket, otherTestFile, originalContent)
		testFs := s3fs.New(s3cl, *bucket, s3fs.WithReadSeeker)
		data, err := testFs.Open(otherTestFile)
		if err != nil {
			t.Fatal(err)
		}
		if _, err := data.(io.Seeker).Seek(0, io.SeekEnd); err != nil {
			t.Fatal(err)
		}
		deleteFile(t, s3cl, *bucket, otherTestFile)
		writeFile(t, s3cl, *bucket, otherTestFile, changedContent)
		_, err = data.(io.Seeker).Seek(0, io.SeekStart)
		if !errors.Is(err, fs.ErrNotExist) {
			t.Fatalf("want=%v; got %v", fs.ErrNotExist, err)
		}
	})
	// A single Seek from offset 0 for every whence value.
	t.Run("seek once", func(t *testing.T) {
		fixtures := []struct {
			desc     string
			offset   int64
			whence   int
			expected int64
		}{
			{
				desc:     "whence SeekStart ",
				offset:   2,
				whence:   io.SeekStart,
				expected: 2,
			},
			{
				desc:     "whence SeekCurrent",
				offset:   4,
				whence:   io.SeekCurrent,
				expected: 4,
			},
			{
				desc:     "whence SeekEnd",
				offset:   -1,
				whence:   io.SeekEnd,
				expected: int64(len(content)) - 1,
			},
		}
		for _, f := range fixtures {
			f := f
			t.Run(f.desc, func(t *testing.T) {
				testFs := s3fs.New(s3cl, *bucket, s3fs.WithReadSeeker)
				data, err := testFs.Open(testFile)
				if err != nil {
					t.Fatal(err)
				}
				actual, err := data.(io.Seeker).Seek(f.offset, f.whence)
				if err != nil {
					t.Fatal(err)
				}
				if actual != f.expected {
					t.Fatalf("Expected %d, got %d", f.expected, actual)
				}
			})
		}
	})
	// Invalid arguments must produce the exact error strings.
	t.Run("seek with errors", func(t *testing.T) {
		fixtures := []struct {
			desc         string
			offset       int64
			whence       int
			errorMessage string
		}{
			{
				desc:         "seek before beginning with whence SeekCurrent",
				offset:       -1,
				whence:       io.SeekCurrent,
				errorMessage: "s3fs.file.Seek: seeked to a negative position",
			},
			{
				desc:         "seek before beginning with whence SeekStart",
				offset:       -1,
				whence:       io.SeekStart,
				errorMessage: "s3fs.file.Seek: seeked to a negative position",
			},
			{
				desc:         "seek with invalid whence",
				offset:       0,
				whence:       3,
				errorMessage: "s3fs.file.Seek: invalid whence",
			},
		}
		for _, f := range fixtures {
			f := f
			t.Run(f.desc, func(t *testing.T) {
				testFs := s3fs.New(s3cl, *bucket, s3fs.WithReadSeeker)
				data, err := testFs.Open(testFile)
				if err != nil {
					t.Fatal(err)
				}
				_, err = data.(io.Seeker).Seek(f.offset, f.whence)
				if err == nil {
					t.Fatalf("Expected error after seeking to invalid position, got nil")
				}
				if err.Error() != f.errorMessage {
					t.Fatalf("Expected %s, got %v", f.errorMessage, err)
				}
			})
		}
	})
	// Seeking after some bytes have already been read.
	t.Run("seek from other starting position", func(t *testing.T) {
		fixtures := []struct {
			desc          string
			initialOffset int
			offset        int64
			whence        int
			expected      int64
		}{
			{
				desc:          "whence SeekStart",
				initialOffset: 3,
				offset:        2,
				whence:        io.SeekStart,
				expected:      2,
			},
			{
				desc:          "whence SeekCurrent",
				initialOffset: 3,
				offset:        3,
				whence:        io.SeekCurrent,
				expected:      6,
			},
			{
				desc:          "whence SeekEnd",
				initialOffset: 3,
				offset:        -1,
				whence:        io.SeekEnd,
				expected:      int64(len(content)) - 1,
			},
		}
		for _, f := range fixtures {
			f := f
			t.Run(f.desc, func(t *testing.T) {
				testFs := s3fs.New(s3cl, *bucket, s3fs.WithReadSeeker)
				data, err := testFs.Open(testFile)
				if err != nil {
					t.Fatal(err)
				}
				readBuffer := make([]byte, f.initialOffset)
				readBytes, err := data.Read(readBuffer)
				if err != nil {
					t.Fatal(err)
				}
				if readBytes != f.initialOffset {
					t.Fatalf("Read failed during test setup")
				}
				actual, err := data.(io.Seeker).Seek(f.offset, f.whence)
				if err != nil {
					t.Fatal(err)
				}
				if actual != f.expected {
					t.Fatalf("Expected %d, got %d", f.expected, actual)
				}
			})
		}
	})
	// Seek then read, asserting the bytes delivered and EOF behavior;
	// each case is run against both the S3 file and a bytes.Reader.
	t.Run("seek then read", func(t *testing.T) {
		fixtures := []struct {
			desc         string
			readBytes    int
			offset       int64
			whence       int
			expected     []byte
			expectingEOF bool
		}{
			{
				desc:         "whence SeekStart",
				readBytes:    3,
				offset:       2,
				whence:       io.SeekStart,
				expected:     content[2:5],
				expectingEOF: false,
			},
			{
				desc:         "whence SeekCurrent",
				readBytes:    1,
				offset:       1,
				whence:       io.SeekCurrent,
				expected:     []byte("o"),
				expectingEOF: false,
			},
			{
				desc:         "seek to end then read 0",
				readBytes:    0,
				offset:       0,
				whence:       io.SeekEnd,
				expected:     []byte(""),
				expectingEOF: true,
			},
			{
				desc:         "whence SeekStart with EOF",
				readBytes:    2,
				offset:       5,
				whence:       io.SeekStart,
				expected:     content[5:7],
				expectingEOF: true,
			},
			{
				desc:         "whence SeekCurrent with EOF",
				readBytes:    3,
				offset:       4,
				whence:       io.SeekCurrent,
				expected:     content[4:7],
				expectingEOF: true,
			},
			{
				desc:         "whence SeekEnd with EOF",
				readBytes:    3,
				offset:       -3,
				whence:       io.SeekEnd,
				expected:     content[len(content)-3:],
				expectingEOF: true,
			},
			{
				desc:         "zero offset and read more than fits the buffer",
				readBytes:    100,
				offset:       0,
				whence:       io.SeekStart,
				expected:     []byte("content"),
				expectingEOF: true,
			},
			{
				desc:         "whence SeekStart offset and read more than fits the buffer",
				readBytes:    100,
				offset:       1,
				whence:       io.SeekStart,
				expected:     []byte("ontent"),
				expectingEOF: true,
			},
			{
				desc:         "whence SeekCurrent offset and read more than fits the buffer",
				readBytes:    100,
				offset:       1,
				whence:       io.SeekCurrent,
				expected:     []byte("ontent"),
				expectingEOF: true,
			},
			{
				desc:         "whence SeekEnd to the end of the file and then read",
				readBytes:    10,
				offset:       0,
				whence:       io.SeekEnd,
				expected:     []byte(""),
				expectingEOF: true,
			},
			{
				desc:         "whence SeekEnd past the end of the file and then read",
				readBytes:    10,
				offset:       1,
				whence:       io.SeekEnd,
				expected:     []byte(""),
				expectingEOF: true,
			},
		}
		for _, f := range fixtures {
			f := f
			t.Run(f.desc, func(t *testing.T) {
				testFs := s3fs.New(s3cl, *bucket, s3fs.WithReadSeeker)
				data, err := testFs.Open(testFile)
				if err != nil {
					t.Fatal(err)
				}
				readSeekers := []struct {
					desc   string
					seeker io.ReadSeeker
				}{
					{desc: "file", seeker: data.(io.ReadSeeker)},
					{desc: "bytes reader", seeker: bytes.NewReader(content)},
				}
				for _, rs := range readSeekers {
					rs := rs
					t.Run(rs.desc, func(t *testing.T) {
						_, err = rs.seeker.Seek(f.offset, f.whence)
						if err != nil {
							t.Fatal(err)
						}
						var buf bytes.Buffer
						_, err := io.CopyN(&buf, rs.seeker, int64(f.readBytes))
						if err != nil && !errors.Is(err, io.EOF) {
							t.Fatal(err)
						}
						if buf.String() != string(f.expected) {
							t.Errorf("expected %s, got %s", f.expected, buf.String())
						}
						if f.expectingEOF {
							newlyReadBytes, err := rs.seeker.Read(make([]byte, 0))
							if newlyReadBytes != 0 {
								t.Fatalf("Read returned unexpected number of bytes: expected 0, got %d", newlyReadBytes)
							}
							if err == nil {
								t.Fatalf("Expected io.EOF error, got nil")
							}
							if !errors.Is(err, io.EOF) {
								t.Fatal(err)
							}
						}
					})
				}
			})
		}
	})
	// Two consecutive seeks (second always SeekCurrent) then a read.
	t.Run("seek twice then read", func(t *testing.T) {
		fixtures := []struct {
			desc         string
			readBytes    int
			firstOffset  int64
			firstWhence  int
			secondOffset int64
			expected     []byte
			expectingEOF bool
		}{
			{
				desc:         "whence SeekStart",
				readBytes:    2,
				firstOffset:  1,
				firstWhence:  io.SeekStart,
				secondOffset: 2,
				expected:     content[3:5],
				expectingEOF: false,
			},
			{
				desc:         "whence SeekCurrent",
				readBytes:    1,
				firstOffset:  2,
				firstWhence:  io.SeekCurrent,
				secondOffset: 3,
				expected:     content[5:6],
				expectingEOF: false,
			},
			{
				desc:         "whence SeekEnd",
				readBytes:    2,
				firstOffset:  -4,
				firstWhence:  io.SeekEnd,
				secondOffset: 1,
				expected:     content[4:6],
				expectingEOF: false,
			},
			{
				desc:         "whence SeekStart with EOF",
				readBytes:    5,
				firstOffset:  1,
				firstWhence:  io.SeekStart,
				secondOffset: 2,
				expected:     content[3:],
				expectingEOF: true,
			},
			{
				desc:         "whence SeekCurrent with EOF",
				readBytes:    2,
				firstOffset:  2,
				firstWhence:  io.SeekCurrent,
				secondOffset: 3,
				expected:     content[5:],
				expectingEOF: true,
			},
			{
				desc:         "whence SeekEnd with EOF",
				readBytes:    7,
				firstOffset:  -5,
				firstWhence:  io.SeekEnd,
				secondOffset: 1,
				expected:     content[3:],
				expectingEOF: true,
			},
		}
		for _, f := range fixtures {
			f := f
			t.Run(f.desc, func(t *testing.T) {
				testFs := s3fs.New(s3cl, *bucket, s3fs.WithReadSeeker)
				data, err := testFs.Open(testFile)
				if err != nil {
					t.Fatal(err)
				}
				readSeekers := []struct {
					desc   string
					seeker io.ReadSeeker
				}{
					{desc: "file", seeker: data.(io.ReadSeeker)},
					{desc: "bytes reader", seeker: bytes.NewReader(content)},
				}
				for _, rs := range readSeekers {
					rs := rs
					t.Run(rs.desc, func(t *testing.T) {
						_, err = rs.seeker.Seek(f.firstOffset, f.firstWhence)
						if err != nil {
							t.Fatal(err)
						}
						_, err = rs.seeker.Seek(f.secondOffset, io.SeekCurrent)
						if err != nil {
							t.Fatal(err)
						}
						var buf bytes.Buffer
						_, err := io.CopyN(&buf, rs.seeker, int64(f.readBytes))
						if err != nil && !errors.Is(err, io.EOF) {
							t.Fatal(err)
						}
						if buf.String() != string(f.expected) {
							t.Errorf("expected %s, got %s", f.expected, buf.String())
						}
						if f.expectingEOF {
							newlyReadBytes, err := rs.seeker.Read(make([]byte, 0))
							if newlyReadBytes != 0 {
								t.Fatalf("Read returned unexpected number of bytes: expected 0, got %d", newlyReadBytes)
							}
							if err == nil {
								t.Fatalf("Expected io.EOF error, got nil")
							}
							if !errors.Is(err, io.EOF) {
								t.Fatal(err)
							}
						}
					})
				}
			})
		}
	})
}
func TestFS(t *testing.T) {
s3cl, wrappedCl := newClient(t)
const testFile = "file.txt"
content := []byte("content")
allFiles := [...]string{
testFile,
"dir/a.txt",
"dir1/file1.txt",
"dir1/file2.txt",
"dir1/dir11/file.txt",
"dir2/file1.txt",
"x/file1.txt",
"y.txt",
"y2.txt",
"y3.txt",
"z/z/file1.txt",
"a.txt",
"a/b.txt",
}
createBucket(t, s3cl, *bucket)
cleanBucket(t, s3cl, *bucket)
t.Run("list empty bucket", func(t *testing.T) {
fi, err := s3fs.New(wrappedCl, *bucket).Open(".")
if err != nil {
t.Errorf("want err to be nil; got %v", err)
}
dir := fi.(fs.ReadDirFile)
fixtures := []struct {
desc string
n int
err error
}{
{"n > 0", 1, io.EOF},
{"n <= 0", -1, nil},
}
for _, f := range fixtures {
f := f
t.Run(f.desc, func(t *testing.T) {
des, err := dir.ReadDir(f.n)
if err != f.err {
t.Errorf("want err to be %v; got %v", f.err, err)
}
if des == nil {
t.Error("want des to not be a nil slice")
}
if len(des) > 0 {
t.Errorf("expected the directory to be empty; got %d elements", len(des))
}
})
}
})
for _, f := range allFiles {
writeFile(t, s3cl, *bucket, f, content)
}
t.Cleanup(func() {
cleanBucket(t, s3cl, *bucket)
t.Log("test stats:")
t.Log("ListObjects calls:", atomic.LoadInt64(&listC))
t.Log("GetObject calls:", atomic.LoadInt64(&getC))
})
testFn := func(t *testing.T, s3fs *s3fs.S3FS) {
t.Run("testing fstest", func(t *testing.T) {
if testing.Short() {
t.Skip("short test enabled")
}
t.Parallel()
if err := fstest.TestFS(s3fs, allFiles[:]...); err != nil {
t.Fatal(err)
}
})
t.Run("readfile", func(t *testing.T) {
t.Parallel()
t.Run("success", func(t *testing.T) {
data, err := fs.ReadFile(s3fs, testFile)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(data, []byte("content")) {
t.Errorf("expect: %s; got %s", data, []byte("content"))
}
})
t.Run("error", func(t *testing.T) {
t.Run("invalid path", func(t *testing.T) {
_, err := fs.ReadFile(s3fs, "/")
if err == nil {
t.Fatal("expected error")
}
var pathErr *fs.PathError
if !errors.As(err, &pathErr) {
t.Fatal("expected err to be *PathError")
}
expected := fs.PathError{
Op: "open",
Path: "/",
Err: fs.ErrInvalid,
}
if *pathErr != expected {
t.Fatalf("want %v; got %v", expected, *pathErr)
}
})
t.Run("directory", func(t *testing.T) {
_, err := fs.ReadFile(s3fs, ".")
if err == nil {
t.Fatal("expected error")
}
var perr *fs.PathError
if !errors.As(err, &perr) {
t.Fatal("expected err to be *PathError")
}
if perr.Op != "read" {
t.Errorf("want %v; got %v", "read", perr.Op)
}
if perr.Path != "." {
t.Errorf("want %v; got %v", ".", perr.Path)
}
if perr.Err.Error() != "is a directory" {
t.Errorf("want %v; got %v", "is a directory", perr.Err.Error())
}
})
})
})
t.Run("stat file", func(t *testing.T) {
t.Parallel()
test := func(t *testing.T, fi fs.FileInfo) {
t.Helper()
if fi.IsDir() {
t.Error("expected false")
}
if fi.Mode() != 0 {
t.Errorf("want %d; got %d", 0, fi.Mode())
}
if fi.Sys() != nil {
t.Error("expected Sys to be nil")
}
}
t.Run("file stat", func(t *testing.T) {
f, err := s3fs.Open(testFile)
if err != nil {
t.Fatal("expected err to be nil")
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
t.Fatal("expected err to be nil")
}
test(t, fi)
})
t.Run("fs stat", func(t *testing.T) {
fi, err := s3fs.Stat(testFile)
if err != nil {
t.Fatal("expected err to be nil")
}
test(t, fi)
})
t.Run("invalid path", func(t *testing.T) {
_, err := s3fs.Stat("/")
var pathErr *fs.PathError
if !errors.As(err, &pathErr) {
t.Fatal("expected err to be *PathError")
}
expected := fs.PathError{
Op: "stat",
Path: "/",
Err: fs.ErrInvalid,
}
if *pathErr != expected {
t.Fatalf("want %v; got %v", expected, *pathErr)
}
})
t.Run("does not exist", func(t *testing.T) {
_, err := s3fs.Stat("not-existing")
var pathErr *fs.PathError
if !errors.As(err, &pathErr) {
t.Fatal("expected err to be *PathError")
}
expected := fs.PathError{
Op: "stat",
Path: "not-existing",
Err: fs.ErrNotExist,
}
if *pathErr != expected {
t.Fatalf("want %v; got %v", expected, *pathErr)
}
})
})
t.Run("stat dir", func(t *testing.T) {
t.Parallel()
test := func(t *testing.T, fi fs.FileInfo) {
t.Helper()
if !fi.IsDir() {
t.Error("expected true")
}
if fi.Mode() != fs.ModeDir {
t.Errorf("want %d; got %d", fs.ModeDir, fi.Mode())
}
if fi.Sys() != nil {
t.Error("expected Sys to be nil")
}
}
t.Run("top level", func(t *testing.T) {
fi, err := s3fs.Stat(".")
if err != nil {
t.Fatal("expected err to be nil")
}
test(t, fi)
if fi.Name() != "." {
t.Errorf("want name=%q; got %q", ".", fi.Name())
}
})
t.Run("open z", func(t *testing.T) {
fi, err := s3fs.Stat("z")
if err != nil {
t.Fatal("expected err to be nil")
}
test(t, fi)
})
})
t.Run("readdir", func(t *testing.T) {
t.Parallel()
t.Run("success", func(t *testing.T) {
fixtures := []struct {
desc string
path string
names []string
modes []fs.FileMode
isDir []bool
size []int
}{
{
desc: "top level",
path: ".",
names: []string{"a", "a.txt", "dir", "dir1", "dir2", testFile, "x", "y.txt", "y2.txt", "y3.txt", "z"},
modes: []fs.FileMode{fs.ModeDir, 0, fs.ModeDir, fs.ModeDir, fs.ModeDir, 0, fs.ModeDir, 0, 0, 0, fs.ModeDir},
isDir: []bool{true, false, true, true, true, false, true, false, false, false, true},
size: []int{0, len(content), 0, 0, 0, len(content), 0, len(content), len(content), len(content), 0},
},
{
desc: "dir1",
path: "dir1",
names: []string{"dir11", "file1.txt", "file2.txt"},
modes: []fs.FileMode{fs.ModeDir, 0, 0},
isDir: []bool{true, false, false},
size: []int{0, len(content), len(content)},
},
{
desc: "dir11",
path: "dir1/dir11",
names: []string{"file.txt"},
modes: []fs.FileMode{0},
isDir: []bool{false},
size: []int{len(content)},
},
}
for _, f := range fixtures {
f := f
test := func(t *testing.T, des []fs.DirEntry) {
var (
names []string
modes []fs.FileMode
isDir []bool
size []int
)
for _, de := range des {
fi, err := de.Info()
if err != nil {
t.Fatal("expected nil; got ", err)
}
names = append(names, de.Name())
modes = append(modes, fi.Mode())
isDir = append(isDir, fi.IsDir())
size = append(size, int(fi.Size()))
}
for _, v := range []struct {
desc string
want, got interface{}
}{
{"names", f.names, names},
{"modes", f.modes, modes},
{"isDir", f.isDir, isDir},
{"size", f.size, size},
} {
if !reflect.DeepEqual(v.want, v.got) {
t.Errorf("%s: expected %v; got %v", v.desc, v.want, v.got)
}
}
}
t.Run("fs.ReadDir "+f.desc, func(t *testing.T) {
des, err := s3fs.ReadDir(f.path)
if err != nil {
t.Fatalf("expected err to be nil: %v", err)
}
test(t, des)
})
t.Run("file.ReadDir "+f.desc, func(t *testing.T) {
f, err := s3fs.Open(f.path)
if err != nil {
t.Fatalf("expected err to be nil: %v", err)
}
d, ok := f.(fs.ReadDirFile)
if !ok {
t.Fatal("expected file to be a directory")
}
des, err := d.ReadDir(-1)
if err != nil && !errors.Is(err, io.EOF) {
t.Fatalf("expected err to be nil: %v", err)
}
test(t, des)
})
}
})
t.Run("error", func(t *testing.T) {
fixtures := []struct {
desc string
path string
err fs.PathError
}{
{
desc: "invalid path",
path: "/",
err: fs.PathError{Op: "readdir", Path: "/", Err: fs.ErrInvalid},
},
{
desc: "does not exist",
path: "notexist",
err: fs.PathError{Op: "readdir", Path: "notexist", Err: fs.ErrNotExist},
},
{
desc: "does not exist",
path: "dir1/notexist",
err: fs.PathError{Op: "readdir", Path: "dir1/notexist", Err: fs.ErrNotExist},
},
{
desc: "readDir on a file",
path: "dir1/file1.txt",
err: fs.PathError{Op: "readdir", Path: "dir1/file1.txt", Err: errors.New("not a dir")},
},
}
for _, f := range fixtures {
t.Run(f.desc, func(t *testing.T) {
_, err := s3fs.ReadDir(f.path)
var perr *fs.PathError
if !errors.As(err, &perr) {
t.Fatalf("expected err to be *fs.PathError; got %[1]T: %[1]v", err)
}
if perr.Op != f.err.Op {
t.Errorf("want %v; got %v", f.err.Op, perr.Op)
}
if perr.Path != f.err.Path {
t.Errorf("want %v; got %v", f.err.Path, perr.Path)
}
if perr.Err.Error() != f.err.Err.Error() {
t.Errorf("want %v; got %v", f.err.Err.Error(), perr.Err.Error())
}
})
}
})
})
t.Run("subfs", func(t *testing.T) {
t.Run("existing", func(t *testing.T) {
fsys, err := fs.Sub(s3fs, "dir1/dir11")
if err != nil {
t.Fatal(err)
}
t.Run("fs.Stat", func(t *testing.T) {
fi, err := fs.Stat(fsys, "file.txt")
if err != nil {
t.Fatal(err)
}
if fi.Name() != "file.txt" {
t.Errorf("expected file.txt got %s", fi.Name())
}
t.Run("not exist", func(t *testing.T) {
_, err = fs.Stat(fsys, "not-exist")
var perr *fs.PathError
if !errors.As(err, &perr) {
t.Fatalf("expected err to be PathError: got %#v", err)
}
// currently we don't implement fs.SubFS.
// fs.Sub calls open instead of Stat.
if perr.Op != "open" {
t.Errorf("expected op to be open; got %s", perr.Op)
}
})
})
t.Run("fs.ReadDir", func(t *testing.T) {
files, err := fs.ReadDir(fsys, ".")
if err != nil {
t.Fatal(err)
}
if len(files) != 1 {
t.Fatalf("expected 1 file in dir1/dir11; got %d", len(files))
}
if files[0].Name() != "file.txt" {
t.Errorf("expected file to be file.txt; got %s", files[0].Name())
}
t.Run("not exist", func(t *testing.T) {
_, err := fs.ReadDir(fsys, "not-exist")
var perr *fs.PathError
if !errors.As(err, &perr) {
t.Fatalf("expected err to be PathError: got %#v", err)
}
if perr.Op != "readdir" {
t.Errorf("expected op to be readdir; got %s", perr.Op)
}
})
})
t.Run("open", func(t *testing.T) {
f, err := fsys.Open(".")
if err != nil {
t.Fatal(err)
}
defer f.Close()
dir, ok := f.(fs.ReadDirFile)
if !ok {
t.Fatal("expected file to be a directory")
}
fi, err := dir.Stat()
if err != nil {
t.Fatal(err)
}
if fi.Name() != "dir11" {
t.Errorf("expected dir name to bedir11; got %s", fi.Name())
}
files, err := dir.ReadDir(-1)
if err != nil {
t.Fatal(err)
}
if len(files) != 1 {
t.Fatalf("expected 1 file in dir1/dir11; got %d", len(files))
}
if files[0].Name() != "file.txt" {
t.Errorf("expected file to be file.txt; got %s", files[0].Name())
}
})
})
})
}
fixtures := []struct {
desc string
s3fs *s3fs.S3FS
}{
{desc: "standard", s3fs: s3fs.New(wrappedCl, *bucket)},
{desc: "max keys = 1", s3fs: s3fs.New(&client{MaxKeys: ptr[int32](1), Client: wrappedCl}, *bucket)},
{desc: "max keys = 2", s3fs: s3fs.New(&client{MaxKeys: ptr[int32](2), Client: wrappedCl}, *bucket)},
{desc: "max keys = 3", s3fs: s3fs.New(&client{MaxKeys: ptr[int32](3), Client: wrappedCl}, *bucket)},
}
for _, f := range fixtures {
f := f
t.Run(f.desc, func(t *testing.T) {
t.Parallel()
testFn(t, f.s3fs)
})
}
}
// TestDirRead verifies directory listing through fs.ReadDirFile.ReadDir
// against a mocked S3 client. Each fixture describes the sequence of
// ListObjects pages the mock serves (CommonPrefixes become subdirectories,
// Contents become files) and the batches of entries ReadDir(n) is expected
// to produce — merged across pages and sorted by name.
func TestDirRead(t *testing.T) {
	// fileinfo is a minimal view of a directory entry used for comparison.
	type fileinfo struct {
		name  string
		isDir bool
	}
	tests := []struct {
		desc     string
		n        int                    // batch size passed to ReadDir; <= 0 reads everything in one call
		outs     []s3.ListObjectsOutput // canned pages served by the mock client, in order
		expected [][]fileinfo           // expected entries, grouped per ReadDir call
	}{
		{
			desc: "all in one request - dir first",
			n:    1,
			outs: []s3.ListObjectsOutput{
				newListOutput([]string{"a", "c", "e"}, []string{"b", "d", "f"}),
			},
			expected: [][]fileinfo{
				{{"a", true}},
				{{"b", false}},
				{{"c", true}},
				{{"d", false}},
				{{"e", true}},
				{{"f", false}},
			},
		},
		{
			desc: "all in one request - n = 0",
			n:    0,
			outs: []s3.ListObjectsOutput{
				newListOutput([]string{"a", "c", "e"}, []string{"b", "d", "f"}),
			},
			// n = 0 drains the directory in a single batch.
			expected: [][]fileinfo{
				{
					{"a", true},
					{"b", false},
					{"c", true},
					{"d", false},
					{"e", true},
					{"f", false},
				},
			},
		},
		{
			// NOTE(review): desc looks copy-pasted — the mock serves four
			// pages here, not one.
			desc: "all in one request - n = 2",
			n:    2,
			outs: []s3.ListObjectsOutput{
				newListOutput([]string{"a"}, nil),
				newListOutput([]string{"c"}, []string{"b", "d"}),
				newListOutput([]string{"e"}, nil),
				newListOutput(nil, []string{"f"}),
			},
			expected: [][]fileinfo{
				{
					{"a", true},
					{"b", false},
				},
				{
					{"c", true},
					{"d", false},
				},
				{
					{"e", true},
					{"f", false},
				},
			},
		},
		{
			desc: "one per request - dir first",
			n:    1,
			outs: []s3.ListObjectsOutput{
				newListOutput([]string{"a"}, nil),
				newListOutput(nil, []string{"b"}),
				newListOutput([]string{"c"}, []string{"d"}),
				newListOutput([]string{"e"}, nil),
				newListOutput(nil, []string{"f"}),
			},
			expected: [][]fileinfo{
				{{"a", true}},
				{{"b", false}},
				{{"c", true}},
				{{"d", false}},
				{{"e", true}},
				{{"f", false}},
			},
		},
		{
			// Files and dirs arrive interleaved the "wrong" way; output must
			// still come back sorted by name.
			desc: "all in one request - file first",
			n:    1,
			outs: []s3.ListObjectsOutput{
				newListOutput([]string{"b", "d", "f"}, []string{"a", "c", "e"}),
			},
			expected: [][]fileinfo{
				{{"a", false}},
				{{"b", true}},
				{{"c", false}},
				{{"d", true}},
				{{"e", false}},
				{{"f", true}},
			},
		},
		{
			// The same prefix may show up on multiple pages; it must be
			// reported only once.
			desc: "with dir duplicates",
			n:    1,
			outs: []s3.ListObjectsOutput{
				newListOutput([]string{"a", "c"}, []string{"b"}),
				newListOutput([]string{"c", "e", "c"}, []string{"d"}),
				newListOutput([]string{"e", "a"}, []string{"f"}),
			},
			expected: [][]fileinfo{
				{{"a", true}},
				{{"b", false}},
				{{"c", true}},
				{{"d", false}},
				{{"e", true}},
				{{"f", false}},
			},
		},
		{
			desc: "all in one request - dirs only",
			n:    1,
			outs: []s3.ListObjectsOutput{
				newListOutput([]string{"a", "c", "e"}, nil),
			},
			expected: [][]fileinfo{
				{{"a", true}},
				{{"c", true}},
				{{"e", true}},
			},
		},
		{
			desc: "single dir per request - dirs only",
			n:    1,
			outs: []s3.ListObjectsOutput{
				newListOutput([]string{"a"}, nil),
				newListOutput([]string{"c"}, nil),
				newListOutput([]string{"e"}, nil),
			},
			expected: [][]fileinfo{
				{{"a", true}},
				{{"c", true}},
				{{"e", true}},
			},
		},
	}
	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			// Open the bucket root backed by the canned responses.
			f, err := s3fs.New(&mockClient{
				outs: test.outs,
			}, "test").Open(".")
			if err != nil {
				t.Fatal("expected err to be nil; got ", err)
			}
			fi, err := f.Stat()
			if err != nil {
				t.Fatal("expected err to be nil; got ", err)
			}
			if !fi.IsDir() {
				t.Fatal("expected the file to be a directory")
			}
			// Drain the directory in batches of test.n, collecting each
			// batch separately so ordering across calls is verified too.
			var fis [][]fileinfo
			for {
				files, err := f.(fs.ReadDirFile).ReadDir(test.n)
				// io.EOF just marks the end of the listing; anything else
				// is a failure.
				if err != nil && !errors.Is(err, io.EOF) {
					t.Fatal("did not expect err:", err)
				}
				if len(files) > 0 {
					var out []fileinfo
					for _, f := range files {
						out = append(out, fileinfo{f.Name(), f.IsDir()})
					}
					fis = append(fis, out)
				}
				// n <= 0 returns everything in one call, so stop after it.
				if test.n <= 0 || errors.Is(err, io.EOF) {
					break
				}
			}
			if !reflect.DeepEqual(fis, test.expected) {
				t.Errorf("want %v; got %v", test.expected, fis)
			}
		})
	}
}
// mockClient is an s3fs.Client whose ListObjects replays a fixed sequence of
// canned responses. The embedded s3fs.Client is left nil by the tests in this
// file, so any other interface method would panic if called.
type mockClient struct {
	s3fs.Client
	outs []s3.ListObjectsOutput // responses returned in order, one per call
	i    int                    // index of the next response to serve
}
// ListObjects replays the pre-canned outputs one at a time; once they are
// exhausted it keeps returning an empty, non-truncated listing.
func (c *mockClient) ListObjects(ctx context.Context, in *s3.ListObjectsInput, _ ...func(*s3.Options)) (*s3.ListObjectsOutput, error) {
	idx := c.i
	c.i++
	if idx >= len(c.outs) {
		return &s3.ListObjectsOutput{
			IsTruncated: ptr(false),
		}, nil
	}
	return &c.outs[idx], nil
}
// newListOutput builds a canned ListObjects response: every entry of dirs
// becomes a CommonPrefix (a subdirectory) and every entry of files becomes
// an object with zero size and a zero modification time.
func newListOutput(dirs, files []string) (out s3.ListObjectsOutput) {
	for _, name := range dirs {
		prefix := types.CommonPrefix{
			Prefix: ptr(name),
		}
		out.CommonPrefixes = append(out.CommonPrefixes, prefix)
	}
	for _, name := range files {
		obj := types.Object{
			Key:          ptr(name),
			Size:         ptr[int64](0),
			LastModified: ptr(time.Time{}),
		}
		out.Contents = append(out.Contents, obj)
	}
	return out
}
// Client re-exports s3fs.Client so the test wrappers below
// (modTimeTruncateClient, metricClient) can embed and wrap one another.
type Client interface {
	s3fs.Client
}
// newClient builds the raw S3 client pointed at the test endpoint, plus a
// wrapped variant that truncates modification times and records call metrics.
func newClient(t *testing.T) (*s3.Client, Client) {
	t.Helper()

	httpClient := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: *skipVerify,
			},
		},
	}

	// Static credentials from the test flags/env; path-style addressing is
	// required by localstack/minio.
	creds := aws.CredentialsProviderFunc(func(ctx context.Context) (aws.Credentials, error) {
		return aws.Credentials{
			AccessKeyID:     accessKeyID,
			SecretAccessKey: secretKey,
		}, nil
	})

	opts := s3.Options{
		BaseEndpoint: endpoint,
		Credentials:  creds,
		Region:       region,
		UsePathStyle: true,
		HTTPClient:   httpClient,
	}

	raw := s3.New(opts)
	return raw, &modTimeTruncateClient{&metricClient{raw}}
}
// writeFile uploads data to s3://bucket/name, failing the test on error.
func writeFile(t *testing.T, cl *s3.Client, bucket, name string, data []byte) {
	t.Helper()
	uploader := manager.NewUploader(cl)
	// bytes.NewReader reads data directly; the previous
	// strings.NewReader(string(data)) made a full extra copy of the payload.
	_, err := uploader.Upload(context.Background(), &s3.PutObjectInput{
		Body:   bytes.NewReader(data),
		Bucket: &bucket,
		Key:    &name,
	})
	if err != nil {
		t.Fatal(err)
	}
}
// deleteFile removes s3://bucket/name, failing the test if the call errors.
func deleteFile(t *testing.T, cl *s3.Client, bucket, name string) {
	t.Helper()
	in := &s3.DeleteObjectInput{
		Bucket: ptr(bucket),
		Key:    ptr(name),
	}
	if _, err := cl.DeleteObject(context.Background(), in); err != nil {
		t.Fatal(err)
	}
}
// createBucket creates the bucket, treating "already owned by you" as
// success so tests can be re-run against the same endpoint.
func createBucket(t *testing.T, cl *s3.Client, bucket string) {
	t.Helper()
	_, err := cl.CreateBucket(context.Background(), &s3.CreateBucketInput{
		Bucket: ptr(bucket),
	})
	if err == nil {
		return
	}
	var owned *types.BucketAlreadyOwnedByYou
	if !errors.As(err, &owned) {
		t.Fatal(err)
	}
}
// cleanBucket deletes every object in bucket so tests start from an empty
// state. It pages through ListObjects results; the previous implementation
// only processed the first page, silently leaving objects behind whenever
// the listing was truncated.
func cleanBucket(t *testing.T, cl *s3.Client, bucket string) {
	t.Helper()
	var marker *string
	for {
		out, err := cl.ListObjects(
			context.Background(),
			&s3.ListObjectsInput{
				Bucket: ptr(bucket),
				Marker: marker,
			})
		if err != nil {
			t.Fatal("failed to list objects:", err)
		}
		for _, o := range out.Contents {
			_, err := cl.DeleteObject(
				context.Background(),
				&s3.DeleteObjectInput{
					Bucket: ptr(bucket),
					Key:    o.Key,
				})
			if err != nil {
				t.Error("failed to delete file:", err)
			}
		}
		if out.IsTruncated == nil || !*out.IsTruncated || len(out.Contents) == 0 {
			return
		}
		// ListObjects (v1 API) returns NextMarker only when a delimiter is
		// set; without one, the last returned key is the next page's marker.
		marker = out.Contents[len(out.Contents)-1].Key
	}
}
func envDefault(env, def string) string {
if os.Getenv(env) == "" {
return def
}
return os.Getenv(env)
}
// client wraps an s3fs.Client and, when MaxKeys is set, caps the page size
// of every ListObjects call to force multi-page directory listings in tests.
type client struct {
	MaxKeys *int32 // when non-nil, overrides MaxKeys on every ListObjects input
	s3fs.Client
}
// ListObjects forwards to the wrapped client, forcing MaxKeys when
// configured. The input is copied before modification so the caller's
// struct is never mutated (the previous version wrote through in directly).
func (c *client) ListObjects(ctx context.Context, in *s3.ListObjectsInput, _ ...func(*s3.Options)) (*s3.ListObjectsOutput, error) {
	if c.MaxKeys != nil {
		cp := *in
		cp.MaxKeys = c.MaxKeys
		in = &cp
	}
	return c.Client.ListObjects(ctx, in)
}
// modTimeTruncateClient wraps a Client and truncates the modification times
// in ListObjects results to whole seconds; see the comment on its
// ListObjects method for the Minio-specific reason.
type modTimeTruncateClient struct {
	Client
}
// Minio returns modTime that includes microseconds if data comes from ListObjects
// while data coming from GetObject's modTimes are accurate down to seconds.
// To make this test pass while using Minio we build this client that truncates
// modTimes to Second.
func (c *modTimeTruncateClient) ListObjects(ctx context.Context, in *s3.ListObjectsInput, _ ...func(*s3.Options)) (*s3.ListObjectsOutput, error) {
	// Forward the caller's context: the previous version passed
	// context.Background(), discarding cancellation and deadlines.
	out, err := c.Client.ListObjects(ctx, in)
	if err != nil {
		return out, err
	}
	for i, o := range out.Contents {
		out.Contents[i].LastModified = ptr(o.LastModified.Truncate(time.Second))
	}
	return out, err
}
var (
	// global metrics for this test.
	// listC counts ListObjects calls and getC counts GetObject calls made
	// through metricClient; both are updated atomically and read by tests.
	listC int64
	getC  int64
)
// metricClient wraps a Client and counts ListObjects/GetObject calls in the
// package-level listC/getC counters.
type metricClient struct {
	Client
}
// ListObjects atomically bumps the global listC counter and delegates to the
// wrapped client; the per-call option functions are intentionally dropped.
func (c *metricClient) ListObjects(ctx context.Context, in *s3.ListObjectsInput, _ ...func(*s3.Options)) (*s3.ListObjectsOutput, error) {
	atomic.AddInt64(&listC, 1)
	return c.Client.ListObjects(ctx, in)
}
// GetObject atomically bumps the global getC counter and delegates to the
// wrapped client using the caller's context (the previous version discarded
// ctx by passing context.Background()). optFns are dropped deliberately,
// matching the other wrappers in this file.
func (c *metricClient) GetObject(ctx context.Context, in *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) {
	atomic.AddInt64(&getC, 1)
	return c.Client.GetObject(ctx, in)
}
// ptr returns a pointer to a fresh copy of v.
func ptr[T any](v T) *T {
	p := v
	return &p
}
================================================
FILE: go.mod
================================================
module github.com/jszwec/s3fs/v2
go 1.21
require (
github.com/aws/aws-sdk-go-v2 v1.24.0
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7
github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5
)
require (
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 // indirect
github.com/aws/smithy-go v1.19.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
)
================================================
FILE: go.sum
================================================
github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk=
github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo=
github.com/aws/aws-sdk-go-v2/config v1.26.1 h1:z6DqMxclFGL3Zfo+4Q0rLnAZ6yVkzCRxhRMsiRQnD1o=
github.com/aws/aws-sdk-go-v2/config v1.26.1/go.mod h1:ZB+CuKHRbb5v5F0oJtGdhFTelmrxd4iWO1lf0rQwSAg=
github.com/aws/aws-sdk-go-v2/credentials v1.16.12 h1:v/WgB8NxprNvr5inKIiVVrXPuuTegM+K8nncFkr1usU=
github.com/aws/aws-sdk-go-v2/credentials v1.16.12/go.mod h1:X21k0FjEJe+/pauud82HYiQbEr9jRKY3kXEIQ4hXeTQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7 h1:FnLf60PtjXp8ZOzQfhJVsqF0OtYKQZWQfqOLshh8YXg=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7/go.mod h1:tDVvl8hyU6E9B8TrnNrZQEVkQlB8hjJwcgpPhgtlnNg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9/go.mod h1:Xjqy+Nyj7VDLBtCMkQYOw1QYfAEZCVLrfI0ezve8wd4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 h1:N94sVhRACtXyVcjXxrwK1SKFIJrA9pOJ5yu2eSHnmls=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9/go.mod h1:hqamLz7g1/4EJP+GH5NBhcUMLjW+gKLQabgyz6/7WAU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 h1:ugD6qzjYtB7zM5PN/ZIeaAIyefPaD82G8+SJopgvUpw=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9/go.mod h1:YD0aYBWCrPENpHolhKw2XDlTIWae2GKXT1T4o6N6hiM=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 h1:/90OR2XbSYfXucBMJ4U14wrjlfleq/0SB6dZDPncgmo=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9/go.mod h1:dN/Of9/fNZet7UrQQ6kTDo/VSwKPIq94vjlU16bRARc=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJR8CSImIVCONRi4g0Su3J+TSTbS7G0pUeMU=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 h1:iEAeF6YC3l4FzlJPP9H3Ko1TXpdjdqWffxXjp8SY6uk=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9/go.mod h1:kjsXoK23q9Z/tLBrckZLLyvjhZoS+AGrzqzUfEClvMM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 h1:Keso8lIOS+IzI2MkPZyK6G0LYcK3My2LQ+T5bxghEAY=
github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA=
github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 h1:ldSFWz9tEHAwHNmjx2Cvy1MjP5/L9kNoR0skc6wyOOM=
github.com/aws/aws-sdk-go-v2/service/sso v1.18.5/go.mod h1:CaFfXLYL376jgbP7VKC96uFcU8Rlavak0UlAwk1Dlhc=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5/go.mod h1:W+nd4wWDVkSUIox9bacmkBP5NMFQeTJ/xqNabpzSR38=
github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 h1:5UYvv8JUvllZsRnfrcMQ+hJ9jNICmcgKPAO1CER25Wg=
github.com/aws/aws-sdk-go-v2/service/sts v1.26.5/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU=
github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
================================================
FILE: test/localstack/docker-compose.yml
================================================
version: '3.8'
services:
localstack:
container_name: "localstack"
image: localstack/localstack:0.14.0
network_mode: bridge
ports:
- "4566:4566"
- "4571:4571"
environment:
- SERVICES=s3
================================================
FILE: test/minio/docker-compose.yml
================================================
version: '3.8'
services:
minio:
image: minio/minio
ports:
- "9000:9000"
environment:
MINIO_ROOT_USER: minioadmin
MINIO_ROOT_PASSWORD: minioadmin
command: server /data
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
gitextract_kmeqcja3/
├── .github/
│ └── workflows/
│ └── go.yml
├── .gitignore
├── LICENSE
├── README.md
├── dir.go
├── file.go
├── fs.go
├── fs_test.go
├── go.mod
├── go.sum
└── test/
├── localstack/
│ └── docker-compose.yml
└── minio/
└── docker-compose.yml
SYMBOL INDEX (64 symbols across 4 files)
FILE: dir.go
type dir (line 18) | type dir struct
method Stat (line 28) | func (d *dir) Stat() (fs.FileInfo, error) {
method Read (line 32) | func (d *dir) Read([]byte) (int, error) {
method Close (line 40) | func (d *dir) Close() error {
method ReadDir (line 44) | func (d *dir) ReadDir(n int) (des []fs.DirEntry, err error) {
method readAll (line 80) | func (d *dir) readAll() error {
method readNext (line 94) | func (d *dir) readNext() error {
method mergeDirFiles (line 173) | func (d *dir) mergeDirFiles() {
type dirEntry (line 204) | type dirEntry struct
method Type (line 208) | func (de dirEntry) Type() fs.FileMode { return de.Mode().Type...
method Info (line 209) | func (de dirEntry) Info() (fs.FileInfo, error) { return de.fileInfo, n...
function min (line 211) | func min(a, b int) int {
function derefInt64 (line 218) | func derefInt64(n *int64) int64 {
function derefTime (line 225) | func derefTime(t *time.Time) time.Time {
FILE: file.go
type file (line 23) | type file struct
method Read (line 78) | func (f *file) Read(p []byte) (int, error) {
method Seek (line 84) | func (f *file) Seek(offset int64, whence int) (int64, error) {
method Stat (line 151) | func (f file) Stat() (fs.FileInfo, error) { return f.stat() }
function openFile (line 34) | func openFile(cl Client, bucket string, name string) (fs.File, error) {
function getStatFunc (line 57) | func getStatFunc(cl Client, bucket string, name string, s3ObjOutput s3.G...
type fileInfo (line 153) | type fileInfo struct
method Name (line 160) | func (fi fileInfo) Name() string { return path.Base(fi.name) }
method Size (line 161) | func (fi fileInfo) Size() int64 { return fi.size }
method Mode (line 162) | func (fi fileInfo) Mode() fs.FileMode { return fi.mode }
method ModTime (line 163) | func (fi fileInfo) ModTime() time.Time { return fi.modTime }
method IsDir (line 164) | func (fi fileInfo) IsDir() bool { return fi.mode.IsDir() }
method Sys (line 165) | func (fi fileInfo) Sys() interface{} { return nil }
type eofReader (line 167) | type eofReader struct
method Read (line 169) | func (eofReader) Read([]byte) (int, error) { return 0, io.EOF }
function ptr (line 171) | func ptr[T any](v T) *T {
FILE: fs.go
type Option (line 23) | type Option
function WithReadSeeker (line 31) | func WithReadSeeker(fsys *S3FS) { fsys.readSeeker = true }
type Client (line 36) | type Client interface
type S3FS (line 47) | type S3FS struct
method Open (line 68) | func (f *S3FS) Open(name string) (fs.File, error) {
method Stat (line 114) | func (f *S3FS) Stat(name string) (fs.FileInfo, error) {
method ReadDir (line 127) | func (f *S3FS) ReadDir(name string) ([]fs.DirEntry, error) {
function New (line 54) | func New(cl Client, bucket string, opts ...Option) *S3FS {
function stat (line 139) | func stat(s3cl Client, bucket, name string) (fs.FileInfo, error) {
function openDir (line 198) | func openDir(s3cl Client, bucket, name string) (fs.ReadDirFile, error) {
function isNotFoundErr (line 210) | func isNotFoundErr(err error) bool {
type fileNoSeek (line 225) | type fileNoSeek struct
FILE: fs_test.go
function TestMain (line 39) | func TestMain(m *testing.M) {
function TestSeeker (line 44) | func TestSeeker(t *testing.T) {
function TestFS (line 537) | func TestFS(t *testing.T) {
function TestDirRead (line 1068) | func TestDirRead(t *testing.T) {
type mockClient (line 1261) | type mockClient struct
method ListObjects (line 1267) | func (c *mockClient) ListObjects(ctx context.Context, in *s3.ListObjec...
function newListOutput (line 1278) | func newListOutput(dirs, files []string) (out s3.ListObjectsOutput) {
type Client (line 1295) | type Client interface
function newClient (line 1299) | func newClient(t *testing.T) (*s3.Client, Client) {
function writeFile (line 1326) | func writeFile(t *testing.T, cl *s3.Client, bucket, name string, data []...
function deleteFile (line 1340) | func deleteFile(t *testing.T, cl *s3.Client, bucket, name string) {
function createBucket (line 1354) | func createBucket(t *testing.T, cl *s3.Client, bucket string) {
function cleanBucket (line 1369) | func cleanBucket(t *testing.T, cl *s3.Client, bucket string) {
function envDefault (line 1394) | func envDefault(env, def string) string {
type client (line 1401) | type client struct
method ListObjects (line 1406) | func (c *client) ListObjects(ctx context.Context, in *s3.ListObjectsIn...
type modTimeTruncateClient (line 1413) | type modTimeTruncateClient struct
method ListObjects (line 1421) | func (c *modTimeTruncateClient) ListObjects(ctx context.Context, in *s...
type metricClient (line 1439) | type metricClient struct
method ListObjects (line 1443) | func (c *metricClient) ListObjects(ctx context.Context, in *s3.ListObj...
method GetObject (line 1448) | func (c *metricClient) GetObject(ctx context.Context, in *s3.GetObject...
function ptr (line 1453) | func ptr[T any](v T) *T {
Condensed preview — 12 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (65K chars).
[
{
"path": ".github/workflows/go.yml",
"chars": 1469,
"preview": "name: Go\n\non:\n push:\n branches: [ main, v1, v2 ]\n pull_request:\n branches: [ main, v1, v2 ]\n\njobs:\n localstack:"
},
{
"path": ".gitignore",
"chars": 269,
"preview": "# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n\n# Test binary, built with `go test -c`\n*.test\n\n# Ou"
},
{
"path": "LICENSE",
"chars": 1068,
"preview": "MIT License\n\nCopyright (c) 2021 Jacek Szwec\n\nPermission is hereby granted, free of charge, to any person obtaining a cop"
},
{
"path": "README.md",
"chars": 1091,
"preview": "# s3fs [](https://pkg.go.dev/github.com/jszwec/s3fs)"
},
{
"path": "dir.go",
"chars": 3716,
"preview": "package s3fs\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"io/fs\"\n\t\"path\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-"
},
{
"path": "file.go",
"chars": 3739,
"preview": "package s3fs\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"net/http\"\n\t\"path\"\n\t\"time\"\n\n\tawshttp \"github.com/aws/"
},
{
"path": "fs.go",
"chars": 4962,
"preview": "// Package s3fs provides a S3 implementation for Go1.16 filesystem interface.\npackage s3fs\n\nimport (\n\t\"context\"\n\t\"errors"
},
{
"path": "fs_test.go",
"chars": 33183,
"preview": "package s3fs_test\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"io\"\n\t\"io/fs\"\n\t\"net/http\"\n\t\"os\"\n\t\"refle"
},
{
"path": "go.mod",
"chars": 904,
"preview": "module github.com/jszwec/s3fs/v2\n\ngo 1.21\n\nrequire (\n\tgithub.com/aws/aws-sdk-go-v2 v1.24.0\n\tgithub.com/aws/aws-sdk-go-v2"
},
{
"path": "go.sum",
"chars": 5240,
"preview": "github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk=\ngithub.com/aws/aws-sdk-go-v2 v1.24."
},
{
"path": "test/localstack/docker-compose.yml",
"chars": 225,
"preview": "version: '3.8'\n\nservices:\n localstack:\n container_name: \"localstack\"\n image: localstack/localstack:0.14.0\n net"
},
{
"path": "test/minio/docker-compose.yml",
"chars": 354,
"preview": "version: '3.8'\n\nservices:\n minio:\n image: minio/minio\n ports:\n - \"9000:9000\"\n environment:\n MINIO_RO"
}
]
About this extraction
This page contains the full source code of the jszwec/s3fs GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 12 files (54.9 KB), approximately 18.9k tokens, and a symbol index with 64 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.