Use io.SeekStart/End/Current constants now for go1.7+ #2154

Nick Craig-Wood 2018-04-06 19:53:06 +01:00
parent 80588a5a6b
commit e5be471ce0
24 changed files with 64 additions and 63 deletions
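For reference (this note and snippet are illustrative, not part of the commit): Go 1.7 added the named whence constants io.SeekStart, io.SeekCurrent and io.SeekEnd, which have the same values (0, 1, 2) as the bare integers and the deprecated os.SEEK_SET, os.SEEK_CUR and os.SEEK_END that this commit replaces. A minimal standalone example of the three constants:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// io.SeekStart == 0 (os.SEEK_SET), io.SeekCurrent == 1 (os.SEEK_CUR),
	// io.SeekEnd == 2 (os.SEEK_END).
	r := strings.NewReader("0123456789abcdef")
	pos, _ := r.Seek(5, io.SeekStart) // absolute offset from the start
	fmt.Println(pos)                  // 5
	pos, _ = r.Seek(-3, io.SeekCurrent) // relative to the current offset
	fmt.Println(pos)                    // 2
	pos, _ = r.Seek(-3, io.SeekEnd) // relative to the end of the data
	fmt.Println(pos)                // 13
}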

View File

@@ -1526,7 +1526,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
}
data := r.randomBytes(t, int64(left))
_, _ = f.Write(data)
-_, _ = f.Seek(int64(0), 0)
+_, _ = f.Seek(int64(0), io.SeekStart)
r.tempFiles = append(r.tempFiles, f)
return f
@@ -1653,7 +1653,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
if err != nil {
return checkSample, err
}
-_, _ = f.Seek(offset, 0)
+_, _ = f.Seek(offset, io.SeekStart)
totalRead, err := io.ReadFull(f, checkSample)
checkSample = checkSample[:totalRead]
if err == io.EOF || err == io.ErrUnexpectedEOF {

View File

@@ -5,7 +5,6 @@ package cache
import (
"fmt"
"io"
-"os"
"sync"
"time"
@@ -327,13 +326,13 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
var err error
switch whence {
-case os.SEEK_SET:
+case io.SeekStart:
fs.Debugf(r, "moving offset set from %v to %v", r.offset, offset)
r.offset = offset
-case os.SEEK_CUR:
+case io.SeekCurrent:
fs.Debugf(r, "moving offset cur from %v to %v", r.offset, r.offset+offset)
r.offset += offset
-case os.SEEK_END:
+case io.SeekEnd:
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
r.offset = r.cachedObject.Size() + offset
default:
@@ -382,10 +381,10 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
if !closeOpen {
if do, ok := r.(fs.RangeSeeker); ok {
-_, err = do.RangeSeek(offset, os.SEEK_SET, end-offset)
+_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
return r, err
} else if do, ok := r.(io.Seeker); ok {
-_, err = do.Seek(offset, os.SEEK_SET)
+_, err = do.Seek(offset, io.SeekStart)
return r, err
}
}

View File

@@ -4,7 +4,6 @@ package cache
import (
"io"
-"os"
"path"
"sync"
"time"
@@ -223,7 +222,7 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
}
-_, err = cacheReader.Seek(offset, os.SEEK_SET)
+_, err = cacheReader.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}

View File

@@ -781,7 +781,7 @@ func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *
}
fh.open = open // will be called by fh.RangeSeek
if doRangeSeek {
-_, err = fh.RangeSeek(offset, 0, limit)
+_, err = fh.RangeSeek(offset, io.SeekStart, limit)
if err != nil {
_ = fh.Close()
return nil, err
@@ -908,7 +908,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
if fh.open == nil {
return 0, fh.finish(errors.New("can't seek - not initialised with newDecrypterSeek"))
}
-if whence != 0 {
+if whence != io.SeekStart {
return 0, fh.finish(errors.New("can only seek from the start"))
}

View File

@@ -1016,7 +1016,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
if offset+limit > len(plaintext) {
continue
}
-_, err := fh.RangeSeek(int64(offset), 0, int64(limit))
+_, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
assert.NoError(t, err)
check(fh, offset, limit)
@@ -1083,7 +1083,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
}
fh, err := c.DecryptDataSeek(testOpen, 0, -1)
assert.NoError(t, err)
-gotOffset, err := fh.RangeSeek(test.offset, 0, test.limit)
+gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
assert.NoError(t, err)
assert.Equal(t, gotOffset, test.offset)
}

View File

@@ -159,7 +159,7 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
-_, _ = chunk.Seek(0, 0)
+_, _ = chunk.Seek(0, io.SeekStart)
req := rx.makeRequest(start, chunk, chunkSize)
res, err := rx.f.client.Do(req)
if err != nil {

View File

@@ -894,7 +894,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
-if _, err = chunk.Seek(0, 0); err != nil {
+if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
@@ -930,7 +930,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
-if _, err = chunk.Seek(0, 0); err != nil {
+if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
@@ -953,7 +953,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
chunk = readers.NewRepeatableReaderBuffer(in, buf)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
-if _, err = chunk.Seek(0, 0); err != nil {
+if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)

View File

@@ -732,7 +732,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
wrappedFd := readers.NewLimitedReadCloser(fd, limit)
if offset != 0 {
// seek the object
-_, err = fd.Seek(offset, 0)
+_, err = fd.Seek(offset, io.SeekStart)
// don't attempt to make checksums
return wrappedFd, err
}

View File

@@ -1123,7 +1123,7 @@ func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk
// var response api.UploadFragmentResponse
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
-_, _ = chunk.Seek(0, 0)
+_, _ = chunk.Seek(0, io.SeekStart)
resp, err = o.fs.srv.Call(&opts)
if resp != nil {
defer fs.CheckClose(resp.Body, &err)

View File

@@ -130,12 +130,12 @@ func (u *uploader) init() {
u.totalSize = -1
switch r := u.cfg.body.(type) {
case io.Seeker:
-pos, _ := r.Seek(0, 1)
+pos, _ := r.Seek(0, io.SeekCurrent)
defer func() {
-_, _ = r.Seek(pos, 0)
+_, _ = r.Seek(pos, io.SeekStart)
}()
-n, err := r.Seek(0, 2)
+n, err := r.Seek(0, io.SeekEnd)
if err != nil {
return
}
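The uploader hunk above uses a common idiom to learn the total size of an io.Seeker without disturbing it: remember the current position with io.SeekCurrent, seek to io.SeekEnd to get the size, then restore the position with io.SeekStart. A minimal sketch of that idiom (hypothetical helper and package name, not rclone code):

package example

import "io"

// seekerSize is a hypothetical helper showing the idiom above: remember
// the current position, seek to the end to learn the total size, then
// restore the original position so the caller can keep reading.
func seekerSize(r io.Seeker) (int64, error) {
	pos, err := r.Seek(0, io.SeekCurrent) // where are we now?
	if err != nil {
		return -1, err
	}
	end, err := r.Seek(0, io.SeekEnd) // offset of the end == total size
	if err != nil {
		return -1, err
	}
	_, err = r.Seek(pos, io.SeekStart) // put the reader back
	return end, err
}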

View File

@@ -905,7 +905,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
return nil, errors.Wrap(err, "Open failed")
}
if offset > 0 {
-off, err := sftpFile.Seek(offset, 0)
+off, err := sftpFile.Seek(offset, io.SeekStart)
if err != nil || off != offset {
return nil, errors.Wrap(err, "Open Seek failed")
}

View File

@@ -3,6 +3,7 @@
package mount
import (
+"io"
"time"
"bazil.org/fuse"
@@ -74,7 +75,7 @@ func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenR
}
// See if seeking is supported and set FUSE hint accordingly
-if _, err = handle.Seek(0, 1); err != nil {
+if _, err = handle.Seek(0, io.SeekCurrent); err != nil {
resp.Flags |= fuse.OpenNonSeekable
}

View File

@@ -27,7 +27,7 @@ func randomSeekTest(size int64, in *os.File, name string) {
}
log.Printf("Reading %d from %d", blockSize, start)
-_, err := in.Seek(start, 0)
+_, err := in.Seek(start, io.SeekStart)
if err != nil {
log.Fatalf("Seek failed on %q: %v", name, err)
}

View File

@@ -32,11 +32,11 @@ func randomSeekTest(size int64, in1, in2 *os.File, file1, file2 string) {
}
log.Printf("Reading %d from %d", blockSize, start)
-_, err := in1.Seek(start, 0)
+_, err := in1.Seek(start, io.SeekStart)
if err != nil {
log.Fatalf("Seek failed on %q: %v", file1, err)
}
-_, err = in2.Seek(start, 0)
+_, err = in2.Seek(start, io.SeekStart)
if err != nil {
log.Fatalf("Seek failed on %q: %v", file2, err)
}

View File

@@ -68,7 +68,7 @@ func seekTest(n int, file string) {
}
log.Printf("%s: Reading %d from %d", file, blockSize, start)
-_, err = in.Seek(start, 0)
+_, err = in.Seek(start, io.SeekStart)
if err != nil {
log.Fatalf("Seek failed on %q: %v", file, err)
}

View File

@@ -66,7 +66,7 @@ func TestReadChecksum(t *testing.T) {
_, err = io.ReadFull(fd, buf)
assert.NoError(t, err)
// read at end
-_, err = fd.Seek(int64(len(b)-len(buf)), 0)
+_, err = fd.Seek(int64(len(b)-len(buf)), io.SeekStart)
assert.NoError(t, err)
_, err = io.ReadFull(fd, buf)
assert.NoError(t, err)
@@ -89,7 +89,7 @@ func TestReadSeek(t *testing.T) {
assert.NoError(t, err)
// Seek to half way
-_, err = fd.Seek(5, 0)
+_, err = fd.Seek(5, io.SeekStart)
assert.NoError(t, err)
buf, err := ioutil.ReadAll(fd)
@@ -97,7 +97,7 @@ func TestReadSeek(t *testing.T) {
assert.Equal(t, buf, []byte("HELLO"))
// Test seeking to the end
-_, err = fd.Seek(10, 0)
+_, err = fd.Seek(10, io.SeekStart)
assert.NoError(t, err)
buf, err = ioutil.ReadAll(fd)
@@ -105,7 +105,7 @@ func TestReadSeek(t *testing.T) {
assert.Equal(t, buf, []byte(""))
// Test seeking beyond the end
-_, err = fd.Seek(1000000, 0)
+_, err = fd.Seek(1000000, io.SeekStart)
assert.NoError(t, err)
buf, err = ioutil.ReadAll(fd)
@@ -113,7 +113,7 @@ func TestReadSeek(t *testing.T) {
assert.Equal(t, buf, []byte(""))
// Now back to the start
-_, err = fd.Seek(0, 0)
+_, err = fd.Seek(0, io.SeekStart)
assert.NoError(t, err)
buf, err = ioutil.ReadAll(fd)

View File

@@ -107,9 +107,9 @@ func (cr *ChunkedReader) RangeSeek(offset int64, whence int, length int64) (int6
size := cr.o.Size()
switch whence {
-case 0:
+case io.SeekStart:
cr.offset = 0
-case 2:
+case io.SeekEnd:
cr.offset = size
}
cr.chunkOffset = cr.offset + offset

View File

@@ -2,6 +2,7 @@
package log
import (
+"io"
"log"
"os"
"reflect"
@@ -71,7 +72,7 @@ func InitLogging() {
if err != nil {
log.Fatalf("Failed to open log file: %v", err)
}
-_, err = f.Seek(0, os.SEEK_END)
+_, err = f.Seek(0, io.SeekEnd)
if err != nil {
fs.Errorf(nil, "Failed to seek log file to end: %v", err)
}

View File

@@ -24,11 +24,11 @@ func (r *RepeatableReader) Seek(offset int64, whence int) (int64, error) {
var abs int64
cacheLen := int64(len(r.b))
switch whence {
-case 0: //io.SeekStart
+case io.SeekStart:
abs = offset
-case 1: //io.SeekCurrent
+case io.SeekCurrent:
abs = r.i + offset
-case 2: //io.SeekEnd
+case io.SeekEnd:
abs = cacheLen + offset
default:
return 0, errors.New("fs.RepeatableReader.Seek: invalid whence")

View File

@@ -32,7 +32,7 @@ func TestRepeatableReader(t *testing.T) {
// Test Seek Back to start
dst = make([]byte, 10)
-pos, err = r.Seek(0, 0)
+pos, err = r.Seek(0, io.SeekStart)
assert.Nil(t, err)
require.Equal(t, 0, int(pos))
@@ -58,13 +58,13 @@ func TestRepeatableReader(t *testing.T) {
buf = bytes.NewBuffer(b)
r = NewRepeatableReader(buf)
// Should not allow seek past cache index
-pos, err = r.Seek(5, 1)
+pos, err = r.Seek(5, io.SeekCurrent)
assert.NotNil(t, err)
assert.Equal(t, "fs.RepeatableReader.Seek: offset is unavailable", err.Error())
assert.Equal(t, 0, int(pos))
// Should not allow seek to negative position start
-pos, err = r.Seek(-1, 1)
+pos, err = r.Seek(-1, io.SeekCurrent)
assert.NotNil(t, err)
assert.Equal(t, "fs.RepeatableReader.Seek: negative position", err.Error())
assert.Equal(t, 0, int(pos))
@@ -78,15 +78,15 @@ func TestRepeatableReader(t *testing.T) {
// Should seek from index with io.SeekCurrent(1) whence
dst = make([]byte, 5)
_, _ = r.Read(dst)
-pos, err = r.Seek(-3, 1)
+pos, err = r.Seek(-3, io.SeekCurrent)
assert.Nil(t, err)
require.Equal(t, 2, int(pos))
-pos, err = r.Seek(1, 1)
+pos, err = r.Seek(1, io.SeekCurrent)
assert.Nil(t, err)
require.Equal(t, 3, int(pos))
// Should seek from cache end with io.SeekEnd(2) whence
-pos, err = r.Seek(-3, 2)
+pos, err = r.Seek(-3, io.SeekEnd)
assert.Nil(t, err)
require.Equal(t, 2, int(pos))

View File

@@ -110,7 +110,7 @@ func (fh *ReadFileHandle) seek(offset int64, reopen bool) (err error) {
// Can we seek it directly?
if do, ok := oldReader.(io.Seeker); !reopen && ok {
fs.Debugf(fh.remote, "ReadFileHandle.seek from %d to %d (io.Seeker)", fh.offset, offset)
-_, err = do.Seek(offset, 0)
+_, err = do.Seek(offset, io.SeekStart)
if err != nil {
fs.Debugf(fh.remote, "ReadFileHandle.Read io.Seeker failed: %v", err)
return err
@@ -144,9 +144,9 @@ func (fh *ReadFileHandle) Seek(offset int64, whence int) (n int64, err error) {
}
size := fh.size
switch whence {
-case 0:
+case io.SeekStart:
fh.roffset = 0
-case 2:
+case io.SeekEnd:
fh.roffset = size
}
fh.roffset += offset

View File

@@ -87,25 +87,25 @@ func TestReadFileHandleSeek(t *testing.T) {
assert.Equal(t, "0", readString(t, fh, 1))
// 0 means relative to the origin of the file,
-n, err := fh.Seek(5, 0)
+n, err := fh.Seek(5, io.SeekStart)
assert.NoError(t, err)
assert.Equal(t, int64(5), n)
assert.Equal(t, "5", readString(t, fh, 1))
// 1 means relative to the current offset
-n, err = fh.Seek(-3, 1)
+n, err = fh.Seek(-3, io.SeekCurrent)
assert.NoError(t, err)
assert.Equal(t, int64(3), n)
assert.Equal(t, "3", readString(t, fh, 1))
// 2 means relative to the end.
-n, err = fh.Seek(-3, 2)
+n, err = fh.Seek(-3, io.SeekEnd)
assert.NoError(t, err)
assert.Equal(t, int64(13), n)
assert.Equal(t, "d", readString(t, fh, 1))
// Seek off the end
-n, err = fh.Seek(100, 0)
+n, err = fh.Seek(100, io.SeekStart)
assert.NoError(t, err)
// Get the error on read
@@ -116,7 +116,7 @@ func TestReadFileHandleSeek(t *testing.T) {
// Check if noSeek is set we get an error
fh.noSeek = true
-_, err = fh.Seek(0, 0)
+_, err = fh.Seek(0, io.SeekStart)
assert.Equal(t, ESPIPE, err)
// Close

View File

@@ -123,11 +123,11 @@ func TestRWFileHandleSeek(t *testing.T) {
assert.Equal(t, fh.opened, false)
// Check null seeks don't open the file
-n, err := fh.Seek(0, 0)
+n, err := fh.Seek(0, io.SeekStart)
assert.NoError(t, err)
assert.Equal(t, int64(0), n)
assert.Equal(t, fh.opened, false)
-n, err = fh.Seek(0, 1)
+n, err = fh.Seek(0, io.SeekCurrent)
assert.NoError(t, err)
assert.Equal(t, int64(0), n)
assert.Equal(t, fh.opened, false)
@@ -135,25 +135,25 @@ func TestRWFileHandleSeek(t *testing.T) {
assert.Equal(t, "0", rwReadString(t, fh, 1))
// 0 means relative to the origin of the file,
-n, err = fh.Seek(5, 0)
+n, err = fh.Seek(5, io.SeekStart)
assert.NoError(t, err)
assert.Equal(t, int64(5), n)
assert.Equal(t, "5", rwReadString(t, fh, 1))
// 1 means relative to the current offset
-n, err = fh.Seek(-3, 1)
+n, err = fh.Seek(-3, io.SeekCurrent)
assert.NoError(t, err)
assert.Equal(t, int64(3), n)
assert.Equal(t, "3", rwReadString(t, fh, 1))
// 2 means relative to the end.
-n, err = fh.Seek(-3, 2)
+n, err = fh.Seek(-3, io.SeekEnd)
assert.NoError(t, err)
assert.Equal(t, int64(13), n)
assert.Equal(t, "d", rwReadString(t, fh, 1))
// Seek off the end
-n, err = fh.Seek(100, 0)
+n, err = fh.Seek(100, io.SeekStart)
assert.NoError(t, err)
// Get the error on read
@@ -290,7 +290,7 @@ func TestRWFileHandleMethodsWrite(t *testing.T) {
assert.Equal(t, "file1", node.Name())
offset := func() int64 {
-n, err := fh.Seek(0, 1)
+n, err := fh.Seek(0, io.SeekCurrent)
require.NoError(t, err)
return n
}
@@ -362,7 +362,7 @@ func TestRWFileHandleWriteAt(t *testing.T) {
defer cleanup(t, r, vfs)
offset := func() int64 {
-n, err := fh.Seek(0, 1)
+n, err := fh.Seek(0, io.SeekCurrent)
require.NoError(t, err)
return n
}

View File

@@ -3,6 +3,7 @@
package vfs
import (
+"io"
"os"
"testing"
@@ -58,7 +59,7 @@ func TestVFSbaseHandle(t *testing.T) {
_, err = fh.Readdirnames(0)
assert.Equal(t, ENOSYS, err)
-_, err = fh.Seek(0, 0)
+_, err = fh.Seek(0, io.SeekStart)
assert.Equal(t, ENOSYS, err)
_, err = fh.Stat()