Fix linter errors
This commit fixes the linter errors popping up in
#1400, to unblock this PR and to allow rebasing other
branches onto master without getting stuck on linter errors.

Changes
* Remove io/ioutil usage from contentcache (see the migration sketch after this list)
* Fix formatting issue in name.go
* Fix formatting issue in type_cache.go
* Add missing error check in file_test.go
* Remove reference to ioutil in auth.go
* Remove reference to ioutil in proxy.go
* Remove unnecessary assignment to the blank identifier
* Remove unnecessary int64 check against math.MaxInt64
* Remove reference to deprecated io/ioutil
* Add missing return-error check
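For readers unfamiliar with the migration: io/ioutil has been deprecated since Go 1.16, and most of its helpers map directly onto functions in os and io. The following is a minimal, self-contained sketch of the mapping applied throughout this commit; the file name and temp-file prefix below are placeholders, not values from this repository.

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.WriteFile -> os.WriteFile ("example.json" is just a placeholder).
	if err := os.WriteFile("example.json", []byte(`{"k":"v"}`), 0644); err != nil {
		panic(err)
	}
	defer os.Remove("example.json")

	// ioutil.ReadFile -> os.ReadFile
	contents, err := os.ReadFile("example.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(contents))

	// ioutil.ReadAll -> io.ReadAll
	body, err := io.ReadAll(strings.NewReader("some response body"))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(body))

	// ioutil.TempFile -> os.CreateTemp ("cache-" is a placeholder prefix).
	f, err := os.CreateTemp("", "cache-")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.Close()

	// ioutil.ReadDir -> os.ReadDir (returns []fs.DirEntry, not []fs.FileInfo).
	entries, err := os.ReadDir(os.TempDir())
	if err != nil {
		panic(err)
	}
	fmt.Println(len(entries))
}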
gargnitingoogle committed Oct 30, 2023
1 parent ca74705 commit 6d4f2a4
Showing 9 changed files with 33 additions and 27 deletions.
4 changes: 2 additions & 2 deletions internal/auth/auth.go
@@ -17,7 +17,7 @@ package auth
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
+	"os"
 
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
@@ -31,7 +31,7 @@ func newTokenSourceFromPath(
 	scope string,
 ) (ts oauth2.TokenSource, err error) {
 	// Read the file.
-	contents, err := ioutil.ReadFile(path)
+	contents, err := os.ReadFile(path)
 	if err != nil {
 		err = fmt.Errorf("ReadFile(%q): %w", path, err)
 		return
3 changes: 1 addition & 2 deletions internal/auth/proxy.go
@@ -19,7 +19,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"
@@ -75,7 +74,7 @@ func (ts proxyTokenSource) Token() (token *oauth2.Token, err error) {
 		return
 	}
 
-	body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+	body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20))
 	if err != nil {
 		err = fmt.Errorf("proxyTokenSource cannot load body: %w", err)
 		return
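Background on the pattern touched above (only the ReadAll call changed in this commit; the LimitReader wrapper was already there): io.ReadAll reads until EOF, so wrapping resp.Body in io.LimitReader caps the buffered token response at 1 MiB even if the server sends more. A rough standalone sketch with a fake oversized body:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Stand-in for resp.Body: a 3 MiB payload from a misbehaving endpoint.
	body := strings.NewReader(strings.Repeat("x", 3<<20))

	// io.LimitReader returns EOF after 1<<20 bytes, so io.ReadAll cannot
	// buffer more than 1 MiB no matter how large the response is.
	data, err := io.ReadAll(io.LimitReader(body, 1<<20))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data)) // prints 1048576
}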
21 changes: 15 additions & 6 deletions internal/contentcache/contentcache.go
Expand Up @@ -21,7 +21,6 @@ import (
"fmt"
"io"
"io/fs"
"io/ioutil"
"os"
"path"
"regexp"
@@ -83,7 +82,7 @@ func (c *ContentCache) WriteMetadataCheckpointFile(cacheFileName string, cacheFi
 		return
 	}
 	metadataFileName = fmt.Sprintf("%s.json", cacheFileName)
-	err = ioutil.WriteFile(metadataFileName, file, 0644)
+	err = os.WriteFile(metadataFileName, file, 0644)
 	if err != nil {
 		err = fmt.Errorf("WriteFile for JSON metadata: %w", err)
 		return
@@ -111,7 +110,7 @@ func (c *ContentCache) recoverFileFromCache(metadataFile fs.FileInfo) {
 	}
 	var metadata CacheFileObjectMetadata
 	metadataAbsolutePath := path.Join(c.tempDir, metadataFile.Name())
-	contents, err := ioutil.ReadFile(metadataAbsolutePath)
+	contents, err := os.ReadFile(metadataAbsolutePath)
 	if err != nil {
 		logger.Errorf("content cache: Skip metadata file %v due to read error: %s", metadataFile.Name(), err)
 		return
@@ -152,11 +151,21 @@ func (c *ContentCache) RecoverCache() error {
 		c.tempDir = "/tmp"
 	}
 	logger.Infof("Recovering cache:\n")
-	files, err := ioutil.ReadDir(c.tempDir)
+	dirEntries, err := os.ReadDir(c.tempDir)
 	if err != nil {
-		// if we fail to read the specified directory, log and return error
+		// We failed to get the list of directory entries
+		// in the temp directory, log and return error.
 		return fmt.Errorf("recover cache: %w", err)
 	}
+	files := make([]os.FileInfo, len(dirEntries))
+	for i, dirEntry := range dirEntries {
+		files[i], err = dirEntry.Info()
+		if err != nil {
+			// We failed to read a directory entry,
+			// log and return error.
+			return fmt.Errorf("recover cache: %w", err)
+		}
+	}
 	for _, metadataFile := range files {
 		c.recoverFileFromCache(metadataFile)
 	}
@@ -196,7 +205,7 @@ func (c *ContentCache) AddOrReplace(cacheObjectKey *CacheObjectKey, generation i
 		cacheObject.Destroy()
 	}
 	// Create a temporary cache file on disk
-	f, err := ioutil.TempFile(c.tempDir, CacheFilePrefix)
+	f, err := os.CreateTemp(c.tempDir, CacheFilePrefix)
 	if err != nil {
 		return nil, fmt.Errorf("TempFile: %w", err)
 	}
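Context for the new loop in RecoverCache above: os.ReadDir returns []fs.DirEntry, while the removed ioutil.ReadDir returned []fs.FileInfo, so callers that still need a FileInfo (recoverFileFromCache takes one) must call Info() on each entry. A hedged, standalone sketch of that conversion; the directory argument here is just os.TempDir(), not the cache's real tempDir:

package main

import (
	"fmt"
	"io/fs"
	"os"
)

// dirInfos mirrors the pattern added in RecoverCache: list a directory with
// os.ReadDir and convert each fs.DirEntry into an fs.FileInfo via Info().
func dirInfos(dir string) ([]fs.FileInfo, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, fmt.Errorf("read dir: %w", err)
	}
	infos := make([]fs.FileInfo, len(entries))
	for i, entry := range entries {
		infos[i], err = entry.Info()
		if err != nil {
			return nil, fmt.Errorf("stat %q: %w", entry.Name(), err)
		}
	}
	return infos, nil
}

func main() {
	infos, err := dirInfos(os.TempDir())
	if err != nil {
		panic(err)
	}
	for _, info := range infos {
		fmt.Println(info.Name(), info.Size())
	}
}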
2 changes: 1 addition & 1 deletion internal/fs/fs.go
@@ -594,7 +594,7 @@ func (fs *fileSystem) checkInvariants() {
 	//////////////////////////////////
 
 	// INVARIANT: For all keys k in handles, k < nextHandleID
-	for k, _ := range fs.handles {
+	for k := range fs.handles {
 		if k >= fs.nextHandleID {
 			panic(fmt.Sprintf("Illegal handle ID: %v", k))
 		}
1 change: 1 addition & 0 deletions internal/fs/inode/file_test.go
@@ -848,6 +848,7 @@ func (t *FileTest) TestSetMtimeForLocalFileShouldUpdateLocalFileAttributes() {
 	t.createInodeWithLocalParam("test", true)
 	createTime := t.in.mtimeClock.Now()
 	err = t.in.CreateEmptyTempFile()
+	AssertEq(nil, err)
 	// Validate the attributes on an empty file.
 	attrs, err = t.in.Attributes(t.ctx)
 	AssertEq(nil, err)
5 changes: 3 additions & 2 deletions internal/fs/inode/name.go
@@ -20,8 +20,9 @@ import (
 )
 
 // Name is the inode's name that can be interpreted in 2 ways:
-// (1) LocalName: the name of the inode in the local file system.
-// (2) GcsObjectName: the name of its gcs object backed by the inode.
+//
+// (1) LocalName: the name of the inode in the local file system.
+// (2) GcsObjectName: the name of its gcs object backed by the inode.
 type Name struct {
 	// The value of bucketName can be:
 	// - "", when single gcs bucket is explicitly mounted for the file system.
8 changes: 4 additions & 4 deletions internal/fs/inode/type_cache.go
@@ -28,10 +28,10 @@ type cacheEntry struct {
 // A cache that maps from a name to information about the type of the object
 // with that name. Each name N is in one of the following states:
 //
-// * Nothing is known about N.
-// * We have recorded that N is a file.
-// * We have recorded that N is a directory.
-// * We have recorded that N is both a file and a directory.
+// - Nothing is known about N.
+// - We have recorded that N is a file.
+// - We have recorded that N is a directory.
+// - We have recorded that N is both a file and a directory.
 //
 // Must be created with newTypeCache. May be contained in a larger struct.
 // External synchronization is required.
10 changes: 6 additions & 4 deletions internal/gcsx/append_object_creator_test.go
@@ -17,7 +17,7 @@ package gcsx
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"strings"
 	"testing"
 	"time"
@@ -111,13 +111,14 @@ func (t *AppendObjectCreatorTest) CallsCreateObject() {
 		WillOnce(DoAll(SaveArg(1, &req), Return(nil, errors.New(""))))
 
 	// Call
-	t.call()
+	_, err := t.call()
+	AssertNe(nil, err)
 
 	AssertNe(nil, req)
 	ExpectTrue(strings.HasPrefix(req.Name, prefix), "Name: %s", req.Name)
 	ExpectThat(req.GenerationPrecondition, Pointee(Equals(0)))
 
-	b, err := ioutil.ReadAll(req.Contents)
+	b, err := io.ReadAll(req.Contents)
 	AssertEq(nil, err)
 	ExpectEq(t.srcContents, string(b))
 }
@@ -177,7 +178,8 @@ func (t *AppendObjectCreatorTest) CallsComposeObjects() {
 		WillOnce(Return(nil))
 
 	// Call
-	t.call()
+	_, err := t.call()
+	AssertNe(nil, err)
 
 	AssertNe(nil, req)
 	ExpectEq(t.srcObject.Name, req.DstName)
6 changes: 0 additions & 6 deletions internal/gcsx/integration_test.go
@@ -19,7 +19,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"math"
 	"math/rand"
 	"strings"
 	"testing"
@@ -137,11 +136,6 @@ func (t *IntegrationTest) objectGeneration(name string) (gen int64) {
 		panic(err)
 	}
 
-	// Check the result.
-	if o.Generation > math.MaxInt64 {
-		panic(fmt.Sprintf("Out of range: %v", o.Generation))
-	}
-
 	gen = o.Generation
 	return
 }
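Why the deleted check was flagged: objectGeneration declares gen as int64 and assigns it straight from o.Generation, so the value is an int64 and can never exceed math.MaxInt64; the comparison is always false, which is exactly the kind of tautological condition linters report. A tiny illustration:

package main

import (
	"fmt"
	"math"
)

func main() {
	var generation int64 = math.MaxInt64
	// An int64 can never hold a value greater than math.MaxInt64, so this
	// branch is unreachable; linters flag the comparison as always false.
	if generation > math.MaxInt64 {
		fmt.Println("unreachable")
	}
	fmt.Println(generation)
}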
