From 1eaf754360d109d88c2301f1b869b9d7ec186b22 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Tue, 3 Jan 2023 14:45:37 -0800 Subject: [PATCH 01/50] Print out lint results --- azure-pipelines.yml | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 97a1710f82..7484c25701 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -32,10 +32,22 @@ jobs: version: '1.19.2' - script: | - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.46.2 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin echo 'Installation complete' - ./bin/golangci-lint --version - ./bin/golangci-lint run e2etest + $(go env GOPATH)/bin/golangci-lint --version + $(go env GOPATH)/bin/golangci-lint run --tests=false --max-issues-per-linter=0 > lint.log + result=$(cat lint.log | wc -l) + if [ $result -ne 0 ]; then + echo "-----------------------------------" + echo "Below issues are found in static analysis" + cat lint.log + echo "-----------------------------------" + exit 1 + else + echo "-----------------------------------" + echo "No issues are found in static analysis" + echo "-----------------------------------" + fi displayName: 'Golang Lint Check - Linux' workingDirectory: $(System.DefaultWorkingDirectory) condition: eq(variables.type, 'linux') From 19062e53187952f50aae48df558fe5d07e94e346 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 09:47:36 -0800 Subject: [PATCH 02/50] Fixed errcheck errors --- azbfs/zc_policy_retry.go | 4 +- azbfs/zc_util_validate.go | 2 +- cmd/copy.go | 6 +- common/logger_unix.go | 36 ++++++------ common/mmf_linux.go | 3 +- common/oauthTokenManager.go | 9 +-- common/writeThoughFile.go | 2 +- jobsAdmin/JobsAdmin.go | 34 ++++++------ jobsAdmin/init.go | 106 ++++++++++++++++++------------------ ste/xferRetrypolicy.go | 2 +- 10 files changed, 103 insertions(+), 101 deletions(-) diff --git a/azbfs/zc_policy_retry.go b/azbfs/zc_policy_retry.go index a7f56a0457..b37dbcebb1 100644 --- a/azbfs/zc_policy_retry.go +++ b/azbfs/zc_policy_retry.go @@ -134,7 +134,7 @@ func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never // Note: forked from the standard package url.go // The content is exactly the same but the spaces are encoded as %20 instead of + // TODO: remove after the service fix -// Encode encodes the values into ``URL encoded'' form +// Encode encodes the values into “URL encoded” form // ("bar=baz&foo=quux") sorted by key. 
func alternativeEncode(v url.Values) string { if v == nil { @@ -284,7 +284,7 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { } if response != nil && response.Response() != nil { // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection - io.Copy(ioutil.Discard, response.Response().Body) + _, _ = io.Copy(ioutil.Discard, response.Response().Body) response.Response().Body.Close() } // If retrying, cancel the current per-try timeout context diff --git a/azbfs/zc_util_validate.go b/azbfs/zc_util_validate.go index b10a56985b..149e6eee04 100644 --- a/azbfs/zc_util_validate.go +++ b/azbfs/zc_util_validate.go @@ -58,7 +58,7 @@ func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) int64 { if err != nil { panic("failed to seek stream") } - body.Seek(0, io.SeekStart) + _, _ = body.Seek(0, io.SeekStart) return count } diff --git a/cmd/copy.go b/cmd/copy.go index ce20ca3ad6..1ed52e79fa 100644 --- a/cmd/copy.go +++ b/cmd/copy.go @@ -1990,13 +1990,13 @@ func init() { // permanently hidden // Hide the list-of-files flag since it is implemented only for Storage Explorer. - cpCmd.PersistentFlags().MarkHidden("list-of-files") - cpCmd.PersistentFlags().MarkHidden("s2s-get-properties-in-backend") + _ = cpCmd.PersistentFlags().MarkHidden("list-of-files") + _ = cpCmd.PersistentFlags().MarkHidden("s2s-get-properties-in-backend") // temp, to assist users with change in param names, by providing a clearer message when these obsolete ones are accidentally used cpCmd.PersistentFlags().StringVar(&raw.legacyInclude, "include", "", "Legacy include param. DO NOT USE") cpCmd.PersistentFlags().StringVar(&raw.legacyExclude, "exclude", "", "Legacy exclude param. DO NOT USE") - cpCmd.PersistentFlags().MarkHidden("include") + _ = cpCmd.PersistentFlags().MarkHidden("include") cpCmd.PersistentFlags().MarkHidden("exclude") // Hide the flush-threshold flag since it is implemented only for CI. diff --git a/common/logger_unix.go b/common/logger_unix.go index 63e31a52e6..e89f10d924 100644 --- a/common/logger_unix.go +++ b/common/logger_unix.go @@ -1,4 +1,6 @@ +//go:build linux || darwin // +build linux darwin + // Copyright Microsoft // // Permission is hereby granted, free of charge, to any person obtaining a copy @@ -23,29 +25,28 @@ package common import ( "fmt" - "runtime" "log/syslog" + "runtime" "github.com/Azure/azure-pipeline-go/pipeline" ) -////////////////////////////////////////// +// //////////////////////////////////////// type sysLogger struct { // minimum loglevel represents the minimum severity of log messages which can be logged to Job Log file. // any message with severity higher than this will be ignored. 
jobID JobID minimumLevelToLog pipeline.LogLevel // The maximum customer-desired log level for this job - writer *syslog.Writer // The Job's logger + writer *syslog.Writer // The Job's logger logSuffix string sanitizer pipeline.LogSanitizer } - func NewSysLogger(jobID JobID, minimumLevelToLog LogLevel, logSuffix string) ILoggerResetable { return &sysLogger{ jobID: jobID, minimumLevelToLog: minimumLevelToLog.ToPipelineLogLevel(), - logSuffix: logSuffix, + logSuffix: logSuffix, sanitizer: NewAzCopyLogSanitizer(), } } @@ -53,16 +54,16 @@ func NewSysLogger(jobID JobID, minimumLevelToLog LogLevel, logSuffix string) ILo func (sl *sysLogger) OpenLog() { if sl.minimumLevelToLog == pipeline.LogNone { return - } + } writer, err := syslog.New(syslog.LOG_NOTICE, fmt.Sprintf("%s %s", sl.logSuffix, sl.jobID.String())) PanicIfErr(err) sl.writer = writer // Log the Azcopy Version - sl.writer.Notice("AzcopyVersion " + AzcopyVersion) + _ = sl.writer.Notice("AzcopyVersion " + AzcopyVersion) // Log the OS Environment and OS Architecture - sl.writer.Notice("OS-Environment " + runtime.GOOS) - sl.writer.Notice("OS-Architecture " + runtime.GOARCH) + _ = sl.writer.Notice("OS-Environment " + runtime.GOOS) + _ = sl.writer.Notice("OS-Architecture " + runtime.GOARCH) } func (sl *sysLogger) MinimumLogLevel() pipeline.LogLevel { @@ -85,9 +86,8 @@ func (sl *sysLogger) CloseLog() { sl.writer.Close() } - func (sl *sysLogger) Panic(err error) { - sl.writer.Crit(err.Error()) // We do NOT panic here as the app would terminate; + _ = sl.writer.Crit(err.Error()) // We do NOT panic here as the app would terminate; //we just log it. We should never reach this line of code! } @@ -103,16 +103,16 @@ func (sl *sysLogger) Log(loglevel pipeline.LogLevel, msg string) { case pipeline.LogNone: //nothing to do case pipeline.LogFatal: - w.Emerg(msg) + _ = w.Emerg(msg) case pipeline.LogPanic: - w.Crit(msg) + _ = w.Crit(msg) case pipeline.LogError: - w.Err(msg) + _ = w.Err(msg) case pipeline.LogWarning: - w.Warning(msg) + _ = w.Warning(msg) case pipeline.LogInfo: - w.Info(msg) + _ = w.Info(msg) case pipeline.LogDebug: - w.Debug(msg) + _ = w.Debug(msg) } -} \ No newline at end of file +} diff --git a/common/mmf_linux.go b/common/mmf_linux.go index 911e34506f..d21b8c10a4 100644 --- a/common/mmf_linux.go +++ b/common/mmf_linux.go @@ -1,3 +1,4 @@ +//go:build linux || darwin // +build linux darwin // Copyright © 2017 Microsoft @@ -55,7 +56,7 @@ func NewMMF(file *os.File, writable bool, offset int64, length int64) (*MMF, err } addr, err := syscall.Mmap(int(file.Fd()), offset, int(length), prot, flags) if !writable { - syscall.Madvise(addr, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED) + _ = syscall.Madvise(addr, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED) } return &MMF{slice: (addr), isMapped: true, lock: sync.RWMutex{}}, err } diff --git a/common/oauthTokenManager.go b/common/oauthTokenManager.go index 1d5fae0bc3..ba9c55ea2b 100644 --- a/common/oauthTokenManager.go +++ b/common/oauthTokenManager.go @@ -108,9 +108,10 @@ func newAzcopyHTTPClient() *http.Client { } // GetTokenInfo gets token info, it follows rule: -// 1. If there is token passed from environment variable(note this is only for testing purpose), -// use token passed from environment variable. -// 2. Otherwise, try to get token from cache. +// 1. If there is token passed from environment variable(note this is only for testing purpose), +// use token passed from environment variable. +// 2. Otherwise, try to get token from cache. 
+// // This method either successfully return token, or return error. func (uotm *UserOAuthTokenManager) GetTokenInfo(ctx context.Context) (*OAuthTokenInfo, error) { if uotm.stashedInfo != nil { @@ -879,7 +880,7 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.T } defer func() { // resp and Body should not be nil - io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() }() diff --git a/common/writeThoughFile.go b/common/writeThoughFile.go index 82cc574448..161d965cc3 100644 --- a/common/writeThoughFile.go +++ b/common/writeThoughFile.go @@ -59,7 +59,7 @@ func CreateDirectoryIfNotExist(directory string, tracker FolderCreationTracker) // stat errors can be present in write-only scenarios, when the directory isn't present, etc. // as a result, we care more about the mkdir error than the stat error, because that's the tell. // first make sure the parent directory exists but we ignore any error that comes back - CreateParentDirectoryIfNotExist(directory, tracker) + _ = CreateParentDirectoryIfNotExist(directory, tracker) // then create the directory mkDirErr := os.Mkdir(directory, os.ModePerm) diff --git a/jobsAdmin/JobsAdmin.go b/jobsAdmin/JobsAdmin.go index 97e3419aaa..5354104b6a 100755 --- a/jobsAdmin/JobsAdmin.go +++ b/jobsAdmin/JobsAdmin.go @@ -366,7 +366,7 @@ func (ja *jobsAdmin) ResurrectJob(jobId common.JobID, sourceSAS string, destinat // are include in the result files := func(prefix, ext string) []os.FileInfo { var files []os.FileInfo - filepath.Walk(ja.planDir, func(path string, fileInfo os.FileInfo, _ error) error { + _ = filepath.Walk(ja.planDir, func(path string, fileInfo os.FileInfo, _ error) error { if !fileInfo.IsDir() && fileInfo.Size() != 0 && strings.HasPrefix(fileInfo.Name(), prefix) && strings.HasSuffix(fileInfo.Name(), ext) { files = append(files, fileInfo) } @@ -403,7 +403,7 @@ func (ja *jobsAdmin) ResurrectJobParts() { // Get all the Job part plan files in the plan directory files := func(ext string) []os.FileInfo { var files []os.FileInfo - filepath.Walk(ja.planDir, func(path string, fileInfo os.FileInfo, _ error) error { + _ = filepath.Walk(ja.planDir, func(path string, fileInfo os.FileInfo, _ error) error { if !fileInfo.IsDir() && fileInfo.Size() != 0 && strings.HasSuffix(fileInfo.Name(), ext) { files = append(files, fileInfo) } @@ -430,7 +430,7 @@ func (ja *jobsAdmin) ListJobs(givenStatus common.JobStatus) common.ListJobsRespo ret := common.ListJobsResponse{JobIDDetails: []common.JobIDDetails{}} files := func(ext string) []os.FileInfo { var files []os.FileInfo - filepath.Walk(ja.planDir, func(path string, fileInfo os.FileInfo, _ error) error { + _ = filepath.Walk(ja.planDir, func(path string, fileInfo os.FileInfo, _ error) error { if !fileInfo.IsDir() && strings.HasSuffix(fileInfo.Name(), ext) { files = append(files, fileInfo) } @@ -453,7 +453,7 @@ func (ja *jobsAdmin) ListJobs(givenStatus common.JobStatus) common.ListJobsRespo if givenStatus == common.EJobStatus.All() || givenStatus == jpph.JobStatus() { ret.JobIDDetails = append(ret.JobIDDetails, common.JobIDDetails{JobId: jobID, CommandString: jpph.CommandString(), - StartTime: jpph.StartTime, JobStatus: jpph.JobStatus()}) + StartTime: jpph.StartTime, JobStatus: jpph.JobStatus()}) } mmf.Unmap() @@ -582,7 +582,7 @@ func (ja *jobsAdmin) TryGetPerformanceAdvice(bytesInJob uint64, filesInJob uint3 a := ste.NewPerformanceAdvisor(p, ja.commandLineMbpsCap, int64(megabitsPerSec), finalReason, finalConcurrency, dir, averageBytesPerFile, 
isToAzureFiles) return a.GetAdvice() } - + //Structs for messageHandler /* PerfAdjustment message. */ @@ -594,7 +594,7 @@ func (ja *jobsAdmin) messageHandler(inputChan <-chan *common.LCMMsg) { toBitsPerSec := func(megaBitsPerSec int64) int64 { return megaBitsPerSec * 1000 * 1000 / 8 } - + const minIntervalBetweenPerfAdjustment = time.Minute lastPerfAdjustTime := time.Now().Add(-2 * minIntervalBetweenPerfAdjustment) var err error @@ -602,30 +602,30 @@ func (ja *jobsAdmin) messageHandler(inputChan <-chan *common.LCMMsg) { for { msg := <-inputChan var msgType common.LCMMsgType - msgType.Parse(msg.Req.MsgType) // MsgType is already verified by LCM + _ = msgType.Parse(msg.Req.MsgType) // MsgType is already verified by LCM switch msgType { case common.ELCMMsgType.PerformanceAdjustment(): var resp common.PerfAdjustmentResp var perfAdjustmentReq common.PerfAdjustmentReq if time.Since(lastPerfAdjustTime) < minIntervalBetweenPerfAdjustment { - err = fmt.Errorf("Performance Adjustment already in progress. Please try after " + - lastPerfAdjustTime.Add(minIntervalBetweenPerfAdjustment).Format(time.RFC3339)) + err = fmt.Errorf("Performance Adjustment already in progress. Please try after " + + lastPerfAdjustTime.Add(minIntervalBetweenPerfAdjustment).Format(time.RFC3339)) } - + if e := json.Unmarshal([]byte(msg.Req.Value), &perfAdjustmentReq); e != nil { err = fmt.Errorf("parsing %s failed with %s", msg.Req.Value, e.Error()) } if perfAdjustmentReq.Throughput < 0 { err = fmt.Errorf("invalid value %d for cap-mbps. cap-mpbs should be greater than 0", - perfAdjustmentReq.Throughput) + perfAdjustmentReq.Throughput) } if err == nil { lastPerfAdjustTime = time.Now() ja.UpdateTargetBandwidth(toBitsPerSec(perfAdjustmentReq.Throughput)) - + resp.Status = true resp.AdjustedThroughPut = perfAdjustmentReq.Throughput resp.NextAdjustmentAfter = lastPerfAdjustTime.Add(minIntervalBetweenPerfAdjustment) @@ -637,11 +637,11 @@ func (ja *jobsAdmin) messageHandler(inputChan <-chan *common.LCMMsg) { resp.Err = err.Error() } - msg.SetResponse(&common.LCMMsgResp { + msg.SetResponse(&common.LCMMsgResp{ TimeStamp: time.Now(), - MsgType: msg.Req.MsgType, - Value: resp, - Err: err, + MsgType: msg.Req.MsgType, + Value: resp, + Err: err, }) msg.Reply() @@ -660,7 +660,7 @@ type jobIDToJobMgr struct { nocopy common.NoCopy lock sync.RWMutex m map[common.JobID]ste.IJobMgr -} +} func newJobIDToJobMgr() jobIDToJobMgr { return jobIDToJobMgr{m: make(map[common.JobID]ste.IJobMgr)} diff --git a/jobsAdmin/init.go b/jobsAdmin/init.go index 0c40bcaefe..1f43680cae 100755 --- a/jobsAdmin/init.go +++ b/jobsAdmin/init.go @@ -30,7 +30,7 @@ import ( "time" "github.com/Azure/azure-pipeline-go/pipeline" - + "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/ste" ) @@ -86,7 +86,7 @@ func MainSTE(concurrency ste.ConcurrencySettings, targetRateInMegaBitsPerSec flo if err != nil { JobsAdmin.Panic(fmt.Errorf("error deserializing HTTP request")) } - json.Unmarshal(body, v) + _ = json.Unmarshal(body, v) } serialize := func(v interface{}, response http.ResponseWriter) { payload, err := json.Marshal(response) @@ -95,7 +95,7 @@ func MainSTE(concurrency ste.ConcurrencySettings, targetRateInMegaBitsPerSec flo } // sending successful response back to front end response.WriteHeader(http.StatusAccepted) - response.Write(payload) + _, _ = response.Write(payload) } http.HandleFunc(common.ERpcCmd.CopyJobPartOrder().Pattern(), func(writer http.ResponseWriter, request *http.Request) { @@ -217,60 +217,60 @@ func 
CancelPauseJobOrder(jobID common.JobID, desiredJobStatus common.JobStatus) } /* - // Search for the Part 0 of the Job, since the Part 0 status concludes the actual status of the Job - jpm, found := jm.JobPartMgr(0) - if !found { - return common.CancelPauseResumeResponse{ - CancelledPauseResumed: false, - ErrorMsg: fmt.Sprintf("job with JobId %s has a missing 0th part", jobID.String()), - } - } - - jpp0 := jpm.Plan() - var jr common.CancelPauseResumeResponse - switch jpp0.JobStatus() { // Current status - case common.EJobStatus.Completed(): // You can't change state of a completed job - jr = common.CancelPauseResumeResponse{ - CancelledPauseResumed: false, - ErrorMsg: fmt.Sprintf("Can't %s JobID=%v because it has already completed", verb, jobID), - } - case common.EJobStatus.Cancelled(): - // If the status of Job is cancelled, it means that it has already been cancelled - // No need to cancel further - jr = common.CancelPauseResumeResponse{ - CancelledPauseResumed: false, - ErrorMsg: fmt.Sprintf("cannot cancel the job %s since it is already cancelled", jobID), - } - case common.EJobStatus.Cancelling(): - // If the status of Job is cancelling, it means that it has already been requested for cancellation - // No need to cancel further - jr = common.CancelPauseResumeResponse{ - CancelledPauseResumed: true, - ErrorMsg: fmt.Sprintf("cannot cancel the job %s since it has already been requested for cancellation", jobID), + // Search for the Part 0 of the Job, since the Part 0 status concludes the actual status of the Job + jpm, found := jm.JobPartMgr(0) + if !found { + return common.CancelPauseResumeResponse{ + CancelledPauseResumed: false, + ErrorMsg: fmt.Sprintf("job with JobId %s has a missing 0th part", jobID.String()), + } } - case common.EJobStatus.InProgress(): - // If the Job status is in Progress and Job is not completely ordered - // Job cannot be resumed later, hence graceful cancellation is not required - // hence sending the response immediately. Response CancelPauseResumeResponse - // returned has CancelledPauseResumed set to false, because that will let - // Job immediately stop. 
- fallthrough - case common.EJobStatus.Paused(): // Logically, It's OK to pause an already-paused job - jpp0.SetJobStatus(desiredJobStatus) - msg := fmt.Sprintf("JobID=%v %s", jobID, - common.IffString(desiredJobStatus == common.EJobStatus.Paused(), "paused", "canceled")) - if jm.ShouldLog(pipeline.LogInfo) { - jm.Log(pipeline.LogInfo, msg) - } - jm.Cancel() // Stop all inflight-chunks/transfer for this job (this includes all parts) - jr = common.CancelPauseResumeResponse{ - CancelledPauseResumed: true, - ErrorMsg: msg, + jpp0 := jpm.Plan() + var jr common.CancelPauseResumeResponse + switch jpp0.JobStatus() { // Current status + case common.EJobStatus.Completed(): // You can't change state of a completed job + jr = common.CancelPauseResumeResponse{ + CancelledPauseResumed: false, + ErrorMsg: fmt.Sprintf("Can't %s JobID=%v because it has already completed", verb, jobID), + } + case common.EJobStatus.Cancelled(): + // If the status of Job is cancelled, it means that it has already been cancelled + // No need to cancel further + jr = common.CancelPauseResumeResponse{ + CancelledPauseResumed: false, + ErrorMsg: fmt.Sprintf("cannot cancel the job %s since it is already cancelled", jobID), + } + case common.EJobStatus.Cancelling(): + // If the status of Job is cancelling, it means that it has already been requested for cancellation + // No need to cancel further + jr = common.CancelPauseResumeResponse{ + CancelledPauseResumed: true, + ErrorMsg: fmt.Sprintf("cannot cancel the job %s since it has already been requested for cancellation", jobID), + } + case common.EJobStatus.InProgress(): + // If the Job status is in Progress and Job is not completely ordered + // Job cannot be resumed later, hence graceful cancellation is not required + // hence sending the response immediately. Response CancelPauseResumeResponse + // returned has CancelledPauseResumed set to false, because that will let + // Job immediately stop. + fallthrough + case common.EJobStatus.Paused(): // Logically, It's OK to pause an already-paused job + jpp0.SetJobStatus(desiredJobStatus) + msg := fmt.Sprintf("JobID=%v %s", jobID, + common.IffString(desiredJobStatus == common.EJobStatus.Paused(), "paused", "canceled")) + + if jm.ShouldLog(pipeline.LogInfo) { + jm.Log(pipeline.LogInfo, msg) + } + jm.Cancel() // Stop all inflight-chunks/transfer for this job (this includes all parts) + jr = common.CancelPauseResumeResponse{ + CancelledPauseResumed: true, + ErrorMsg: msg, + } } + return jr } - return jr -} */ func ResumeJobOrder(req common.ResumeJobRequest) common.CancelPauseResumeResponse { // Strip '?' 
if present as first character of the source sas / destination sas diff --git a/ste/xferRetrypolicy.go b/ste/xferRetrypolicy.go index ec88658753..f36f5f1fe5 100644 --- a/ste/xferRetrypolicy.go +++ b/ste/xferRetrypolicy.go @@ -283,7 +283,7 @@ func NewBFSXferRetryPolicyFactory(o XferRetryOptions) pipeline.Factory { } if response.Response() != nil { // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection - io.Copy(ioutil.Discard, response.Response().Body) + _, _ = io.Copy(ioutil.Discard, response.Response().Body) response.Response().Body.Close() } // If retrying, cancel the current per-try timeout context From 8aaee4a7f6535294fb738b39be76b1a0c6877d46 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 10:32:38 -0800 Subject: [PATCH 03/50] Fixed some gosimple errors --- azbfs/zc_credential_shared_key.go | 4 ++-- azbfs/zc_policy_retry.go | 2 +- cmd/copy.go | 6 +++--- cmd/jobsClean.go | 2 +- cmd/jobsResume.go | 2 +- cmd/pathUtils.go | 2 +- cmd/removeEnumerator.go | 2 +- cmd/zc_enumerator.go | 2 +- cmd/zc_traverser_blobfs.go | 4 ++-- cmd/zc_traverser_list.go | 3 +-- common/CountPerSecond.go | 2 +- common/cpuMonitor.go | 4 ++-- common/credCache_linux.go | 2 +- common/decompressingWriter.go | 2 -- common/emptyChunkReader.go | 2 +- common/lifecyleMgr.go | 2 -- ste/s2sCopier-URLToBlob.go | 3 +-- ste/sender-pageBlob.go | 4 ++-- ste/xfer-anyToRemote-file.go | 4 ++-- ste/xferRetrypolicy.go | 4 ++-- testSuite/cmd/clean.go | 2 +- testSuite/cmd/create.go | 4 ++-- testSuite/cmd/testblobFS.go | 2 +- 23 files changed, 30 insertions(+), 36 deletions(-) diff --git a/azbfs/zc_credential_shared_key.go b/azbfs/zc_credential_shared_key.go index dead933ef7..27c0f4ede6 100644 --- a/azbfs/zc_credential_shared_key.go +++ b/azbfs/zc_credential_shared_key.go @@ -143,7 +143,7 @@ func buildCanonicalizedHeader(headers http.Header) string { ch.WriteRune(':') ch.WriteString(strings.Join(cm[key], ",")) } - return string(ch.Bytes()) + return ch.String() } func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) string { @@ -190,5 +190,5 @@ func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) string { cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) } } - return string(cr.Bytes()) + return cr.String() } diff --git a/azbfs/zc_policy_retry.go b/azbfs/zc_policy_retry.go index b37dbcebb1..00d1ccd58e 100644 --- a/azbfs/zc_policy_retry.go +++ b/azbfs/zc_policy_retry.go @@ -217,7 +217,7 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { // Set the server-side timeout query parameter "timeout=[seconds]" timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two - t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline + t := int32(time.Until(deadline).Seconds()) // Duration from now until user's ctx reaches its deadline logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t) if t < timeout { timeout = t diff --git a/cmd/copy.go b/cmd/copy.go index 1ed52e79fa..7982126a50 100644 --- a/cmd/copy.go +++ b/cmd/copy.go @@ -1072,7 +1072,7 @@ func validateMetadataString(metadata string) error { if err != nil { return err } - for k, _ := range metadataMap { + for k := range metadataMap { if strings.ContainsAny(k, " !#$%^&*,<>{}|\\:.()+'\"?/") { return fmt.Errorf("invalid metadata key value '%s': can't have spaces or special 
characters", k) } @@ -1631,7 +1631,7 @@ func (cca *CookedCopyCmdArgs) ReportProgressOrExit(lcm common.LifecycleMgr) (tot totalKnownCount = summary.TotalTransfers // if json is not desired, and job is done, then we generate a special end message to conclude the job - duration := time.Now().Sub(cca.jobStartTime) // report the total run time of the job + duration := time.Since(cca.jobStartTime) // report the total run time of the job var computeThroughput = func() float64 { // compute the average throughput for the last time interval @@ -1864,7 +1864,7 @@ func init() { if userFromTo == common.EFromTo.PipeBlob() { // Case 1: PipeBlob. Check for the std input pipe stdinPipeIn, err := isStdinPipeIn() - if stdinPipeIn == false || err != nil { + if !stdinPipeIn || err != nil { return fmt.Errorf("fatal: failed to read from Stdin due to error: %s", err) } raw.src = pipeLocation diff --git a/cmd/jobsClean.go b/cmd/jobsClean.go index a9ffe2027f..21ae47f556 100644 --- a/cmd/jobsClean.go +++ b/cmd/jobsClean.go @@ -61,7 +61,7 @@ func init() { if err == nil { if withStatus == common.EJobStatus.All() { glcm.Exit(func(format common.OutputFormat) string { - return fmt.Sprintf("Successfully removed all jobs.") + return "Successfully removed all jobs." }, common.EExitCode.Success()) } else { glcm.Exit(func(format common.OutputFormat) string { diff --git a/cmd/jobsResume.go b/cmd/jobsResume.go index 97edac966a..9b0ad87261 100644 --- a/cmd/jobsResume.go +++ b/cmd/jobsResume.go @@ -93,7 +93,7 @@ func (cca *resumeJobController) ReportProgressOrExit(lcm common.LifecycleMgr) (t totalKnownCount = summary.TotalTransfers // if json is not desired, and job is done, then we generate a special end message to conclude the job - duration := time.Now().Sub(cca.jobStartTime) // report the total run time of the job + duration := time.Since(cca.jobStartTime) // report the total run time of the job var computeThroughput = func() float64 { // compute the average throughput for the last time interval diff --git a/cmd/pathUtils.go b/cmd/pathUtils.go index d12aa87ee8..76df2649de 100644 --- a/cmd/pathUtils.go +++ b/cmd/pathUtils.go @@ -315,7 +315,7 @@ func splitQueryFromSaslessResource(resource string, loc common.Location) (mainUr // All of the below functions only really do one thing at the moment. // They've been separated from copyEnumeratorInit.go in order to make the code more maintainable, should we want more destinations in the future. func getPathBeforeFirstWildcard(path string) string { - if strings.Index(path, "*") == -1 { + if !strings.Contains(path, "*") { return path } diff --git a/cmd/removeEnumerator.go b/cmd/removeEnumerator.go index c598a30cf0..de96365af5 100755 --- a/cmd/removeEnumerator.go +++ b/cmd/removeEnumerator.go @@ -317,7 +317,7 @@ func removeSingleBfsResource(ctx context.Context, urlParts azbfs.BfsURLParts, p for _, v := range listResp.Paths { entityType := "directory" - if v.IsDirectory == nil || *v.IsDirectory == false { + if v.IsDirectory == nil || !*v.IsDirectory { entityType = "file" } diff --git a/cmd/zc_enumerator.go b/cmd/zc_enumerator.go index c62bcb7357..cc50fe73c2 100755 --- a/cmd/zc_enumerator.go +++ b/cmd/zc_enumerator.go @@ -385,7 +385,7 @@ func InitResourceTraverser(resource common.ResourceString, location common.Locat _, err := common.OSStat(resource.ValueLocal()) // If wildcard is present and this isn't an existing file/folder, glob and feed the globbed list into a list enum. 
- if strings.Index(resource.ValueLocal(), "*") != -1 && err != nil { + if strings.Contains(resource.ValueLocal(), "*") && err != nil { basePath := getPathBeforeFirstWildcard(resource.ValueLocal()) matches, err := filepath.Glob(resource.ValueLocal()) diff --git a/cmd/zc_traverser_blobfs.go b/cmd/zc_traverser_blobfs.go index 35e0d646da..3a9785705f 100644 --- a/cmd/zc_traverser_blobfs.go +++ b/cmd/zc_traverser_blobfs.go @@ -170,7 +170,7 @@ func (t *blobFSTraverser) Traverse(preprocessor objectMorpher, processor objectP for _, v := range dlr.Paths { var entityType common.EntityType lmt := v.LastModifiedTime() - if v.IsDirectory == nil || *v.IsDirectory == false { + if v.IsDirectory == nil || !*v.IsDirectory { entityType = common.EEntityType.File() contentProps = md5OnlyAdapter{md5: t.getContentMd5(t.ctx, dirUrl, v)} size = *v.ContentLength @@ -219,7 +219,7 @@ func (t *blobFSTraverser) Traverse(preprocessor objectMorpher, processor objectP var fileListBuilder strings.Builder for _, v := range dlr.Paths { - if v.IsDirectory == nil || *v.IsDirectory == false { + if v.IsDirectory == nil || !*v.IsDirectory { // it's a file fmt.Fprintf(&fileListBuilder, " %s,", *v.Name) } else { diff --git a/cmd/zc_traverser_list.go b/cmd/zc_traverser_list.go index 7916392488..91853c579c 100755 --- a/cmd/zc_traverser_list.go +++ b/cmd/zc_traverser_list.go @@ -92,9 +92,8 @@ func newListTraverser(parent common.ResourceString, parentType common.Location, ctx *context.Context, recursive, followSymlinks, getProperties bool, listChan chan string, includeDirectoryStubs bool, incrementEnumerationCounter enumerationCounterFunc, s2sPreserveBlobTags bool, logLevel pipeline.LogLevel, cpkOptions common.CpkOptions) ResourceTraverser { - var traverserGenerator childTraverserGenerator - traverserGenerator = func(relativeChildPath string) (ResourceTraverser, error) { + traverserGenerator := func(relativeChildPath string) (ResourceTraverser, error) { source := parent.Clone() if parentType != common.ELocation.Local() { // assume child path is not URL-encoded yet, this is consistent with the behavior of previous implementation diff --git a/common/CountPerSecond.go b/common/CountPerSecond.go index 43d391ab00..c2d9af608d 100644 --- a/common/CountPerSecond.go +++ b/common/CountPerSecond.go @@ -33,7 +33,7 @@ func (cps *countPerSecond) Add(delta uint64) uint64 { func (cps *countPerSecond) LatestRate() float64 { cps.nocopy.Check() - dur := time.Now().Sub(time.Unix(cps.start, 0)) + dur := time.Since(time.Unix(cps.start, 0)) if dur <= 0 { dur = 1 } diff --git a/common/cpuMonitor.go b/common/cpuMonitor.go index fb53a14c6f..0012589411 100644 --- a/common/cpuMonitor.go +++ b/common/cpuMonitor.go @@ -57,7 +57,7 @@ func NewCalibratedCpuUsageMonitor() CPUMonitor { // start it running and wait until it has self-calibrated calibration := make(chan struct{}) go c.computationWorker(calibration) - _ = <-calibration + <-calibration return c } @@ -92,7 +92,7 @@ func (c *cpuUsageMonitor) computationWorker(calibrationDone chan struct{}) { // run a separate loop to do the probes/measurements go c.monitoringWorker(waitTime, durations) - _ = <-durations // discard first value, it doesn't seem very reliable + <-durations // discard first value, it doesn't seem very reliable // get the next 3 and average them, as our baseline. 
We chose 3 somewhat arbitrarily x := <-durations diff --git a/common/credCache_linux.go b/common/credCache_linux.go index c04cfbef22..fc158bc6b1 100644 --- a/common/credCache_linux.go +++ b/common/credCache_linux.go @@ -49,7 +49,7 @@ func NewCredCache(options CredCacheOptions) *CredCache { } runtime.SetFinalizer(c, func(CredCache *CredCache) { - if CredCache.isPermSet == false && CredCache.key != nil { + if !CredCache.isPermSet && CredCache.key != nil { // Indicates Permission is by default ProcessAll, which is not safe and try to recycle the key. // Note: there is no method to grant permission during adding key, // this mechanism is added to ensure key exists only if its permission is set properly. diff --git a/common/decompressingWriter.go b/common/decompressingWriter.go index 420ea554a0..ea75bc6c82 100644 --- a/common/decompressingWriter.go +++ b/common/decompressingWriter.go @@ -91,8 +91,6 @@ func (d decompressingWriter) worker(tp CompressionType, preader *io.PipeReader, b := decompressingWriterBufferPool.RentSlice(decompressingWriterCopyBufferSize) _, err = io.CopyBuffer(destination, dec, b) // returns err==nil if hits EOF, as per docs decompressingWriterBufferPool.ReturnSlice(b) - - return } // Write, conceptually, takes a slice of compressed data, decompresses it, and writes it into the final destination. diff --git a/common/emptyChunkReader.go b/common/emptyChunkReader.go index c8610f5131..127c17295f 100644 --- a/common/emptyChunkReader.go +++ b/common/emptyChunkReader.go @@ -62,5 +62,5 @@ func (cr *emptyChunkReader) Length() int64 { } func (cr *emptyChunkReader) WriteBufferTo(h hash.Hash) { - return // no content to write + // no content to write } diff --git a/common/lifecyleMgr.go b/common/lifecyleMgr.go index 02d57fedd9..cb570e566e 100644 --- a/common/lifecyleMgr.go +++ b/common/lifecyleMgr.go @@ -428,9 +428,7 @@ func (lcm *lifecycleMgr) processNoneOutput(msgToOutput outputMessage) { lcm.closeFunc() os.Exit(int(msgToOutput.exitCode)) } - // ignore all other outputs - return } func (lcm *lifecycleMgr) processJSONOutput(msgToOutput outputMessage) { diff --git a/ste/s2sCopier-URLToBlob.go b/ste/s2sCopier-URLToBlob.go index 750717ce80..a88cf56c85 100644 --- a/ste/s2sCopier-URLToBlob.go +++ b/ste/s2sCopier-URLToBlob.go @@ -21,7 +21,6 @@ package ste import ( - "errors" "fmt" "net/url" @@ -56,7 +55,7 @@ func newURLToBlobCopier(jptm IJobPartTransferMgr, destination string, p pipeline // I don't think it would ever reach here if the source URL failed to parse, but this is a sanity check. if err != nil { - return nil, errors.New(fmt.Sprintf("Failed to parse URL %s in scheduler. Check sanity.", jptm.Info().Source)) + return nil, fmt.Errorf("Failed to parse URL %s in scheduler. Check sanity.", jptm.Info().Source) } fileName := srcURL.Path diff --git a/ste/sender-pageBlob.go b/ste/sender-pageBlob.go index 8b52e95bf7..4adcaf3d73 100644 --- a/ste/sender-pageBlob.go +++ b/ste/sender-pageBlob.go @@ -206,8 +206,8 @@ func (s *pageBlobSenderBase) Prologue(ps common.PrologueState) (destinationModif return } if s.srcSize != p.ContentLength() { - sizeErr := errors.New(fmt.Sprintf("source file is not same size as the destination page blob. Source size is %d bytes but destination size is %d bytes. Re-create the destination with exactly the right size. E.g. see parameter UploadSizeInBytes in PowerShell's New-AzDiskConfig. Ensure the source is a fixed-size VHD", - s.srcSize, p.ContentLength())) + sizeErr := fmt.Errorf("source file is not same size as the destination page blob. 
Source size is %d bytes but destination size is %d bytes. Re-create the destination with exactly the right size. E.g. see parameter UploadSizeInBytes in PowerShell's New-AzDiskConfig. Ensure the source is a fixed-size VHD", + s.srcSize, p.ContentLength()) s.jptm.FailActiveSend("Checking size of managed disk blob", sizeErr) return } diff --git a/ste/xfer-anyToRemote-file.go b/ste/xfer-anyToRemote-file.go index 81952a6a6f..3d56b5e6c5 100644 --- a/ste/xfer-anyToRemote-file.go +++ b/ste/xfer-anyToRemote-file.go @@ -81,7 +81,7 @@ func prepareDestAccountInfo(bURL azblob.BlobURL, jptm IJobPartTransferMgr, ctx c } } -//// TODO: Infer availability based upon blob size as well, for premium page blobs. +// // TODO: Infer availability based upon blob size as well, for premium page blobs. func BlobTierAllowed(destTier azblob.AccessTierType) bool { // If we failed to get the account info, just return true. // This is because we can't infer whether it's possible or not, and the setTier operation could possibly succeed (or fail) @@ -536,7 +536,7 @@ func epilogueWithCleanupSendToRemote(jptm IJobPartTransferMgr, s sender, sip ISo shouldCheckLength = false checkLengthFailureOnReadOnlyDst.Do(func() { var glcm = common.GetLifecycleMgr() - msg := fmt.Sprintf("Could not read destination length. If the destination is write-only, use --check-length=false on the command line.") + msg := "Could not read destination length. If the destination is write-only, use --check-length=false on the command line." glcm.Info(msg) if jptm.ShouldLog(pipeline.LogError) { jptm.Log(pipeline.LogError, msg) diff --git a/ste/xferRetrypolicy.go b/ste/xferRetrypolicy.go index f36f5f1fe5..b7b30c71e2 100644 --- a/ste/xferRetrypolicy.go +++ b/ste/xferRetrypolicy.go @@ -192,7 +192,7 @@ func NewBFSXferRetryPolicyFactory(o XferRetryOptions) pipeline.Factory { // Set the server-side timeout query parameter "timeout=[seconds]" timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two - t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline + t := int32(time.Until(deadline).Seconds()) // Duration from now until user's ctx reaches its deadline logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t) if t < timeout { timeout = t @@ -363,7 +363,7 @@ func NewBlobXferRetryPolicyFactory(o XferRetryOptions) pipeline.Factory { // Set the server-side timeout query parameter "timeout=[seconds]" timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two - t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline + t := int32(time.Until(deadline).Seconds()) // Duration from now until user's ctx reaches its deadline logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t) if t < timeout { timeout = t diff --git a/testSuite/cmd/clean.go b/testSuite/cmd/clean.go index 8dfbf57f39..ea769a5885 100644 --- a/testSuite/cmd/clean.go +++ b/testSuite/cmd/clean.go @@ -363,7 +363,7 @@ func cleanBfsFile(fileURLStr string) { fileURL := azbfs.NewFileURL(*u, createBlobFSPipeline(*u)) _, err = fileURL.Delete(ctx) if err != nil { - fmt.Println(fmt.Sprintf("error deleting the blob FS file, %v", err)) + fmt.Printf("error deleting the blob FS file, %v\n", err) os.Exit(1) } } diff --git a/testSuite/cmd/create.go 
b/testSuite/cmd/create.go index b0cb395928..8df2537425 100644 --- a/testSuite/cmd/create.go +++ b/testSuite/cmd/create.go @@ -277,7 +277,7 @@ func createBlob(blobURL string, blobSize uint32, metadata azblob.Metadata, blobH azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) if err != nil { - fmt.Println(fmt.Sprintf("error uploading the blob %v", err)) + fmt.Printf("error uploading the blob %v\n", err) os.Exit(1) } if putBlobResp.Response() != nil { @@ -355,7 +355,7 @@ func createFile(fileURLStr string, fileSize uint32, metadata azfile.Metadata, fi Metadata: metadata, }) if err != nil { - fmt.Println(fmt.Sprintf("error uploading the file %v", err)) + fmt.Printf("error uploading the file %v\n", err) os.Exit(1) } } diff --git a/testSuite/cmd/testblobFS.go b/testSuite/cmd/testblobFS.go index 4973617e86..ef1959b40b 100644 --- a/testSuite/cmd/testblobFS.go +++ b/testSuite/cmd/testblobFS.go @@ -216,7 +216,7 @@ func (tbfsc TestBlobFSCommand) verifyRemoteDir() { } // numberOfFilesinSubject keeps the count of number of files of at the destination numberOfFilesinSubject := int(0) - for continuationMarker != "" || firstListing == true { + for continuationMarker != "" || firstListing { firstListing = false continuationMarker = dResp.XMsContinuation() files := dResp.Files() From 3ddf75f3bbe690b5d7e5f8f2b7c6444d1d5b48b6 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 10:35:01 -0800 Subject: [PATCH 04/50] Add Golang CI Lint Action --- .github/workflows/golangci-lint.yml | 39 +++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 .github/workflows/golangci-lint.yml diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000000..6f30407f01 --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,39 @@ +name: golangci-lint +on: + push: + tags: + - v* + branches: + - dev + - main + pull_request: + - dev + - main +permissions: + contents: read + # Optional: allow read access to pull request. Use with `only-new-issues` option. + # pull-requests: read +jobs: + golangci: + strategy: + matrix: + go: [1.17] + os: [macos-latest, windows-latest] + name: lint + runs-on: ${{ matrix.os }} + steps: + - uses: actions/setup-go@v3 + with: + go-version: ${{ matrix.go }} + - uses: actions/checkout@v3 + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. + version: v1.29 + + # Optional: golangci-lint command line arguments. + # args: --issues-exit-code=0 + + # Optional: show only new issues if it's a pull request. The default value is `false`. + # only-new-issues: true \ No newline at end of file From 34838abfc68a71489e79d01be10dfc92958a73f1 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 10:38:47 -0800 Subject: [PATCH 05/50] Fix CI yml file --- .github/workflows/golangci-lint.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 6f30407f01..05aa008891 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -1,14 +1,13 @@ name: golangci-lint on: push: - tags: - - v* branches: - dev - main pull_request: - - dev - - main + branches: + - dev + - main permissions: contents: read # Optional: allow read access to pull request. Use with `only-new-issues` option. 
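The errcheck and gosimple fixes applied in patches 02 and 03 reduce to a handful of recurring idioms. The following minimal sketch (illustrative only, not code from the AzCopy repository) shows the three most common ones in isolation:

    package main

    import (
        "io"
        "log"
        "strings"
        "time"
    )

    func main() {
        start := time.Now()

        // errcheck: every error-returning call must be handled or explicitly
        // discarded; assigning to the blank identifier records that dropping
        // the result is intentional, as in the io.Copy and MarkHidden fixes.
        _, _ = io.Copy(io.Discard, strings.NewReader("drain the response body"))

        // gosimple S1003: strings.Contains instead of strings.Index(...) != -1.
        if strings.Contains("path/to/*", "*") {
            log.Println("wildcard present")
        }

        // gosimple S1012: time.Since(t) instead of time.Now().Sub(t).
        log.Println("elapsed:", time.Since(start))
    }

golangci-lint reports these as errcheck, S1003, and S1012 respectively, which is why the same mechanical rewrite shows up across so many files in the series.
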
From 9ff06107d61610ddb2b1c5811fc50c209738ea7c Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 11:17:16 -0800 Subject: [PATCH 06/50] Fixed ineffassign errors --- cmd/copyEnumeratorInit.go | 4 ++-- cmd/credentialUtil.go | 5 ++--- cmd/jobsShow.go | 5 ++--- cmd/rpc.go | 4 +++- cmd/setPropertiesEnumerator.go | 2 +- cmd/zc_traverser_blobfs_account.go | 4 ++-- common/singleChunkReader.go | 2 +- ste/mgr-JobPartMgr.go | 8 +++++--- 8 files changed, 18 insertions(+), 16 deletions(-) diff --git a/cmd/copyEnumeratorInit.go b/cmd/copyEnumeratorInit.go index d54b0e823a..88cbe3c90e 100755 --- a/cmd/copyEnumeratorInit.go +++ b/cmd/copyEnumeratorInit.go @@ -31,7 +31,7 @@ type BucketToContainerNameResolver interface { func (cca *CookedCopyCmdArgs) initEnumerator(jobPartOrder common.CopyJobPartOrderRequest, ctx context.Context) (*CopyEnumerator, error) { var traverser ResourceTraverser - srcCredInfo := common.CredentialInfo{} + var srcCredInfo common.CredentialInfo var isPublic bool var err error @@ -452,7 +452,7 @@ func (cca *CookedCopyCmdArgs) createDstContainer(containerName string, dstWithSA } existingContainers[containerName] = true - dstCredInfo := common.CredentialInfo{} + var dstCredInfo common.CredentialInfo // 3minutes is enough time to list properties of a container, and create new if it does not exist. ctx, _ := context.WithTimeout(parentCtx, time.Minute*3) diff --git a/cmd/credentialUtil.go b/cmd/credentialUtil.go index 3ad2dcf61e..3d98da5ff0 100644 --- a/cmd/credentialUtil.go +++ b/cmd/credentialUtil.go @@ -90,7 +90,7 @@ func GetOAuthTokenManagerInstance() (*common.UserOAuthTokenManager, error) { glcm.Error("Invalid Auto-login type specified.") return } - + if tenantID := glcm.GetEnvironmentVariable(common.EEnvironmentVariable.TenantID()); tenantID != "" { lca.tenantID = tenantID } @@ -490,10 +490,9 @@ func checkAuthSafeForTarget(ct common.CredentialType, resource, extraSuffixesAAD return fmt.Errorf("Google Application Credentials to %s is not valid", resourceType.String()) } - host := "" u, err := url.Parse(resource) if err == nil { - host = u.Host + host := u.Host _, err := common.NewGCPURLParts(*u) if err != nil { return fmt.Errorf("GCP authentication to %s is not currently supported", host) diff --git a/cmd/jobsShow.go b/cmd/jobsShow.go index 921db79429..a4d9db7a87 100644 --- a/cmd/jobsShow.go +++ b/cmd/jobsShow.go @@ -79,10 +79,9 @@ func init() { // handles the list command // dispatches the list order to the transfer engine func HandleShowCommand(listRequest common.ListRequest) error { - rpcCmd := common.ERpcCmd.None() if listRequest.OfStatus == "" { resp := common.ListJobSummaryResponse{} - rpcCmd = common.ERpcCmd.ListJobSummary() + rpcCmd := common.ERpcCmd.ListJobSummary() Rpc(rpcCmd, &listRequest.JobID, &resp) PrintJobProgressSummary(resp) } else { @@ -95,7 +94,7 @@ func HandleShowCommand(listRequest common.ListRequest) error { return fmt.Errorf("cannot parse the given Transfer Status %s", listRequest.OfStatus) } resp := common.ListJobTransfersResponse{} - rpcCmd = common.ERpcCmd.ListJobTransfers() + rpcCmd := common.ERpcCmd.ListJobTransfers() Rpc(rpcCmd, lsRequest, &resp) PrintJobTransfers(resp) } diff --git a/cmd/rpc.go b/cmd/rpc.go index 482c8b655c..104e5550e1 100644 --- a/cmd/rpc.go +++ b/cmd/rpc.go @@ -55,7 +55,6 @@ func inprocSend(rpcCmd common.RpcCmd, requestData interface{}, responseData inte case common.ERpcCmd.ListJobTransfers(): *(responseData.(*common.ListJobTransfersResponse)) = jobsAdmin.ListJobTransfers(requestData.(common.ListJobTransfersRequest)) - 
case common.ERpcCmd.PauseJob(): responseData = jobsAdmin.CancelPauseJobOrder(requestData.(common.JobID), common.EJobStatus.Paused()) @@ -98,6 +97,9 @@ func (httpClient *HTTPClient) send(rpcCmd common.RpcCmd, requestData interface{} return fmt.Errorf("error marshalling request payload for command type %q", rpcCmd.String()) } request, err := http.NewRequest("POST", httpClient.url, bytes.NewReader(requestJson)) + if err != nil { + return err + } // adding the commandType as a query param q := request.URL.Query() q.Add("commandType", rpcCmd.String()) diff --git a/cmd/setPropertiesEnumerator.go b/cmd/setPropertiesEnumerator.go index 6c6f730ba7..4743be3a0b 100755 --- a/cmd/setPropertiesEnumerator.go +++ b/cmd/setPropertiesEnumerator.go @@ -37,7 +37,7 @@ func setPropertiesEnumerator(cca *CookedCopyCmdArgs) (enumerator *CopyEnumerator ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - srcCredInfo := common.CredentialInfo{} + var srcCredInfo common.CredentialInfo if srcCredInfo, _, err = GetCredentialInfoForLocation(ctx, cca.FromTo.From(), cca.Source.Value, cca.Source.SAS, true, cca.CpkOptions); err != nil { return nil, err diff --git a/cmd/zc_traverser_blobfs_account.go b/cmd/zc_traverser_blobfs_account.go index 3c5f7396c0..68b040e1d9 100644 --- a/cmd/zc_traverser_blobfs_account.go +++ b/cmd/zc_traverser_blobfs_account.go @@ -101,7 +101,7 @@ func (t *BlobFSAccountTraverser) listContainers() ([]string, error) { func (t *BlobFSAccountTraverser) Traverse(preprocessor objectMorpher, processor objectProcessor, filters []ObjectFilter) error { // listContainers will return the cached filesystem list if filesystems have already been listed by this traverser. - fsList, err := t.listContainers() + fsList, _ := t.listContainers() for _, v := range fsList { fileSystemURL := t.accountURL.NewFileSystemURL(v).URL() @@ -109,7 +109,7 @@ func (t *BlobFSAccountTraverser) Traverse(preprocessor objectMorpher, processor preprocessorForThisChild := preprocessor.FollowedBy(newContainerDecorator(v)) - err = fileSystemTraverser.Traverse(preprocessorForThisChild, processor, filters) + err := fileSystemTraverser.Traverse(preprocessorForThisChild, processor, filters) if err != nil { WarnStdoutAndScanningLog(fmt.Sprintf("failed to list files in filesystem %s: %s", v, err)) diff --git a/common/singleChunkReader.go b/common/singleChunkReader.go index d362c2ea8e..eddd5e01f0 100644 --- a/common/singleChunkReader.go +++ b/common/singleChunkReader.go @@ -454,7 +454,7 @@ func (cr *singleChunkReader) GetPrologueState() PrologueState { // unuse before Seek, since Seek is public cr.unuse() // MUST re-wind, so that the bytes we read will get transferred too! 
- _, err = cr.Seek(0, io.SeekStart) + _, _ = cr.Seek(0, io.SeekStart) return PrologueState{LeadingBytes: leadingBytes} } diff --git a/ste/mgr-JobPartMgr.go b/ste/mgr-JobPartMgr.go index 46324fa082..b4a13a7a86 100644 --- a/ste/mgr-JobPartMgr.go +++ b/ste/mgr-JobPartMgr.go @@ -469,7 +469,7 @@ func (jpm *jobPartMgr) ScheduleTransfers(jobCtx context.Context) { if plan.FromTo.To().IsRemote() { relDst, err = url.PathUnescape(relDst) } - relDst = strings.TrimPrefix(relSrc, common.AZCOPY_PATH_SEPARATOR_STRING) + relDst = strings.TrimPrefix(relDst, common.AZCOPY_PATH_SEPARATOR_STRING) common.PanicIfErr(err) _, srcOk := DebugSkipFiles[relSrc] @@ -522,15 +522,17 @@ func (jpm *jobPartMgr) createPipelines(ctx context.Context) { if jpm.credInfo.CredentialType == common.ECredentialType.Unknown() { credInfo = jpm.jobMgr.getInMemoryTransitJobState().CredentialInfo } - userAgent := common.UserAgent + // TODO: Double check this fix + var userAgent string if fromTo.From() == common.ELocation.S3() { userAgent = common.S3ImportUserAgent } else if fromTo.From() == common.ELocation.GCP() { userAgent = common.GCPImportUserAgent } else if fromTo.From() == common.ELocation.Benchmark() || fromTo.To() == common.ELocation.Benchmark() { userAgent = common.BenchmarkUserAgent + } else { + userAgent = common.GetLifecycleMgr().AddUserAgentPrefix(common.UserAgent) } - userAgent = common.GetLifecycleMgr().AddUserAgentPrefix(common.UserAgent) credOption := common.CredentialOpOptions{ LogInfo: func(str string) { jpm.Log(pipeline.LogInfo, str) }, From 9f61cad89269ba95e976df830bd95004ea6e30a7 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 12:22:23 -0800 Subject: [PATCH 07/50] Fixed some staticcheck errors --- azbfs/zc_policy_request_log.go | 4 ++-- cmd/credentialUtil.go | 3 --- common/oauthTokenManager.go | 9 ++++----- ste/JobPartPlanFileName.go | 1 + ste/sourceInfoProvider-GCP.go | 5 +++-- testSuite/cmd/clean.go | 2 +- 6 files changed, 11 insertions(+), 13 deletions(-) diff --git a/azbfs/zc_policy_request_log.go b/azbfs/zc_policy_request_log.go index 2aaa16ef90..5035ad2a60 100644 --- a/azbfs/zc_policy_request_log.go +++ b/azbfs/zc_policy_request_log.go @@ -76,9 +76,9 @@ func NewRequestLogPolicyFactory_Deprecated(o RequestLogOptions) pipeline.Factory sc := response.Response().StatusCode if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) { logLevel, forceLog = pipeline.LogError, !o.SyslogDisabled // Promote to Error any 4xx (except those listed is an error) or any 5xx - } else { - // For other status codes, we leave the level as is. } + // For other status codes, we leave the level as is. + } else { // This error did not get an HTTP response from the service; upgrade the severity to Error logLevel, forceLog = pipeline.LogError, !o.SyslogDisabled } diff --git a/cmd/credentialUtil.go b/cmd/credentialUtil.go index 3d98da5ff0..f5aa00760b 100644 --- a/cmd/credentialUtil.go +++ b/cmd/credentialUtil.go @@ -613,9 +613,6 @@ func GetCredentialInfoForLocation(ctx context.Context, location common.Location, } else { credInfo.OAuthTokenInfo = *tokenInfo } - } else if credInfo.CredentialType == common.ECredentialType.S3AccessKey() || credInfo.CredentialType == common.ECredentialType.S3PublicBucket() { - // nothing to do here. 
The extra fields for S3 are fleshed out at the time - // we make the S3Client } return diff --git a/common/oauthTokenManager.go b/common/oauthTokenManager.go index ba9c55ea2b..69656ba421 100644 --- a/common/oauthTokenManager.go +++ b/common/oauthTokenManager.go @@ -30,7 +30,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "os" @@ -303,7 +302,7 @@ func certLoginNoUOTM(tenantID, activeDirectoryEndpoint, certPath, certPass, appl return nil, err } - certData, err := ioutil.ReadFile(certPath) + certData, err := os.ReadFile(certPath) if err != nil { return nil, err } @@ -743,7 +742,7 @@ func (credInfo *OAuthTokenInfo) queryIMDS(ctx context.Context, msiEndpoint strin req.Header.Set("Metadata", "true") // Set context. - req.WithContext(ctx) + req = req.WithContext(ctx) // In case of some other process (Http Server) listening at 127.0.0.1:40342 , we do not want to wait forever for it to serve request msiTokenHTTPClient.Timeout = 10 * time.Second // Send request @@ -880,7 +879,7 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.T } defer func() { // resp and Body should not be nil - _, _ = io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) resp.Body.Close() }() @@ -890,7 +889,7 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.T return nil, fmt.Errorf("failed to get token from msi, status code: %v", resp.StatusCode) } - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/ste/JobPartPlanFileName.go b/ste/JobPartPlanFileName.go index 13ac8cfe7c..ece41b9962 100644 --- a/ste/JobPartPlanFileName.go +++ b/ste/JobPartPlanFileName.go @@ -43,6 +43,7 @@ func (jpfn JobPartPlanFileName) Parse() (jobID common.JobID, partNumber common.P jobId, err := common.ParseJobID(jpfnSplit[0]) if err != nil { err = fmt.Errorf("failed to parse the JobId from JobPartFileName %s. Failed with error %s", string(jpfn), err.Error()) + // TODO: return here on error? } jobID = jobId n, err := fmt.Sscanf(jpfnSplit[1], "%05d.steV%d", &partNumber, &dataSchemaVersion) diff --git a/ste/sourceInfoProvider-GCP.go b/ste/sourceInfoProvider-GCP.go index 17af5c0c9e..73f6ded675 100644 --- a/ste/sourceInfoProvider-GCP.go +++ b/ste/sourceInfoProvider-GCP.go @@ -6,7 +6,8 @@ import ( "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" "golang.org/x/oauth2/google" - "io/ioutil" + "os" + "net/url" "time" ) @@ -52,7 +53,7 @@ func newGCPSourceInfoProvider(jptm IJobPartTransferMgr) (ISourceInfoProvider, er return nil, err } glcm := common.GetLifecycleMgr() - jsonKey, err = ioutil.ReadFile(glcm.GetEnvironmentVariable(common.EEnvironmentVariable.GoogleAppCredentials())) + jsonKey, err = os.ReadFile(glcm.GetEnvironmentVariable(common.EEnvironmentVariable.GoogleAppCredentials())) if err != nil { return nil, fmt.Errorf("Cannot read JSON key file. 
Please verify you have correctly set GOOGLE_APPLICATION_CREDENTIALS environment variable") } diff --git a/testSuite/cmd/clean.go b/testSuite/cmd/clean.go index ea769a5885..03ed0e769a 100644 --- a/testSuite/cmd/clean.go +++ b/testSuite/cmd/clean.go @@ -547,7 +547,7 @@ func deleteGCPBucket(client *gcpUtils.Client, bucketName string) { break } if err == nil { - err = bucket.Object(attrs.Name).Delete(nil) + err = bucket.Object(attrs.Name).Delete(context.TODO()) if err != nil { fmt.Println("Could not clear GCS Buckets.") return From 4ced119e3272e7006de23f7d15bef0a9d95b5192 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 12:29:25 -0800 Subject: [PATCH 08/50] Fixed errcheck issues --- cmd/copy.go | 6 +++--- common/logger_unix.go | 2 +- ste/xferRetrypolicy.go | 2 +- testSuite/cmd/create.go | 2 +- testSuite/cmd/testblob.go | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cmd/copy.go b/cmd/copy.go index 7982126a50..df651bbfaa 100644 --- a/cmd/copy.go +++ b/cmd/copy.go @@ -1997,13 +1997,13 @@ func init() { cpCmd.PersistentFlags().StringVar(&raw.legacyInclude, "include", "", "Legacy include param. DO NOT USE") cpCmd.PersistentFlags().StringVar(&raw.legacyExclude, "exclude", "", "Legacy exclude param. DO NOT USE") _ = cpCmd.PersistentFlags().MarkHidden("include") - cpCmd.PersistentFlags().MarkHidden("exclude") + _ = cpCmd.PersistentFlags().MarkHidden("exclude") // Hide the flush-threshold flag since it is implemented only for CI. cpCmd.PersistentFlags().Uint32Var(&ste.ADLSFlushThreshold, "flush-threshold", 7500, "Adjust the number of blocks to flush at once on accounts that have a hierarchical namespace.") - cpCmd.PersistentFlags().MarkHidden("flush-threshold") + _ = cpCmd.PersistentFlags().MarkHidden("flush-threshold") // Deprecate the old persist-smb-permissions flag - cpCmd.PersistentFlags().MarkHidden("preserve-smb-permissions") + _ = cpCmd.PersistentFlags().MarkHidden("preserve-smb-permissions") cpCmd.PersistentFlags().BoolVar(&raw.preservePermissions, PreservePermissionsFlag, false, "False by default. Preserves ACLs between aware resources (Windows and Azure Files, or ADLS Gen 2 to ADLS Gen 2). For Hierarchical Namespace accounts, you will need a container SAS or OAuth token with Modify Ownership and Modify Permissions permissions. For downloads, you will also need the --backup flag to restore permissions where the new Owner will not be the user running AzCopy. This flag applies to both files and folders, unless a file-only filter is specified (e.g. 
include-pattern).") } diff --git a/common/logger_unix.go b/common/logger_unix.go index e89f10d924..3277d5adba 100644 --- a/common/logger_unix.go +++ b/common/logger_unix.go @@ -82,7 +82,7 @@ func (sl *sysLogger) CloseLog() { return } - sl.writer.Notice("Closing Log") + _ = sl.writer.Notice("Closing Log") sl.writer.Close() } diff --git a/ste/xferRetrypolicy.go b/ste/xferRetrypolicy.go index b7b30c71e2..4fc2bd3d56 100644 --- a/ste/xferRetrypolicy.go +++ b/ste/xferRetrypolicy.go @@ -455,7 +455,7 @@ func NewBlobXferRetryPolicyFactory(o XferRetryOptions) pipeline.Factory { } if response.Response() != nil { // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection - io.Copy(ioutil.Discard, response.Response().Body) + _, _ = io.Copy(ioutil.Discard, response.Response().Body) response.Response().Body.Close() } // If retrying, cancel the current per-try timeout context diff --git a/testSuite/cmd/create.go b/testSuite/cmd/create.go index 8df2537425..709165c4ff 100644 --- a/testSuite/cmd/create.go +++ b/testSuite/cmd/create.go @@ -281,7 +281,7 @@ func createBlob(blobURL string, blobSize uint32, metadata azblob.Metadata, blobH os.Exit(1) } if putBlobResp.Response() != nil { - io.Copy(ioutil.Discard, putBlobResp.Response().Body) + _, _ = io.Copy(ioutil.Discard, putBlobResp.Response().Body) putBlobResp.Response().Body.Close() } } diff --git a/testSuite/cmd/testblob.go b/testSuite/cmd/testblob.go index 7cb99829c4..577093f5ae 100644 --- a/testSuite/cmd/testblob.go +++ b/testSuite/cmd/testblob.go @@ -333,7 +333,7 @@ func verifySinglePageBlobUpload(testBlobCmd TestBlobCommand) { } // Closing the blobProperties response body. if blobProperties.Response() != nil { - io.Copy(ioutil.Discard, blobProperties.Response().Body) + _, _ = io.Copy(ioutil.Discard, blobProperties.Response().Body) blobProperties.Response().Body.Close() } } From 7b89b52579f3fa8f21bbafa0f0e163281f239923 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 13:23:08 -0800 Subject: [PATCH 09/50] Fixed some gosimple --- cmd/sync.go | 12 +++++----- cmd/zc_enumerator.go | 2 +- common/chunkStatusLogger.go | 7 +++--- common/cpuMonitor.go | 5 +---- common/fe-ste-models.go | 22 ++++++++++--------- .../parallel/zt_FileSystemCrawlerTest_test.go | 1 - testSuite/cmd/list.go | 4 ++-- testSuite/cmd/testblob.go | 2 +- testSuite/cmd/testblobFS.go | 2 +- 9 files changed, 27 insertions(+), 30 deletions(-) diff --git a/cmd/sync.go b/cmd/sync.go index 7ea2423041..7f1aa22721 100644 --- a/cmd/sync.go +++ b/cmd/sync.go @@ -546,7 +546,7 @@ func (cca *cookedSyncCmdArgs) getJsonOfSyncJobSummary(summary common.ListJobSumm } func (cca *cookedSyncCmdArgs) ReportProgressOrExit(lcm common.LifecycleMgr) (totalKnownCount uint32) { - duration := time.Now().Sub(cca.jobStartTime) // report the total run time of the job + duration := time.Since(cca.jobStartTime) // report the total run time of the job var summary common.ListJobSummaryResponse var throughput float64 var jobDone bool @@ -754,11 +754,11 @@ func init() { // TODO: enable for copy with IfSourceNewer // smb info/permissions can be persisted in the scenario of File -> File syncCmd.PersistentFlags().BoolVar(&raw.preserveSMBPermissions, "preserve-smb-permissions", false, "False by default. Preserves SMB ACLs between aware resources (Azure Files). This flag applies to both files and folders, unless a file-only filter is specified (e.g. 
include-pattern).") - syncCmd.PersistentFlags().BoolVar(&raw.preserveSMBInfo, "preserve-smb-info", (runtime.GOOS=="windows"), "Preserves SMB property info (last write time, creation time, attribute bits)"+ - " between SMB-aware resources (Windows and Azure Files). On windows, this flag will be set to true by default. If the source or destination is a "+ - "volume mounted on Linux using SMB protocol, this flag will have to be explicitly set to true. Only the attribute bits supported by Azure Files "+ - "will be transferred; any others will be ignored. This flag applies to both files and folders, unless a file-only filter is specified "+ - "(e.g. include-pattern). The info transferred for folders is the same as that for files, except for Last Write Time which is never preserved for folders.") + syncCmd.PersistentFlags().BoolVar(&raw.preserveSMBInfo, "preserve-smb-info", (runtime.GOOS == "windows"), "Preserves SMB property info (last write time, creation time, attribute bits)"+ + " between SMB-aware resources (Windows and Azure Files). On windows, this flag will be set to true by default. If the source or destination is a "+ + "volume mounted on Linux using SMB protocol, this flag will have to be explicitly set to true. Only the attribute bits supported by Azure Files "+ + "will be transferred; any others will be ignored. This flag applies to both files and folders, unless a file-only filter is specified "+ + "(e.g. include-pattern). The info transferred for folders is the same as that for files, except for Last Write Time which is never preserved for folders.") syncCmd.PersistentFlags().BoolVar(&raw.preservePOSIXProperties, "preserve-posix-properties", false, "'Preserves' property info gleaned from stat or statx into object metadata.") // TODO: enable when we support local <-> File diff --git a/cmd/zc_enumerator.go b/cmd/zc_enumerator.go index cc50fe73c2..6f1099b612 100755 --- a/cmd/zc_enumerator.go +++ b/cmd/zc_enumerator.go @@ -724,7 +724,7 @@ func (e *CopyEnumerator) enumerate() (err error) { // -------------------------------------- Helper Funcs -------------------------------------- \\ func passedFilters(filters []ObjectFilter, storedObject StoredObject) bool { - if filters != nil && len(filters) > 0 { + if len(filters) > 0 { // loop through the filters, if any of them fail, then return false for _, filter := range filters { msg, supported := filter.DoesSupportThisOS() diff --git a/common/chunkStatusLogger.go b/common/chunkStatusLogger.go index 573bfebf11..b47f3f8b30 100644 --- a/common/chunkStatusLogger.go +++ b/common/chunkStatusLogger.go @@ -310,10 +310,9 @@ func (csl *chunkStatusLogger) FlushLog() { // In order to be idempotent, we don't close any channel here, we just flush it - csl.unsavedEntries <- nil // tell writer that it it must flush, then wait until it has done so - select { - case <-csl.flushDone: - } + csl.unsavedEntries <- nil // tell writer that it must flush, then wait until it has done so + + <-csl.flushDone } // CloseLogger close the chunklogger thread. diff --git a/common/cpuMonitor.go b/common/cpuMonitor.go index 0012589411..e25252069d 100644 --- a/common/cpuMonitor.go +++ b/common/cpuMonitor.go @@ -140,10 +140,7 @@ func (c *cpuUsageMonitor) monitoringWorker(waitTime time.Duration, d chan time.D for { start := time.Now() - select { - case <-time.After(waitTime): - // noop - } + <-time.After(waitTime) // noop duration := time.Since(start) // how much longer than expected did it take for us to wake up? 
diff --git a/common/fe-ste-models.go b/common/fe-ste-models.go index a0a32f4e08..031929acdf 100644 --- a/common/fe-ste-models.go +++ b/common/fe-ste-models.go @@ -110,7 +110,7 @@ type PartNumber uint32 type Version uint32 type Status uint32 -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// var EDeleteSnapshotsOption = DeleteSnapshotsOption(0) type DeleteSnapshotsOption uint8 @@ -145,7 +145,7 @@ func (d DeleteSnapshotsOption) ToDeleteSnapshotsOptionType() azblob.DeleteSnapsh return azblob.DeleteSnapshotsOptionType(strings.ToLower(d.String())) } -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// var EPermanentDeleteOption = PermanentDeleteOption(3) // Default to "None" type PermanentDeleteOption uint8 @@ -610,7 +610,7 @@ func (ft *FromTo) IsPropertyOnlyTransfer() bool { var BenchmarkLmt = time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC) -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Enumerates the values for blob type. type BlobType uint8 @@ -674,9 +674,11 @@ type TransferStatus int32 // Must be 32-bit for atomic operations; negative #s r func (TransferStatus) NotStarted() TransferStatus { return TransferStatus(0) } // TODO confirm whether this is actually needed -// Outdated: -// Transfer started & at least 1 chunk has successfully been transferred. -// Used to resume a transfer that started to avoid transferring all chunks thereby improving performance +// +// Outdated: +// Transfer started & at least 1 chunk has successfully been transferred. +// Used to resume a transfer that started to avoid transferring all chunks thereby improving performance +// // Update(Jul 2020): This represents the state of transfer as soon as the file is scheduled. 
func (TransferStatus) Started() TransferStatus { return TransferStatus(1) } @@ -972,7 +974,7 @@ func (i *InvalidMetadataHandleOption) UnmarshalJSON(b []byte) error { return i.Parse(s) } -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// const ( DefaultBlockBlobBlockSize = 8 * 1024 * 1024 MaxBlockBlobBlockSize = 4000 * 1024 * 1024 @@ -1178,7 +1180,7 @@ func ToCommonBlobTagsMap(blobTagsString string) BlobTags { const metadataRenamedKeyPrefix = "rename_" const metadataKeyForRenamedOriginalKeyPrefix = "rename_key_" -var metadataKeyInvalidCharRegex = regexp.MustCompile("\\W") +var metadataKeyInvalidCharRegex = regexp.MustCompile(`\W`) var metadataKeyRenameErrStr = "failed to rename invalid metadata key %q" // ResolveInvalidKey resolves invalid metadata key with following steps: @@ -1561,7 +1563,7 @@ func GetClientProvidedKey(options CpkOptions) azblob.ClientProvidedKeyOptions { return ToClientProvidedKeyOptions(_cpkInfo, _cpkScopeInfo) } -//////////////////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////////////////// type SetPropertiesFlags uint32 // [0000000000...32 times] var ESetPropertiesFlags = SetPropertiesFlags(0) @@ -1584,7 +1586,7 @@ func (op *SetPropertiesFlags) ShouldTransferBlobTags() bool { return (*op)&ESetPropertiesFlags.SetBlobTags() == ESetPropertiesFlags.SetBlobTags() } -//////////////////////////////////////////////////////////////////////////////// +// ////////////////////////////////////////////////////////////////////////////// type RehydratePriorityType uint8 var ERehydratePriorityType = RehydratePriorityType(0) // setting default as none diff --git a/common/parallel/zt_FileSystemCrawlerTest_test.go b/common/parallel/zt_FileSystemCrawlerTest_test.go index af184c4b38..362925811d 100644 --- a/common/parallel/zt_FileSystemCrawlerTest_test.go +++ b/common/parallel/zt_FileSystemCrawlerTest_test.go @@ -37,7 +37,6 @@ func Test(t *testing.T) { chk.TestingT(t) } type fileSystemCrawlerSuite struct{} var _ = chk.Suite(&fileSystemCrawlerSuite{}) -var ctx = context.Background() var windowsSystemDirectory = "" diff --git a/testSuite/cmd/list.go b/testSuite/cmd/list.go index 2820463a42..ae5fc6716f 100644 --- a/testSuite/cmd/list.go +++ b/testSuite/cmd/list.go @@ -107,7 +107,7 @@ func listContainer(resourceUrl string, numberOfresource int64) { listBlob, err := containerUrl.ListBlobsFlatSegment(context.TODO(), marker, azblob.ListBlobsSegmentOptions{Prefix: searchPrefix}) if err != nil { - fmt.Println(fmt.Sprintf("cannot list blobs for download. Failed with error %s", err.Error())) + fmt.Printf("cannot list blobs for download. 
Failed with error %s\n", err.Error()) os.Exit(1) } @@ -124,7 +124,7 @@ func listContainer(resourceUrl string, numberOfresource int64) { } if numberOfblobs != numberOfresource { - fmt.Println(fmt.Sprintf("expected number of blobs / file %d inside the resource does not match the actual %d", numberOfresource, numberOfblobs)) + fmt.Printf("expected number of blobs / file %d inside the resource does not match the actual %d\n", numberOfresource, numberOfblobs) os.Exit(1) } } diff --git a/testSuite/cmd/testblob.go b/testSuite/cmd/testblob.go index 577093f5ae..892b1144fe 100644 --- a/testSuite/cmd/testblob.go +++ b/testSuite/cmd/testblob.go @@ -189,7 +189,7 @@ func verifyBlockBlobDirUpload(testBlobCmd TestBlobCommand) { 0, *size, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) if err != nil { - fmt.Println(fmt.Sprintf("error downloading the blob %s", blobInfo.Name)) + fmt.Printf("error downloading the blob %s\n", blobInfo.Name) os.Exit(1) } diff --git a/testSuite/cmd/testblobFS.go b/testSuite/cmd/testblobFS.go index ef1959b40b..dcad0c0971 100644 --- a/testSuite/cmd/testblobFS.go +++ b/testSuite/cmd/testblobFS.go @@ -311,7 +311,7 @@ func (tbfsc TestBlobFSCommand) verifyRemoteDir() { // If the number of files inside the directories locally and remote // is not same, validation fails. if numberOFFilesInObject != numberOfFilesinSubject { - fmt.Println(fmt.Sprintf("validation failed since there is difference in the number of files in source and destination")) + fmt.Println("validation failed since there is difference in the number of files in source and destination") os.Exit(1) } fmt.Println(fmt.Sprintf("successfully validated the source %s and destination %s", tbfsc.Object, tbfsc.Subject)) From 486c0bef3f7d1c343906c547a64275bee22695d4 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 13:40:18 -0800 Subject: [PATCH 10/50] errcheck fixes --- cmd/root.go | 8 ++++---- testSuite/cmd/testblob.go | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/root.go b/cmd/root.go index f6c1543f85..ee62aba1e0 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -227,12 +227,12 @@ func init() { rootCmd.PersistentFlags().StringVar(&debugSkipFiles, "debug-skip-files", "", "Used when debugging, to tell AzCopy to cancel the job midway. List of relative paths to skip in the STE.") // reserved for partner teams - rootCmd.PersistentFlags().MarkHidden("cancel-from-stdin") + _ = rootCmd.PersistentFlags().MarkHidden("cancel-from-stdin") // debug-only - rootCmd.PersistentFlags().MarkHidden("await-continue") - rootCmd.PersistentFlags().MarkHidden("await-open") - rootCmd.PersistentFlags().MarkHidden("debug-skip-files") + _ = rootCmd.PersistentFlags().MarkHidden("await-continue") + _ = rootCmd.PersistentFlags().MarkHidden("await-open") + _ = rootCmd.PersistentFlags().MarkHidden("debug-skip-files") } // always spins up a new goroutine, because sometimes the aka.ms URL can't be reached (e.g. a constrained environment where diff --git a/testSuite/cmd/testblob.go b/testSuite/cmd/testblob.go index 892b1144fe..b15950d585 100644 --- a/testSuite/cmd/testblob.go +++ b/testSuite/cmd/testblob.go @@ -509,7 +509,7 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { } // Closing the blobProperties response. 
if blobProperties.Response() != nil { - io.Copy(ioutil.Discard, blobProperties.Response().Body) + _, _ = io.Copy(io.Discard, blobProperties.Response().Body) blobProperties.Response().Body.Close() } // If the access tier type of blob is set to Archive, then the blob is offline and reading the blob is not allowed, @@ -525,7 +525,7 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { os.Exit(1) } // reading all the blob bytes. - blobBytesDownloaded, err := ioutil.ReadAll(get.Body(azblob.RetryReaderOptions{})) + blobBytesDownloaded, err := io.ReadAll(get.Body(azblob.RetryReaderOptions{})) if get.Response().Body != nil { get.Response().Body.Close() } @@ -678,7 +678,7 @@ func verifySingleAppendBlob(testBlobCmd TestBlobCommand) { } // Closing the blobProperties response body. if blobProperties.Response() != nil { - io.Copy(ioutil.Discard, blobProperties.Response().Body) + _, _ = io.Copy(io.Discard, blobProperties.Response().Body) blobProperties.Response().Body.Close() } } @@ -689,7 +689,7 @@ func verifySingleAppendBlob(testBlobCmd TestBlobCommand) { os.Exit(1) } // reading all the bytes downloaded. - blobBytesDownloaded, err := ioutil.ReadAll(get.Body(azblob.RetryReaderOptions{})) + blobBytesDownloaded, err := io.ReadAll(get.Body(azblob.RetryReaderOptions{})) if get.Response().Body != nil { get.Response().Body.Close() } From 329f98468fa021785b1d149f36740f2533509ef2 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 14:59:39 -0800 Subject: [PATCH 11/50] Fixed a few more issues --- azbfs/zc_policy_retry.go | 4 ++-- azbfs/zz_response_model.go | 5 ++--- azure-pipelines.yml | 2 +- cmd/copyEnumeratorInit.go | 2 +- cmd/credentialUtil.go | 1 + cmd/rpc.go | 2 +- cmd/zc_traverser_blob.go | 2 ++ cmd/zc_traverser_gcp.go | 1 - cmd/zc_traverser_gcp_service.go | 2 +- common/lifecyleMgr.go | 3 ++- ste/xferRetrypolicy.go | 7 +++---- ste/xferStatsPolicy.go | 8 ++++---- testSuite/cmd/create.go | 4 ++-- testSuite/cmd/list.go | 1 + testSuite/cmd/testblob.go | 6 +++--- testSuite/cmd/upload.go | 3 +-- 16 files changed, 27 insertions(+), 26 deletions(-) diff --git a/azbfs/zc_policy_retry.go b/azbfs/zc_policy_retry.go index 00d1ccd58e..36e36f6c6e 100644 --- a/azbfs/zc_policy_retry.go +++ b/azbfs/zc_policy_retry.go @@ -3,7 +3,6 @@ package azbfs import ( "context" "io" - "io/ioutil" "math/rand" "net" "net/http" @@ -254,6 +253,7 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { action = "Retry: Secondary URL returned 404" case err != nil: // NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation + // TODO: Add ignore for this error SA1019 if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) { action = "Retry: net.Error and Temporary() or Timeout()" } else if err == io.ErrUnexpectedEOF { @@ -284,7 +284,7 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { } if response != nil && response.Response() != nil { // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection - _, _ = io.Copy(ioutil.Discard, response.Response().Body) + _, _ = io.Copy(io.Discard, response.Response().Body) response.Response().Body.Close() } // If retrying, cancel the current per-try timeout context diff --git a/azbfs/zz_response_model.go b/azbfs/zz_response_model.go index 84ab047015..e333db1904 100644 --- a/azbfs/zz_response_model.go +++ b/azbfs/zz_response_model.go @@ -295,9 +295,8 @@ func (dlr *DirectoryListResponse) Directories() []string { func (dlr 
*DirectoryListResponse) FilesAndDirectories() []Path { var entities []Path lSchema := (PathList)(*dlr) - for _, path := range lSchema.Paths { - entities = append(entities, path) - } + // Assuming this file is not generated: per the azbfs swagger README, generated files carry the zz_generated prefix. + entities = append(entities, lSchema.Paths...) return entities } diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 7484c25701..04b75feeea 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -35,7 +35,7 @@ jobs: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin echo 'Installation complete' $(go env GOPATH)/bin/golangci-lint --version - $(go env GOPATH)/bin/golangci-lint run --tests=false --max-issues-per-linter=0 > lint.log + $(go env GOPATH)/bin/golangci-lint run --tests=false --max-issues-per-linter=0 --skip-files azbfs/zz_generated_* > lint.log result=$(cat lint.log | wc -l) if [ $result -ne 0 ]; then echo "-----------------------------------" diff --git a/cmd/copyEnumeratorInit.go b/cmd/copyEnumeratorInit.go index 88cbe3c90e..debfb7353c 100755 --- a/cmd/copyEnumeratorInit.go +++ b/cmd/copyEnumeratorInit.go @@ -567,7 +567,7 @@ var reverseEncodedChars = map[string]rune{ } func pathEncodeRules(path string, fromTo common.FromTo, disableAutoDecoding bool, source bool) string { - loc := common.ELocation.Unknown() + var loc common.Location if source { loc = fromTo.From() diff --git a/cmd/credentialUtil.go b/cmd/credentialUtil.go index f5aa00760b..54829fcfef 100644 --- a/cmd/credentialUtil.go +++ b/cmd/credentialUtil.go @@ -342,6 +342,7 @@ func oAuthTokenExists() (oauthTokenExists bool) { if hasCachedToken, err := uotm.HasCachedToken(); hasCachedToken { oauthTokenExists = true + // TODO: Add lint ignore for this SA9003 } else if err != nil { // Log the error if fail to get cached token, as these are unhandled errors, and should not influence the logic flow. // Uncomment for debugging. 
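The SA1019 and SA9003 TODOs in this patch anticipate inline lint suppressions. With golangci-lint these are usually written as //nolint directives naming the linter, either at the end of the offending line or on a line of their own ahead of the block. A hedged, hypothetical sketch of the pattern (not necessarily the form this repository later adopted):

package main

import "fmt"

func main() {
	err := fmt.Errorf("cached token lookup failed")

	//nolint:staticcheck // SA9003: branch intentionally left empty; the comments inside explain why.
	if err != nil {
		// Log the error if we fail to get the cached token.
		// Uncomment for debugging.
	}

	fmt.Println("done")
}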
diff --git a/cmd/rpc.go b/cmd/rpc.go index 104e5550e1..35d6743d07 100644 --- a/cmd/rpc.go +++ b/cmd/rpc.go @@ -56,7 +56,7 @@ func inprocSend(rpcCmd common.RpcCmd, requestData interface{}, responseData inte *(responseData.(*common.ListJobTransfersResponse)) = jobsAdmin.ListJobTransfers(requestData.(common.ListJobTransfersRequest)) case common.ERpcCmd.PauseJob(): - responseData = jobsAdmin.CancelPauseJobOrder(requestData.(common.JobID), common.EJobStatus.Paused()) + *(responseData.(*common.CancelPauseResumeResponse)) = jobsAdmin.CancelPauseJobOrder(requestData.(common.JobID), common.EJobStatus.Paused()) case common.ERpcCmd.CancelJob(): *(responseData.(*common.CancelPauseResumeResponse)) = jobsAdmin.CancelPauseJobOrder(requestData.(common.JobID), common.EJobStatus.Cancelling()) diff --git a/cmd/zc_traverser_blob.go b/cmd/zc_traverser_blob.go index 6430fa58ae..7d54d319cd 100644 --- a/cmd/zc_traverser_blob.go +++ b/cmd/zc_traverser_blob.go @@ -384,6 +384,8 @@ func (t *blobTraverser) parallelList(containerURL azblob.ContainerURL, container } } + // TODO : Double check this + cancelWorkers() return nil } diff --git a/cmd/zc_traverser_gcp.go b/cmd/zc_traverser_gcp.go index 25010b3275..f277805799 100644 --- a/cmd/zc_traverser_gcp.go +++ b/cmd/zc_traverser_gcp.go @@ -130,7 +130,6 @@ func (t *gcpTraverser) Traverse(preprocessor objectMorpher, processor objectProc } } } - return nil } func newGCPTraverser(rawURL *url.URL, ctx context.Context, recursive, getProperties bool, incrementEnumerationCounter enumerationCounterFunc) (*gcpTraverser, error) { diff --git a/cmd/zc_traverser_gcp_service.go b/cmd/zc_traverser_gcp_service.go index 3a076529fc..57da560bbb 100644 --- a/cmd/zc_traverser_gcp_service.go +++ b/cmd/zc_traverser_gcp_service.go @@ -110,5 +110,5 @@ func newGCPServiceTraverser(rawURL *url.URL, ctx context.Context, getProperties t.gcpURL = gcpURLParts t.gcpClient, err = common.CreateGCPClient(t.ctx) - return t, nil + return t, err } diff --git a/common/lifecyleMgr.go b/common/lifecyleMgr.go index cb570e566e..bb52139018 100644 --- a/common/lifecyleMgr.go +++ b/common/lifecyleMgr.go @@ -11,6 +11,7 @@ import ( "strconv" "strings" "sync/atomic" + "syscall" "time" "github.com/Azure/azure-pipeline-go/pipeline" @@ -553,7 +554,7 @@ func (lcm *lifecycleMgr) InitiateProgressReporting(jc WorkController) { lastFetchTime := time.Now().Add(-wait) // So that we start fetching time immediately // cancelChannel will be notified when os receives os.Interrupt and os.Kill signals - signal.Notify(lcm.cancelChannel, os.Interrupt, os.Kill) + signal.Notify(lcm.cancelChannel, os.Interrupt, syscall.SIGTERM) cancelCalled := false diff --git a/ste/xferRetrypolicy.go b/ste/xferRetrypolicy.go index 4fc2bd3d56..b715ef01ec 100644 --- a/ste/xferRetrypolicy.go +++ b/ste/xferRetrypolicy.go @@ -3,7 +3,6 @@ package ste import ( "context" "io" - "io/ioutil" "math/rand" "net" "net/http" @@ -282,8 +281,8 @@ func NewBFSXferRetryPolicyFactory(o XferRetryOptions) pipeline.Factory { break // Don't retry } if response.Response() != nil { - // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection - _, _ = io.Copy(ioutil.Discard, response.Response().Body) + // If we're going to retry, and we got a previous response, then flush its body to avoid leaking its TCP connection + _, _ = io.Copy(io.Discard, response.Response().Body) response.Response().Body.Close() } // If retrying, cancel the current per-try timeout context @@ -455,7 +454,7 @@ func NewBlobXferRetryPolicyFactory(o 
XferRetryOptions) pipeline.Factory { } if response.Response() != nil { // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection - _, _ = io.Copy(ioutil.Discard, response.Response().Body) + _, _ = io.Copy(io.Discard, response.Response().Body) response.Response().Body.Close() } // If retrying, cancel the current per-try timeout context diff --git a/ste/xferStatsPolicy.go b/ste/xferStatsPolicy.go index 7b489b79d2..e40792817f 100644 --- a/ste/xferStatsPolicy.go +++ b/ste/xferStatsPolicy.go @@ -25,7 +25,7 @@ import ( "context" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" - "io/ioutil" + "io" "net/http" "strings" "sync/atomic" @@ -206,9 +206,9 @@ func transparentlyReadBody(r *http.Response) string { if r.Body == http.NoBody { return "" } - buf, _ := ioutil.ReadAll(r.Body) // error responses are short fragments of XML, so safe to read all - _ = r.Body.Close() // must close the real body - r.Body = ioutil.NopCloser(bytes.NewReader(buf)) // replace it with something that will read the same data we just read + buf, _ := io.ReadAll(r.Body) // error responses are short fragments of XML, so safe to read all + _ = r.Body.Close() // must close the real body + r.Body = io.NopCloser(bytes.NewReader(buf)) // replace it with something that will read the same data we just read return string(buf) // copy to string } diff --git a/testSuite/cmd/create.go b/testSuite/cmd/create.go index 709165c4ff..7aa05415f9 100644 --- a/testSuite/cmd/create.go +++ b/testSuite/cmd/create.go @@ -460,7 +460,7 @@ func createGCPObject(objectURLStr string, objectSize uint32, o gcpUtils.ObjectAt os.Exit(1) } - gcpClient, err := createGCPClientWithGCSSDK() + gcpClient, _ := createGCPClientWithGCSSDK() randomString := createStringWithRandomChars(int(objectSize)) if o.ContentType == "" { @@ -470,7 +470,7 @@ func createGCPObject(objectURLStr string, objectSize uint32, o gcpUtils.ObjectAt obj := gcpClient.Bucket(gcpURLParts.BucketName).Object(gcpURLParts.ObjectKey) wc := obj.NewWriter(context.Background()) reader := strings.NewReader(randomString) - _, err = io.Copy(wc, reader) + _, _ = io.Copy(wc, reader) err = wc.Close() _, err = obj.Update(context.Background(), o) diff --git a/testSuite/cmd/list.go b/testSuite/cmd/list.go index ae5fc6716f..878e9c4aec 100644 --- a/testSuite/cmd/list.go +++ b/testSuite/cmd/list.go @@ -116,6 +116,7 @@ func listContainer(resourceUrl string, numberOfresource int64) { blobName := blobInfo.Name if len(searchPrefix) > 0 { // strip away search prefix from the blob name. + // TODO: Ignore this in lint ineffassign blobName = strings.Replace(blobName, searchPrefix, "", 1) } numberOfblobs++ diff --git a/testSuite/cmd/testblob.go b/testSuite/cmd/testblob.go index b15950d585..13a57084a6 100644 --- a/testSuite/cmd/testblob.go +++ b/testSuite/cmd/testblob.go @@ -196,7 +196,7 @@ func verifyBlockBlobDirUpload(testBlobCmd TestBlobCommand) { // read all bytes. 
blobBytesDownloaded, err := ioutil.ReadAll(get.Body(azblob.RetryReaderOptions{})) if err != nil { - fmt.Println(fmt.Sprintf("error reading the body of blob %s downloaded and failed with error %s", blobInfo.Name, err.Error())) + fmt.Printf("error reading the body of blob %s downloaded and failed with error %s\n", blobInfo.Name, err.Error()) os.Exit(1) } // remove the search prefix from the blob name @@ -258,7 +258,7 @@ func validateMetadata(expectedMetaDataString string, actualMetaData azblob.Metad // iterating through each key value pair of actual metaData and comparing the key value pair in expected metadata for key, value := range actualMetaData { if expectedMetaData[key] != value { - fmt.Println(fmt.Sprintf("value of user given key %s is %s in actual data while it is %s in expected metadata", key, value, expectedMetaData[key])) + fmt.Printf("value of user given key %s is %s in actual data while it is %s in expected metadata\n", key, value, expectedMetaData[key]) return false } } @@ -323,7 +323,7 @@ func verifySinglePageBlobUpload(testBlobCmd TestBlobCommand) { if azblob.AccessTierType(testBlobCmd.BlobTier) != azblob.AccessTierNone { blobProperties, err := pageBlobUrl.GetProperties(testCtx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) if err != nil { - fmt.Println(fmt.Sprintf("error getting the properties of the blob. failed with error %s", err.Error())) + fmt.Printf("error getting the properties of the blob. failed with error %s\n", err.Error()) os.Exit(1) } // If the blob tier does not match the expected blob tier. diff --git a/testSuite/cmd/upload.go b/testSuite/cmd/upload.go index fe6c758436..ca52430e15 100644 --- a/testSuite/cmd/upload.go +++ b/testSuite/cmd/upload.go @@ -262,7 +262,6 @@ func getRelativePath(rootPath, filePath string) string { if len(rootPath) == 0 { return filePath } - result := filePath // replace the path separator in filepath with AZCOPY_PATH_SEPARATOR // this replacement is required to handle the windows filepath @@ -276,7 +275,7 @@ func getRelativePath(rootPath, filePath string) string { scrubAway = rootPath[:strings.LastIndex(rootPath, common.AZCOPY_PATH_SEPARATOR_STRING)+1] } - result = strings.Replace(filePath, scrubAway, "", 1) + result := strings.Replace(filePath, scrubAway, "", 1) return result } From d2acc2af097d8e0818c160b98d7108246569e11a Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 15:03:40 -0800 Subject: [PATCH 12/50] skip files correction --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 04b75feeea..037d41499b 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -35,7 +35,7 @@ jobs: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin echo 'Installation complete' $(go env GOPATH)/bin/golangci-lint --version - $(go env GOPATH)/bin/golangci-lint run --tests=false --max-issues-per-linter=0 --skip-files azbfs/zz_generated_* > lint.log + $(go env GOPATH)/bin/golangci-lint run --tests=false --max-issues-per-linter=0 --skip-files=azbfs/zz_generated_* > lint.log result=$(cat lint.log | wc -l) if [ $result -ne 0 ]; then echo "-----------------------------------" From 7bc273780f7a0cec887e3c96babd6f2d3127df29 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 15:13:41 -0800 Subject: [PATCH 13/50] Fixed a few more errors --- cmd/jobsRemove.go | 3 +-- cmd/rpc.go | 4 ++-- cmd/sync.go | 6 +++--- jobsAdmin/JobsAdmin.go | 2 +- jobsAdmin/init.go | 
7 ++++--- ste/downloader-azureFiles_linux.go | 3 ++- testSuite/cmd/create.go | 2 +- testSuite/cmd/testblob.go | 6 +++--- testSuite/cmd/upload.go | 2 +- 9 files changed, 18 insertions(+), 17 deletions(-) diff --git a/cmd/jobsRemove.go b/cmd/jobsRemove.go index 84d9a0124b..1fade4fe78 100644 --- a/cmd/jobsRemove.go +++ b/cmd/jobsRemove.go @@ -23,7 +23,6 @@ package cmd import ( "errors" "fmt" - "io/ioutil" "os" "path" "strings" @@ -108,7 +107,7 @@ func handleRemoveSingleJob(jobID common.JobID) error { // remove all files whose names are approved by the predicate in the targetFolder func removeFilesWithPredicate(targetFolder string, predicate func(string) bool) (int, error) { count := 0 - files, err := ioutil.ReadDir(targetFolder) + files, err := os.ReadDir(targetFolder) if err != nil { return count, err } diff --git a/cmd/rpc.go b/cmd/rpc.go index 35d6743d07..53055eb7b8 100644 --- a/cmd/rpc.go +++ b/cmd/rpc.go @@ -25,7 +25,7 @@ import ( "encoding/json" "fmt" "github.com/Azure/azure-storage-azcopy/v10/jobsAdmin" - "io/ioutil" + "io" "net/http" "github.com/Azure/azure-storage-azcopy/v10/common" @@ -111,7 +111,7 @@ func (httpClient *HTTPClient) send(rpcCmd common.RpcCmd, requestData interface{} } // Read response data, deserialize it and return it (via out responseData parameter) & error - responseJson, err := ioutil.ReadAll(response.Body) + responseJson, err := io.ReadAll(response.Body) response.Body.Close() if err != nil { return fmt.Errorf("error reading response for the request") diff --git a/cmd/sync.go b/cmd/sync.go index 7f1aa22721..ebb8dc3771 100644 --- a/cmd/sync.go +++ b/cmd/sync.go @@ -795,8 +795,8 @@ func init() { // temp, to assist users with change in param names, by providing a clearer message when these obsolete ones are accidentally used syncCmd.PersistentFlags().StringVar(&raw.legacyInclude, "include", "", "Legacy include param. DO NOT USE") syncCmd.PersistentFlags().StringVar(&raw.legacyExclude, "exclude", "", "Legacy exclude param. DO NOT USE") - syncCmd.PersistentFlags().MarkHidden("include") - syncCmd.PersistentFlags().MarkHidden("exclude") + _ = syncCmd.PersistentFlags().MarkHidden("include") + _ = syncCmd.PersistentFlags().MarkHidden("exclude") // TODO follow sym link is not implemented, clarify behavior first // syncCmd.PersistentFlags().BoolVar(&raw.followSymlinks, "follow-symlinks", false, "follow symbolic links when performing sync from local file system.") @@ -804,6 +804,6 @@ func init() { // TODO sync does not support all BlobAttributes on the command line, this functionality should be added // Deprecate the old persist-smb-permissions flag - syncCmd.PersistentFlags().MarkHidden("preserve-smb-permissions") + _ = syncCmd.PersistentFlags().MarkHidden("preserve-smb-permissions") syncCmd.PersistentFlags().BoolVar(&raw.preservePermissions, PreservePermissionsFlag, false, "False by default. Preserves ACLs between aware resources (Windows and Azure Files, or ADLS Gen 2 to ADLS Gen 2). For Hierarchical Namespace accounts, you will need a container SAS or OAuth token with Modify Ownership and Modify Permissions permissions. For downloads, you will also need the --backup flag to restore permissions where the new Owner will not be the user running AzCopy. This flag applies to both files and folders, unless a file-only filter is specified (e.g. 
include-pattern).") } diff --git a/jobsAdmin/JobsAdmin.go b/jobsAdmin/JobsAdmin.go index 5354104b6a..a8d6852f86 100755 --- a/jobsAdmin/JobsAdmin.go +++ b/jobsAdmin/JobsAdmin.go @@ -465,7 +465,7 @@ func (ja *jobsAdmin) ListJobs(givenStatus common.JobStatus) common.ListJobsRespo func (ja *jobsAdmin) SetConcurrencySettingsToAuto() { // Setting initial pool size to 4 and max pool size to 3,000 ja.concurrency.InitialMainPoolSize = 4 - ja.concurrency.MaxMainPoolSize = &ste.ConfiguredInt{3000, false, common.EEnvironmentVariable.ConcurrencyValue().Name, "auto-tuning limit"} + ja.concurrency.MaxMainPoolSize = &ste.ConfiguredInt{Value: 3000, IsUserSpecified: false, EnvVarName: common.EEnvironmentVariable.ConcurrencyValue().Name, DefaultSourceDesc: "auto-tuning limit"} // recreate the concurrency tuner. // Tuner isn't called until the first job part is scheduled for transfer, so it is safe to update it before that. diff --git a/jobsAdmin/init.go b/jobsAdmin/init.go index 1f43680cae..cc895e8e69 100755 --- a/jobsAdmin/init.go +++ b/jobsAdmin/init.go @@ -24,9 +24,10 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "math" "net/http" + "os" "time" "github.com/Azure/azure-pipeline-go/pipeline" @@ -64,7 +65,7 @@ func MainSTE(concurrency ste.ConcurrencySettings, targetRateInMegaBitsPerSec flo // if we've a custom mime map if path := common.GetLifecycleMgr().GetEnvironmentVariable(common.EEnvironmentVariable.MimeMapping()); path != "" { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return err } @@ -81,7 +82,7 @@ func MainSTE(concurrency ste.ConcurrencySettings, targetRateInMegaBitsPerSec flo deserialize := func(request *http.Request, v interface{}) { // TODO: Check the HTTP verb here? // reading the entire request body and closing the request body - body, err := ioutil.ReadAll(request.Body) + body, err := io.ReadAll(request.Body) request.Body.Close() if err != nil { JobsAdmin.Panic(fmt.Errorf("error deserializing HTTP request")) diff --git a/ste/downloader-azureFiles_linux.go b/ste/downloader-azureFiles_linux.go index 87e11198cb..6847fde835 100644 --- a/ste/downloader-azureFiles_linux.go +++ b/ste/downloader-azureFiles_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package ste @@ -56,7 +57,7 @@ func (*azureFilesDownloader) PutSMBProperties(sip ISMBPropertyBearingSourceInfoP var ts [2]unix.Timespec // Don't set atime. - ts[0] = unix.Timespec{unix.UTIME_OMIT, unix.UTIME_OMIT} + ts[0] = unix.Timespec{Sec: unix.UTIME_OMIT, Nsec: unix.UTIME_OMIT} // Set mtime to smbLastWrite. ts[1] = unix.NsecToTimespec(smbLastWrite.UnixNano()) diff --git a/testSuite/cmd/create.go b/testSuite/cmd/create.go index 7aa05415f9..df5d3f8363 100644 --- a/testSuite/cmd/create.go +++ b/testSuite/cmd/create.go @@ -471,7 +471,7 @@ func createGCPObject(objectURLStr string, objectSize uint32, o gcpUtils.ObjectAt wc := obj.NewWriter(context.Background()) reader := strings.NewReader(randomString) _, _ = io.Copy(wc, reader) - err = wc.Close() + _ = wc.Close() _, err = obj.Update(context.Background(), o) if err != nil { diff --git a/testSuite/cmd/testblob.go b/testSuite/cmd/testblob.go index 13a57084a6..5596803e28 100644 --- a/testSuite/cmd/testblob.go +++ b/testSuite/cmd/testblob.go @@ -328,7 +328,7 @@ func verifySinglePageBlobUpload(testBlobCmd TestBlobCommand) { } // If the blob tier does not match the expected blob tier. 
if !strings.EqualFold(blobProperties.AccessTier(), testBlobCmd.BlobTier) { - fmt.Println(fmt.Sprintf("Access blob tier type %s does not match the expected %s tier type", blobProperties.AccessTier(), testBlobCmd.BlobTier)) + fmt.Printf("Access blob tier type %s does not match the expected %s tier type\n", blobProperties.AccessTier(), testBlobCmd.BlobTier) os.Exit(1) } // Closing the blobProperties response body. @@ -462,7 +462,7 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { sourceSas := testBlobCmd.Subject sourceURL, err := url.Parse(sourceSas) if err != nil { - fmt.Println(fmt.Sprintf("Error parsing the blob url source %s", testBlobCmd.Object)) + fmt.Printf("Error parsing the blob url source %s\n", testBlobCmd.Object) os.Exit(1) } @@ -499,7 +499,7 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { if azblob.AccessTierType(testBlobCmd.BlobTier) != azblob.AccessTierNone { blobProperties, err := blobUrl.GetProperties(testCtx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) if err != nil { - fmt.Println(fmt.Sprintf("error getting the blob properties. Failed with error %s", err.Error())) + fmt.Printf("error getting the blob properties. Failed with error %s\n", err.Error()) os.Exit(1) } // Match the Access Tier Type with Expected Tier Type. diff --git a/testSuite/cmd/upload.go b/testSuite/cmd/upload.go index ca52430e15..826af1c4da 100644 --- a/testSuite/cmd/upload.go +++ b/testSuite/cmd/upload.go @@ -146,7 +146,7 @@ func (u *testUploader) uploadToGCP() { } obj := gcpClient.Bucket(gcpURLPartsForFile.BucketName).Object(gcpURLPartsForFile.ObjectKey) wc := obj.NewWriter(context.Background()) - _, err = io.Copy(wc, f) + _, _ = io.Copy(wc, f) err = wc.Close() if err != nil { return err From f544f2028a0e5eb5084e82676569cabd366b6c11 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 15:21:13 -0800 Subject: [PATCH 14/50] Fixed rest of printf println issues --- testSuite/cmd/testblob.go | 12 ++++++------ testSuite/cmd/testblobFS.go | 24 ++++++++++++------------ testSuite/cmd/testfile.go | 14 +++++++------- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/testSuite/cmd/testblob.go b/testSuite/cmd/testblob.go index 5596803e28..0026a60a21 100644 --- a/testSuite/cmd/testblob.go +++ b/testSuite/cmd/testblob.go @@ -504,7 +504,7 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { } // Match the Access Tier Type with Expected Tier Type. if !strings.EqualFold(blobProperties.AccessTier(), testBlobCmd.BlobTier) { - fmt.Println(fmt.Sprintf("block blob access tier %s does not matches the expected tier %s", blobProperties.AccessTier(), testBlobCmd.BlobTier)) + fmt.Printf("block blob access tier %s does not matches the expected tier %s\n", blobProperties.AccessTier(), testBlobCmd.BlobTier) os.Exit(1) } // Closing the blobProperties response. 
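The Println-to-Printf conversions in this patch follow gosimple check S1038: formatting with fmt.Sprintf only to hand the result to fmt.Println builds an intermediate string that fmt.Printf writes directly, at the cost of remembering the trailing \n that Println used to append. A small illustrative sketch (hypothetical values, not repository code):

package main

import "fmt"

func main() {
	blobName, err := "example.txt", fmt.Errorf("connection reset")

	// Flagged by gosimple S1038: formats into a string, then prints it.
	fmt.Println(fmt.Sprintf("error downloading the blob %s: %s", blobName, err.Error()))

	// Equivalent direct form; the explicit \n replaces Println's implied newline.
	fmt.Printf("error downloading the blob %s: %s\n", blobName, err.Error())
}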
@@ -537,7 +537,7 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { // If the fileSize is 0 and the len of downloaded bytes is not 0 // validation fails if len(blobBytesDownloaded) != 0 { - fmt.Println(fmt.Sprintf("validation failed since the actual file size %d differs from the downloaded file size %d", fileInfo.Size(), len(blobBytesDownloaded))) + fmt.Printf("validation failed since the actual file size %d differs from the downloaded file size %d\n", fileInfo.Size(), len(blobBytesDownloaded)) os.Exit(1) } // If both the actual and downloaded file size is 0, @@ -598,7 +598,7 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { mmap.Unmap() err = file.Close() if err != nil { - fmt.Println(fmt.Sprintf("error closing the file %s and failed with error %s. Error could be while validating the blob.", file.Name(), err.Error())) + fmt.Printf("error closing the file %s and failed with error %s. Error could be while validating the blob.\n", file.Name(), err.Error()) os.Exit(1) } @@ -634,7 +634,7 @@ func verifySingleAppendBlob(testBlobCmd TestBlobCommand) { // getting the shared access signature of the resource. sourceURL, err := url.Parse(testBlobCmd.Subject) if err != nil { - fmt.Println(fmt.Sprintf("Error parsing the blob url source %s", testBlobCmd.Object)) + fmt.Printf("Error parsing the blob url source %s\n", testBlobCmd.Object) os.Exit(1) } @@ -668,12 +668,12 @@ func verifySingleAppendBlob(testBlobCmd TestBlobCommand) { if azblob.AccessTierType(testBlobCmd.BlobTier) != azblob.AccessTierNone { blobProperties, err := appendBlobURL.GetProperties(testCtx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) if err != nil { - fmt.Println(fmt.Sprintf("error getting the properties of the blob. failed with error %s", err.Error())) + fmt.Printf("error getting the properties of the blob. failed with error %s\n", err.Error()) os.Exit(1) } // If the blob tier does not match the expected blob tier. if !strings.EqualFold(blobProperties.AccessTier(), testBlobCmd.BlobTier) { - fmt.Println(fmt.Sprintf("Access blob tier type %s does not match the expected %s tier type", blobProperties.AccessTier(), testBlobCmd.BlobTier)) + fmt.Printf("Access blob tier type %s does not match the expected %s tier type\n", blobProperties.AccessTier(), testBlobCmd.BlobTier) os.Exit(1) } // Closing the blobProperties response body. diff --git a/testSuite/cmd/testblobFS.go b/testSuite/cmd/testblobFS.go index dcad0c0971..1a0d773ffb 100644 --- a/testSuite/cmd/testblobFS.go +++ b/testSuite/cmd/testblobFS.go @@ -100,7 +100,7 @@ func (tbfsc TestBlobFSCommand) verifyRemoteFile() { fileUrl := azbfs.NewFileURL(*subjectUrl, p) dResp, err := fileUrl.Download(context.Background(), 0, 0) if err != nil { - fmt.Println(fmt.Sprintf("error downloading the subject %s. Failed with error %s", fileUrl.String(), err.Error())) + fmt.Printf("error downloading the subject %s. 
Failed with error %s\n", fileUrl.String(), err.Error()) os.Exit(1) } // get the size of the downloaded file @@ -122,7 +122,7 @@ func (tbfsc TestBlobFSCommand) verifyRemoteFile() { // If the length of file at two location is not same // validation has failed if downloadedLength != fInfo.Size() { - fmt.Println(fmt.Sprintf("validation failed because there is difference in the source size %d and destination size %d", fInfo.Size(), downloadedLength)) + fmt.Printf("validation failed because there is difference in the source size %d and destination size %d\n", fInfo.Size(), downloadedLength) os.Exit(1) } // If the size of the file is 0 both locally and remote @@ -193,11 +193,11 @@ func (tbfsc TestBlobFSCommand) verifyRemoteDir() { // local and remote objectInfo, err := os.Stat(tbfsc.Object) if err != nil { - fmt.Println(fmt.Sprintf("error getting the file info for dir %s. failed with error %s", tbfsc.Object, err.Error())) + fmt.Printf("error getting the file info for dir %s. failed with error %s\n", tbfsc.Object, err.Error()) os.Exit(1) } if !objectInfo.IsDir() { - fmt.Println(fmt.Sprintf("the source provided %s is not a directory path", tbfsc.Object)) + fmt.Printf("the source provided %s is not a directory path\n", tbfsc.Object) os.Exit(1) } // break the remote Url into parts @@ -211,7 +211,7 @@ func (tbfsc TestBlobFSCommand) verifyRemoteDir() { var firstListing bool = true dResp, err := dirUrl.ListDirectorySegment(context.Background(), &continuationMarker, true) if err != nil { - fmt.Println(fmt.Sprintf("error listing the directory path defined by url %s. Failed with error %s", dirUrl.String(), err.Error())) + fmt.Printf("error listing the directory path defined by url %s. Failed with error %s\n", dirUrl.String(), err.Error()) os.Exit(1) } // numberOfFilesinSubject keeps the count of number of files of at the destination @@ -235,13 +235,13 @@ func (tbfsc TestBlobFSCommand) verifyRemoteDir() { // open the filePath locally and calculate the md5 fpLocal, err := os.Open(filepathLocal) if err != nil { - fmt.Println(fmt.Sprintf("error opening the file %s. failed with error %s", filepathLocal, err.Error())) + fmt.Printf("error opening the file %s. failed with error %s\n", filepathLocal, err.Error()) os.Exit(1) } // Get the fileInfo to get size. fpLocalInfo, err := fpLocal.Stat() if err != nil { - fmt.Println(fmt.Sprintf("error getting the file info for file %s. failed with error %s", filepathLocal, err.Error())) + fmt.Printf("error getting the file info for file %s. failed with error %s\n", filepathLocal, err.Error()) os.Exit(1) } // Check the size of file @@ -260,7 +260,7 @@ func (tbfsc TestBlobFSCommand) verifyRemoteDir() { // memory map the file fpMMf, err := NewMMF(fpLocal, false, 0, fpLocalInfo.Size()) if err != nil { - fmt.Println(fmt.Sprintf("error memory mapping the file %s. failed with error %s", filepathLocal, err.Error())) + fmt.Printf("error memory mapping the file %s. failed with error %s\n", filepathLocal, err.Error()) os.Exit(1) } @@ -274,7 +274,7 @@ func (tbfsc TestBlobFSCommand) verifyRemoteDir() { fileUrl := azbfs.NewFileURL(tempUrlParts.URL(), p) fResp, err := fileUrl.Download(context.Background(), 0, 0) if err != nil { - fmt.Println(fmt.Sprintf("error downloading the file %s. failed with error %s", fileUrl.String(), err.Error())) + fmt.Printf("error downloading the file %s. 
failed with error %s\n", fileUrl.String(), err.Error()) os.Exit(1) } downloadedBuffer := make([]byte, *file.ContentLength) // byte buffer in which file will be downloaded to @@ -287,7 +287,7 @@ func (tbfsc TestBlobFSCommand) verifyRemoteDir() { // calculate the downloaded file Md5 subjMd5 := md5.Sum(downloadedBuffer) if objMd5 != subjMd5 { - fmt.Println(fmt.Sprintf("source file %s doesn't match the remote file %s", filepathLocal, fileUrl.String())) + fmt.Printf("source file %s doesn't match the remote file %s\n", filepathLocal, fileUrl.String()) os.Exit(1) } } @@ -304,7 +304,7 @@ func (tbfsc TestBlobFSCommand) verifyRemoteDir() { return nil }) if err != nil { - fmt.Println(fmt.Sprintf("validation failed with error %s walking inside the source %s", err.Error(), tbfsc.Object)) + fmt.Printf("validation failed with error %s walking inside the source %s\n", err.Error(), tbfsc.Object) os.Exit(1) } @@ -314,5 +314,5 @@ func (tbfsc TestBlobFSCommand) verifyRemoteDir() { fmt.Println("validation failed since there is difference in the number of files in source and destination") os.Exit(1) } - fmt.Println(fmt.Sprintf("successfully validated the source %s and destination %s", tbfsc.Object, tbfsc.Subject)) + fmt.Printf("successfully validated the source %s and destination %s\n", tbfsc.Object, tbfsc.Subject) } diff --git a/testSuite/cmd/testfile.go b/testSuite/cmd/testfile.go index af5e515af4..abcacc5988 100644 --- a/testSuite/cmd/testfile.go +++ b/testSuite/cmd/testfile.go @@ -132,7 +132,7 @@ func validateAzureDirWithLocalFile(curAzureDirURL azfile.DirectoryURL, baseAzure // look for all files that in current directory listFile, err := curAzureDirURL.ListFilesAndDirectoriesSegment(context.Background(), marker, azfile.ListFilesAndDirectoriesOptions{}) if err != nil { - // fmt.Println(fmt.Sprintf("fail to list files and directories inside the directory. Please check the directory sas, %v", err)) + // fmt.Printf("fail to list files and directories inside the directory. Please check the directory sas, %v\n", err) os.Exit(1) } @@ -149,7 +149,7 @@ func validateAzureDirWithLocalFile(curAzureDirURL azfile.DirectoryURL, baseAzure get, err := curFileURL.Download(context.Background(), 0, azfile.CountToEnd, false) if err != nil { - fmt.Println(fmt.Sprintf("fail to download the file %s", fileInfo.Name)) + fmt.Printf("fail to download the file %s\n", fileInfo.Name) os.Exit(1) } @@ -158,14 +158,14 @@ func validateAzureDirWithLocalFile(curAzureDirURL azfile.DirectoryURL, baseAzure // read all bytes. 
fileBytesDownloaded, err := ioutil.ReadAll(retryReader) if err != nil { - fmt.Println(fmt.Sprintf("fail to read the body of file %s downloaded and failed with error %s", fileInfo.Name, err.Error())) + fmt.Printf("fail to read the body of file %s downloaded and failed with error %s\n", fileInfo.Name, err.Error()) os.Exit(1) } retryReader.Close() tokens := strings.SplitAfterN(curFileURL.URL().Path, baseAzureDirPath, 2) if len(tokens) < 2 { - fmt.Println(fmt.Sprintf("fail to get sub directory and file name, file URL '%s', original dir path '%s'", curFileURL.String(), baseAzureDirPath)) + fmt.Printf("fail to get sub directory and file name, file URL '%s', original dir path '%s'\n", curFileURL.String(), baseAzureDirPath) os.Exit(1) } @@ -232,7 +232,7 @@ func validateMetadataForFile(expectedMetaDataString string, actualMetaData azfil // iterating through each key value pair of actual metaData and comparing the key value pair in expected metadata for key, value := range actualMetaData { if expectedMetaData[key] != value { - fmt.Println(fmt.Sprintf("value of user given key %s is %s in actual data while it is %s in expected metadata", key, value, expectedMetaData[key])) + fmt.Printf("value of user given key %s is %s in actual data while it is %s in expected metadata\n", key, value, expectedMetaData[key]) return false } } @@ -261,7 +261,7 @@ func verifySingleFileUpload(testFileCmd TestFileCommand) { // getting the shared access signature of the resource. sourceURL, err := url.Parse(testFileCmd.Subject) if err != nil { - // fmt.Println(fmt.Sprintf("Error parsing the file url source %s", testFileCmd.Object)) + // fmt.Printf("Error parsing the file url source %s\n", testFileCmd.Object) os.Exit(1) } @@ -287,7 +287,7 @@ func verifySingleFileUpload(testFileCmd TestFileCommand) { // If the fileSize is 0 and the len of downloaded bytes is not 0 // validation fails if len(fileBytesDownloaded) != 0 { - fmt.Println(fmt.Sprintf("validation failed since the actual file size %d differs from the downloaded file size %d", fileInfo.Size(), len(fileBytesDownloaded))) + fmt.Printf("validation failed since the actual file size %d differs from the downloaded file size %d\n", fileInfo.Size(), len(fileBytesDownloaded)) os.Exit(1) } // If both the actual and downloaded file size is 0, From f857e3efd2b5fd7ecf371ae6df0f07d14d0e6c30 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 15:47:45 -0800 Subject: [PATCH 15/50] Fix all ioutil issues --- azbfs/zt_url_file_test.go | 11 +++++------ azbfs/zz_generated_filesystem.go | 13 ++++++------- azbfs/zz_generated_path.go | 11 +++++------ azbfs/zz_generated_responder_policy.go | 4 ++-- cmd/zc_traverser_local.go | 22 +++++++++++----------- cmd/zt_generic_traverser_test.go | 5 ++--- cmd/zt_scenario_helpers_for_test.go | 9 ++++----- cmd/zt_sync_blob_local_test.go | 3 +-- cmd/zt_test.go | 3 +-- common/credCache_windows.go | 5 ++--- testSuite/cmd/create.go | 3 +-- testSuite/cmd/testblob.go | 7 +++---- testSuite/cmd/testfile.go | 5 ++--- 13 files changed, 45 insertions(+), 56 deletions(-) diff --git a/azbfs/zt_url_file_test.go b/azbfs/zt_url_file_test.go index 6d7b2d78ac..b6e8b77ee4 100644 --- a/azbfs/zt_url_file_test.go +++ b/azbfs/zt_url_file_test.go @@ -16,7 +16,6 @@ import ( "net/url" //"strings" - "io/ioutil" "net/http" "github.com/Azure/azure-storage-azcopy/v10/azbfs" @@ -213,7 +212,7 @@ func (s *FileURLSuite) TestFileGetProperties(c *chk.C) { // c.Assert(resp.ContentType(), chk.Equals, "application/octet-stream") // c.Assert(resp.Status(), chk.Not(chk.Equals), "") // 
-// download, err := ioutil.ReadAll(resp.Response().Body) +// download, err := io.ReadAll(resp.Response().Body) // c.Assert(err, chk.IsNil) // c.Assert(download, chk.DeepEquals, contentD[:1024]) //} @@ -250,14 +249,14 @@ func (s *FileURLSuite) TestUnexpectedEOFRecovery(c *chk.C) { // Verify that we can inject errors first. reader := dResp.Body(azbfs.InjectErrorInRetryReaderOptions(errors.New("unrecoverable error"))) - _, err = ioutil.ReadAll(reader) + _, err = io.ReadAll(reader) c.Assert(err, chk.NotNil) c.Assert(err.Error(), chk.Equals, "unrecoverable error") // Then inject the retryable error. reader = dResp.Body(azbfs.InjectErrorInRetryReaderOptions(io.ErrUnexpectedEOF)) - buf, err := ioutil.ReadAll(reader) + buf, err := io.ReadAll(reader) c.Assert(err, chk.IsNil) c.Assert(buf, chk.DeepEquals, contentD) } @@ -309,7 +308,7 @@ func (s *FileURLSuite) TestUploadDownloadRoundTrip(c *chk.C) { c.Assert(resp.Status(), chk.Not(chk.Equals), "") // Verify the partial data - download, err := ioutil.ReadAll(resp.Response().Body) + download, err := io.ReadAll(resp.Response().Body) c.Assert(err, chk.IsNil) c.Assert(download, chk.DeepEquals, contentD1[:1024]) @@ -325,7 +324,7 @@ func (s *FileURLSuite) TestUploadDownloadRoundTrip(c *chk.C) { c.Assert(resp.Version(), chk.Not(chk.Equals), "") // Verify the entire content - download, err = ioutil.ReadAll(resp.Response().Body) + download, err = io.ReadAll(resp.Response().Body) c.Assert(err, chk.IsNil) c.Assert(download[:2048], chk.DeepEquals, contentD1[:]) c.Assert(download[2048:], chk.DeepEquals, contentD2[:]) diff --git a/azbfs/zz_generated_filesystem.go b/azbfs/zz_generated_filesystem.go index 49aab19ade..0ba2d38330 100644 --- a/azbfs/zz_generated_filesystem.go +++ b/azbfs/zz_generated_filesystem.go @@ -9,7 +9,6 @@ import ( "encoding/json" "github.com/Azure/azure-pipeline-go/pipeline" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -94,7 +93,7 @@ func (client filesystemClient) createResponder(resp pipeline.Response) (pipeline if resp == nil { return nil, err } - io.Copy(ioutil.Discard, resp.Response().Body) + io.Copy(io.Discard, resp.Response().Body) resp.Response().Body.Close() return &FilesystemCreateResponse{rawResponse: resp.Response()}, err } @@ -177,7 +176,7 @@ func (client filesystemClient) deleteResponder(resp pipeline.Response) (pipeline if resp == nil { return nil, err } - io.Copy(ioutil.Discard, resp.Response().Body) + io.Copy(io.Discard, resp.Response().Body) resp.Response().Body.Close() return &FilesystemDeleteResponse{rawResponse: resp.Response()}, err } @@ -244,7 +243,7 @@ func (client filesystemClient) getPropertiesResponder(resp pipeline.Response) (p if resp == nil { return nil, err } - io.Copy(ioutil.Discard, resp.Response().Body) + io.Copy(io.Discard, resp.Response().Body) resp.Response().Body.Close() return &FilesystemGetPropertiesResponse{rawResponse: resp.Response()}, err } @@ -327,7 +326,7 @@ func (client filesystemClient) listResponder(resp pipeline.Response) (pipeline.R return result, err } defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) + b, err := io.ReadAll(resp.Response().Body) if err != nil { return result, err } @@ -436,7 +435,7 @@ func (client filesystemClient) listPathsResponder(resp pipeline.Response) (pipel return result, err } defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) + b, err := io.ReadAll(resp.Response().Body) if err != nil { return result, err } @@ -531,7 +530,7 @@ func (client filesystemClient) setPropertiesResponder(resp 
pipeline.Response) (p if resp == nil { return nil, err } - io.Copy(ioutil.Discard, resp.Response().Body) + io.Copy(io.Discard, resp.Response().Body) resp.Response().Body.Close() return &FilesystemSetPropertiesResponse{rawResponse: resp.Response()}, err } diff --git a/azbfs/zz_generated_path.go b/azbfs/zz_generated_path.go index 83f3769108..75a607f454 100644 --- a/azbfs/zz_generated_path.go +++ b/azbfs/zz_generated_path.go @@ -8,7 +8,6 @@ import ( "context" "github.com/Azure/azure-pipeline-go/pipeline" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -221,7 +220,7 @@ func (client pathClient) createResponder(resp pipeline.Response) (pipeline.Respo if resp == nil { return nil, err } - io.Copy(ioutil.Discard, resp.Response().Body) + io.Copy(io.Discard, resp.Response().Body) resp.Response().Body.Close() return &PathCreateResponse{rawResponse: resp.Response()}, err } @@ -323,7 +322,7 @@ func (client pathClient) deleteResponder(resp pipeline.Response) (pipeline.Respo if resp == nil { return nil, err } - io.Copy(ioutil.Discard, resp.Response().Body) + io.Copy(io.Discard, resp.Response().Body) resp.Response().Body.Close() return &PathDeleteResponse{rawResponse: resp.Response()}, err } @@ -431,7 +430,7 @@ func (client pathClient) getPropertiesResponder(resp pipeline.Response) (pipelin if resp == nil { return nil, err } - io.Copy(ioutil.Discard, resp.Response().Body) + io.Copy(io.Discard, resp.Response().Body) resp.Response().Body.Close() return &PathGetPropertiesResponse{rawResponse: resp.Response()}, err } @@ -548,7 +547,7 @@ func (client pathClient) leaseResponder(resp pipeline.Response) (pipeline.Respon if resp == nil { return nil, err } - io.Copy(ioutil.Discard, resp.Response().Body) + io.Copy(io.Discard, resp.Response().Body) resp.Response().Body.Close() return &PathLeaseResponse{rawResponse: resp.Response()}, err } @@ -862,7 +861,7 @@ func (client pathClient) updateResponder(resp pipeline.Response) (pipeline.Respo if resp == nil { return nil, err } - io.Copy(ioutil.Discard, resp.Response().Body) + io.Copy(io.Discard, resp.Response().Body) resp.Response().Body.Close() return &PathUpdateResponse{rawResponse: resp.Response()}, err } diff --git a/azbfs/zz_generated_responder_policy.go b/azbfs/zz_generated_responder_policy.go index 9c35c7723e..1d54e49d33 100644 --- a/azbfs/zz_generated_responder_policy.go +++ b/azbfs/zz_generated_responder_policy.go @@ -8,7 +8,7 @@ import ( "context" "encoding/json" "github.com/Azure/azure-pipeline-go/pipeline" - "io/ioutil" + "io" ) type responder func(resp pipeline.Response) (result pipeline.Response, err error) @@ -53,7 +53,7 @@ func validateResponse(resp pipeline.Response, successStatusCodes ...int) error { // only close the body in the failure case. in the // success case responders will close the body as required. defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) + b, err := io.ReadAll(resp.Response().Body) if err != nil { return err } diff --git a/cmd/zc_traverser_local.go b/cmd/zc_traverser_local.go index a5980a7b08..44c2d2e3cd 100755 --- a/cmd/zc_traverser_local.go +++ b/cmd/zc_traverser_local.go @@ -24,7 +24,6 @@ import ( "context" "errors" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -449,21 +448,22 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr // We don't transfer any directory properties here, not even the root. (Because the root's // properties won't be transferred, because the only way to do a non-recursive directory transfer // is with /* (aka stripTopDir). 
- files, err := ioutil.ReadDir(t.fullPath) + entries, err := os.ReadDir(t.fullPath) if err != nil { return err } // go through the files and return if any of them fail to process - for _, singleFile := range files { + for _, entry := range entries { // This won't change. It's purely to hand info off to STE about where the symlink lives. - relativePath := singleFile.Name() - if singleFile.Mode()&os.ModeSymlink != 0 { + relativePath := entry.Name() + fileInfo, _ := entry.Info() + if fileInfo.Mode()&os.ModeSymlink != 0 { if !t.followSymlinks { continue } else { // Because this only goes one layer deep, we can just append the filename to fullPath and resolve with it. - symlinkPath := common.GenerateFullPath(t.fullPath, singleFile.Name()) + symlinkPath := common.GenerateFullPath(t.fullPath, entry.Name()) // Evaluate the symlink result, err := UnfurlSymlinks(symlinkPath) @@ -479,7 +479,7 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr } // Replace the current FileInfo with - singleFile, err = common.OSStat(result) + fileInfo, err = common.OSStat(result) if err != nil { return err @@ -487,7 +487,7 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr } } - if singleFile.IsDir() { + if entry.IsDir() { continue // it doesn't make sense to transfer directory properties when not recurring } @@ -499,11 +499,11 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr err := processIfPassedFilters(filters, newStoredObject( preprocessor, - singleFile.Name(), + entry.Name(), strings.ReplaceAll(relativePath, common.DeterminePathSeparator(t.fullPath), common.AZCOPY_PATH_SEPARATOR_STRING), // Consolidate relative paths to the azcopy path separator for sync common.EEntityType.File(), // TODO: add code path for folders - singleFile.ModTime(), - singleFile.Size(), + fileInfo.ModTime(), + fileInfo.Size(), noContentProps, // Local MD5s are computed in the STE, and other props don't apply to local files noBlobProps, noMetdata, diff --git a/cmd/zt_generic_traverser_test.go b/cmd/zt_generic_traverser_test.go index dc7970f668..c148f22b31 100644 --- a/cmd/zt_generic_traverser_test.go +++ b/cmd/zt_generic_traverser_test.go @@ -23,7 +23,6 @@ package cmd import ( "context" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -351,7 +350,7 @@ func (s *genericTraverserSuite) TestWalkWithSymlinksDedupe(c *chk.C) { fileNames := []string{"stonks.txt", "jaws but its a baby shark.mp3", "my crow soft.txt"} tmpDir := scenarioHelper{}.generateLocalDirectory(c) defer os.RemoveAll(tmpDir) - symlinkTmpDir, err := ioutil.TempDir(tmpDir, "subdir") + symlinkTmpDir, err := os.MkdirTemp(tmpDir, "subdir") c.Assert(err, chk.IsNil) scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, fileNames) @@ -416,7 +415,7 @@ func (s *genericTraverserSuite) TestWalkWithSymlinksToParentAndChild(c *chk.C) { root2 := scenarioHelper{}.generateLocalDirectory(c) defer os.RemoveAll(root2) - child, err := ioutil.TempDir(root2, "childdir") + child, err := os.MkdirTemp(root2, "childdir") c.Assert(err, chk.IsNil) scenarioHelper{}.generateLocalFilesFromList(c, root2, fileNames) diff --git a/cmd/zt_scenario_helpers_for_test.go b/cmd/zt_scenario_helpers_for_test.go index 3e66a96916..8d650b8d71 100644 --- a/cmd/zt_scenario_helpers_for_test.go +++ b/cmd/zt_scenario_helpers_for_test.go @@ -24,7 +24,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/url" "os" "path" @@ -64,7 +63,7 @@ var specialNames = []string{ // note: this is to emulate the list-of-files flag func 
(scenarioHelper) generateListOfFiles(c *chk.C, fileList []string) (path string) { - parentDirName, err := ioutil.TempDir("", "AzCopyLocalTest") + parentDirName, err := os.MkdirTemp("", "AzCopyLocalTest") c.Assert(err, chk.IsNil) // create the file @@ -74,13 +73,13 @@ func (scenarioHelper) generateListOfFiles(c *chk.C, fileList []string) (path str // pipe content into it content := strings.Join(fileList, "\n") - err = ioutil.WriteFile(path, []byte(content), common.DEFAULT_FILE_PERM) + err = os.WriteFile(path, []byte(content), common.DEFAULT_FILE_PERM) c.Assert(err, chk.IsNil) return } func (scenarioHelper) generateLocalDirectory(c *chk.C) (dstDirName string) { - dstDirName, err := ioutil.TempDir("", "AzCopyLocalTest") + dstDirName, err := os.MkdirTemp("", "AzCopyLocalTest") c.Assert(err, chk.IsNil) return } @@ -97,7 +96,7 @@ func (scenarioHelper) generateLocalFile(filePath string, fileSize int) ([]byte, } // write to file and return the data - err = ioutil.WriteFile(filePath, bigBuff, common.DEFAULT_FILE_PERM) + err = os.WriteFile(filePath, bigBuff, common.DEFAULT_FILE_PERM) return bigBuff, err } diff --git a/cmd/zt_sync_blob_local_test.go b/cmd/zt_sync_blob_local_test.go index 67b705bfa8..aacf391bda 100644 --- a/cmd/zt_sync_blob_local_test.go +++ b/cmd/zt_sync_blob_local_test.go @@ -23,7 +23,6 @@ package cmd import ( "bytes" "context" - "io/ioutil" "os" "path/filepath" "strings" @@ -211,7 +210,7 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithMismatchedDestination(c *chk.C validateDownloadTransfersAreScheduled(c, "", "", expectedOutput, mockedRPC) // make sure the extra files were deleted - currentDstFileList, err := ioutil.ReadDir(dstDirName) + currentDstFileList, err := os.ReadDir(dstDirName) extraFilesFound := false for _, file := range currentDstFileList { if strings.Contains(file.Name(), "extra") { diff --git a/cmd/zt_test.go b/cmd/zt_test.go index 1c317b49dc..f18a3b02a1 100644 --- a/cmd/zt_test.go +++ b/cmd/zt_test.go @@ -26,7 +26,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math/rand" "net/url" "os" @@ -776,7 +775,7 @@ func disableSoftDelete(c *chk.C, bsu azblob.ServiceURL) { func validateUpload(c *chk.C, blobURL azblob.BlockBlobURL) { resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) c.Assert(err, chk.IsNil) - data, _ := ioutil.ReadAll(resp.Response().Body) + data, _ := io.ReadAll(resp.Response().Body) c.Assert(data, chk.HasLen, 0) } diff --git a/common/credCache_windows.go b/common/credCache_windows.go index 69b4d87879..db55074bcc 100644 --- a/common/credCache_windows.go +++ b/common/credCache_windows.go @@ -23,7 +23,6 @@ package common import ( "errors" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -133,7 +132,7 @@ func (c *CredCache) removeCachedTokenInternal() error { // loadTokenInternal restores a Token object from file cache. 
func (c *CredCache) loadTokenInternal() (*OAuthTokenInfo, error) { tokenFilePath := c.tokenFilePath() - b, err := ioutil.ReadFile(tokenFilePath) + b, err := os.ReadFile(tokenFilePath) if err != nil { return nil, fmt.Errorf("failed to read token file %q during loading token: %v", tokenFilePath, err) } @@ -163,7 +162,7 @@ func (c *CredCache) saveTokenInternal(token OAuthTokenInfo) error { return fmt.Errorf("failed to create directory %q to store token in, %v", dir, err) } - newFile, err := ioutil.TempFile(dir, "token") + newFile, err := os.CreateTemp(dir, "token") if err != nil { return fmt.Errorf("failed to create the temp file to write the token, %v", err) } diff --git a/testSuite/cmd/create.go b/testSuite/cmd/create.go index df5d3f8363..73ac0bb7e0 100644 --- a/testSuite/cmd/create.go +++ b/testSuite/cmd/create.go @@ -11,7 +11,6 @@ import ( "time" "io" - "io/ioutil" "math/rand" "net/http" "strings" @@ -281,7 +280,7 @@ func createBlob(blobURL string, blobSize uint32, metadata azblob.Metadata, blobH os.Exit(1) } if putBlobResp.Response() != nil { - _, _ = io.Copy(ioutil.Discard, putBlobResp.Response().Body) + _, _ = io.Copy(io.Discard, putBlobResp.Response().Body) putBlobResp.Response().Body.Close() } } diff --git a/testSuite/cmd/testblob.go b/testSuite/cmd/testblob.go index 0026a60a21..d33bd4dfa2 100644 --- a/testSuite/cmd/testblob.go +++ b/testSuite/cmd/testblob.go @@ -5,7 +5,6 @@ import ( "crypto/md5" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -194,7 +193,7 @@ func verifyBlockBlobDirUpload(testBlobCmd TestBlobCommand) { } // read all bytes. - blobBytesDownloaded, err := ioutil.ReadAll(get.Body(azblob.RetryReaderOptions{})) + blobBytesDownloaded, err := io.ReadAll(get.Body(azblob.RetryReaderOptions{})) if err != nil { fmt.Printf("error reading the body of blob %s downloaded and failed with error %s\n", blobInfo.Name, err.Error()) os.Exit(1) @@ -333,7 +332,7 @@ func verifySinglePageBlobUpload(testBlobCmd TestBlobCommand) { } // Closing the blobProperties response body. if blobProperties.Response() != nil { - _, _ = io.Copy(ioutil.Discard, blobProperties.Response().Body) + _, _ = io.Copy(io.Discard, blobProperties.Response().Body) blobProperties.Response().Body.Close() } } @@ -344,7 +343,7 @@ func verifySinglePageBlobUpload(testBlobCmd TestBlobCommand) { os.Exit(1) } // reading all the bytes downloaded. - blobBytesDownloaded, err := ioutil.ReadAll(get.Body(azblob.RetryReaderOptions{})) + blobBytesDownloaded, err := io.ReadAll(get.Body(azblob.RetryReaderOptions{})) if get.Response().Body != nil { get.Response().Body.Close() } diff --git a/testSuite/cmd/testfile.go b/testSuite/cmd/testfile.go index abcacc5988..c7cf8c5d65 100644 --- a/testSuite/cmd/testfile.go +++ b/testSuite/cmd/testfile.go @@ -4,7 +4,6 @@ import ( "context" "crypto/md5" "fmt" - "io/ioutil" "net/http" "net/url" "os" @@ -156,7 +155,7 @@ func validateAzureDirWithLocalFile(curAzureDirURL azfile.DirectoryURL, baseAzure retryReader := get.Body(azfile.RetryReaderOptions{MaxRetryRequests: 3}) // read all bytes. - fileBytesDownloaded, err := ioutil.ReadAll(retryReader) + fileBytesDownloaded, err := io.ReadAll(retryReader) if err != nil { fmt.Printf("fail to read the body of file %s downloaded and failed with error %s\n", fileInfo.Name, err.Error()) os.Exit(1) @@ -277,7 +276,7 @@ func verifySingleFileUpload(testFileCmd TestFileCommand) { // reading all the bytes downloaded. 
retryReader := get.Body(azfile.RetryReaderOptions{MaxRetryRequests: 3})
 	defer retryReader.Close()
-	fileBytesDownloaded, err := ioutil.ReadAll(retryReader)
+	fileBytesDownloaded, err := io.ReadAll(retryReader)
 	if err != nil {
 		fmt.Println("error reading the bytes from response and failed with error ", err.Error())
 		os.Exit(1)

From c1ed1428e7d34d87a1b51f208272f675fcb083a6 Mon Sep 17 00:00:00 2001
From: Gauri Prasad
Date: Wed, 4 Jan 2023 15:50:40 -0800
Subject: [PATCH 16/50] import io

---
 testSuite/cmd/testfile.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/testSuite/cmd/testfile.go b/testSuite/cmd/testfile.go
index c7cf8c5d65..735f2b0055 100644
--- a/testSuite/cmd/testfile.go
+++ b/testSuite/cmd/testfile.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"crypto/md5"
 	"fmt"
+	"io"
 	"net/http"
 	"net/url"
 	"os"

From 0eeb95cb4a337588351cb049bfa429a1f6cd8ebd Mon Sep 17 00:00:00 2001
From: Gauri Prasad
Date: Wed, 4 Jan 2023 16:28:01 -0800
Subject: [PATCH 17/50] Allow nolint

---
 azbfs/zc_credential_anonymous.go       |  5 ++
 azbfs/zc_credential_shared_key.go      | 30 ++++----
 e2etest/declarativeHelpers.go          |  2 +-
 e2etest/declarativeResourceManagers.go |  2 +-
 e2etest/declarativeScenario.go         |  2 +-
 e2etest/factory.go                     |  6 +-
 e2etest/helpers.go                     | 96 +++++++++++++-------------
 e2etest/scenario_helpers.go            | 64 ++++++++---------
 8 files changed, 103 insertions(+), 104 deletions(-)

diff --git a/azbfs/zc_credential_anonymous.go b/azbfs/zc_credential_anonymous.go
index 7e9a70197d..729ecbc320 100644
--- a/azbfs/zc_credential_anonymous.go
+++ b/azbfs/zc_credential_anonymous.go
@@ -12,6 +12,7 @@ type Credential interface {
 	credentialMarker()
 }
 
+//nolint:unused
 type credentialFunc pipeline.FactoryFunc
 
 func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
@@ -36,11 +37,15 @@ type anonymousCredentialPolicyFactory struct {
 }
 
 // New creates a credential policy object.
+//
+//nolint:unused
 func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
 	return &anonymousCredentialPolicy{next: next}
 }
 
 // credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+//
+//nolint:unused
 func (*anonymousCredentialPolicyFactory) credentialMarker() {}
 
 // anonymousCredentialPolicy is the credential's policy object.
diff --git a/azbfs/zc_credential_shared_key.go b/azbfs/zc_credential_shared_key.go
index 27c0f4ede6..f41f7d171e 100644
--- a/azbfs/zc_credential_shared_key.go
+++ b/azbfs/zc_credential_shared_key.go
@@ -62,25 +62,19 @@ func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptio
 // credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
 func (*SharedKeyCredential) credentialMarker() {}
 
-// Constants ensuring that header names are correctly spelled and consistently cased.
const ( - headerAuthorization = "Authorization" - headerCacheControl = "Cache-Control" - headerContentEncoding = "Content-Encoding" - headerContentDisposition = "Content-Disposition" - headerContentLanguage = "Content-Language" - headerContentLength = "Content-Length" - headerContentMD5 = "Content-MD5" - headerContentType = "Content-Type" - headerDate = "Date" - headerIfMatch = "If-Match" - headerIfModifiedSince = "If-Modified-Since" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerRange = "Range" - headerUserAgent = "User-Agent" - headerXmsDate = "x-ms-date" - headerXmsVersion = "x-ms-version" + headerAuthorization = "Authorization" + headerContentEncoding = "Content-Encoding" + headerContentLanguage = "Content-Language" + headerContentLength = "Content-Length" + headerContentMD5 = "Content-MD5" + headerContentType = "Content-Type" + headerIfMatch = "If-Match" + headerIfModifiedSince = "If-Modified-Since" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerRange = "Range" + headerXmsDate = "x-ms-date" ) // ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. diff --git a/e2etest/declarativeHelpers.go b/e2etest/declarativeHelpers.go index ea01fb8af6..169cb684c2 100644 --- a/e2etest/declarativeHelpers.go +++ b/e2etest/declarativeHelpers.go @@ -52,7 +52,7 @@ func equals() comparison { return comparison{true} } -// nolint +//nolint func notEquals() comparison { return comparison{false} } diff --git a/e2etest/declarativeResourceManagers.go b/e2etest/declarativeResourceManagers.go index 4b86ef6738..9710d84c7c 100644 --- a/e2etest/declarativeResourceManagers.go +++ b/e2etest/declarativeResourceManagers.go @@ -44,7 +44,7 @@ type downloadContentOptions struct { downloadFileContentOptions } -// nolint +//nolint type downloadBlobContentOptions struct { containerURL azblob.ContainerURL cpkInfo common.CpkInfo diff --git a/e2etest/declarativeScenario.go b/e2etest/declarativeScenario.go index 73aa4d66ac..387ef73e83 100644 --- a/e2etest/declarativeScenario.go +++ b/e2etest/declarativeScenario.go @@ -591,7 +591,7 @@ func (s *scenario) validateLastWriteTime(expected, actual *time.Time) { expected, actual)) } -// nolint +//nolint func (s *scenario) validateSMBAttrs(expected, actual *uint32) { if expected == nil { // These properties were not explicitly stated for verification diff --git a/e2etest/factory.go b/e2etest/factory.go index 5217efca0c..af28b0c43b 100644 --- a/e2etest/factory.go +++ b/e2etest/factory.go @@ -184,7 +184,7 @@ func (TestResourceFactory) CreateNewFileShareSnapshot(c asserter, fileShare azfi } func (TestResourceFactory) CreateLocalDirectory(c asserter) (dstDirName string) { - dstDirName, err := os.MkdirTemp("","AzCopyLocalTest") + dstDirName, err := os.MkdirTemp("", "AzCopyLocalTest") c.AssertNoErr(err) return } @@ -249,13 +249,13 @@ func getTestName(t *testing.T) (pseudoSuite, test string) { return pseudoSuite, removeUnderscores(testName) } -// nolint // This function generates an entity name by concatenating the passed prefix, // the name of the test requesting the entity name, and the minute, second, and nanoseconds of the call. // This should make it easy to associate the entities with their test, uniquely identify // them, and determine the order in which they were created. // Will truncate the end of the test name, if there is not enough room for it, followed by the time-based suffix, // with a non-zero maxLen. 
+//nolint func generateName(c asserter, prefix string, maxLen int) string { name := c.CompactScenarioName() // don't want to just use test name here, because each test contains multiple scenarios with the declarative runner @@ -280,7 +280,7 @@ func (TestResourceNameGenerator) GenerateContainerName(c asserter) string { return uuid.New().String() } -// nolint +//nolint func (TestResourceNameGenerator) generateBlobName(c asserter) string { return generateName(c, blobPrefix, 0) } diff --git a/e2etest/helpers.go b/e2etest/helpers.go index 44563e4c1a..20cefee30d 100644 --- a/e2etest/helpers.go +++ b/e2etest/helpers.go @@ -65,54 +65,54 @@ const ( ) // if S3_TESTS_OFF is set at all, S3 tests are disabled. -// nolint +//nolint func isS3Disabled() bool { return strings.ToLower(os.Getenv("S3_TESTS_OFF")) != "" } -// nolint +//nolint func skipIfS3Disabled(c asserter) { if isS3Disabled() { c.Skip("S3 testing is disabled for this unit test suite run.") } } -// nolint +//nolint func generateContainerName(c asserter) string { return generateName(c, containerPrefix, 63) } -// nolint +//nolint func generateBlobName(c asserter) string { return generateName(c, blobPrefix, 0) } -// nolint +//nolint func generateBucketName(c asserter) string { return generateName(c, bucketPrefix, 63) } -// nolint +//nolint func generateBucketNameWithCustomizedPrefix(c asserter, customizedPrefix string) string { return generateName(c, customizedPrefix, 63) } -// nolint +//nolint func generateObjectName(c asserter) string { return generateName(c, objectPrefix, 0) } -// nolint +//nolint func generateShareName(c asserter) string { return generateName(c, sharePrefix, 63) } -// nolint +//nolint func generateFilesystemName(c asserter) string { return generateName(c, blobfsPrefix, 63) } -// nolint +//nolint func getShareURL(c asserter, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { name = generateShareName(c) share = fsu.NewShareURL(name) @@ -120,17 +120,17 @@ func getShareURL(c asserter, fsu azfile.ServiceURL) (share azfile.ShareURL, name return share, name } -// nolint +//nolint func generateAzureFileName(c asserter) string { return generateName(c, azureFilePrefix, 0) } -// nolint +//nolint func generateBfsFileName(c asserter) string { return generateName(c, blobfsPrefix, 0) } -// nolint +//nolint func getContainerURL(c asserter, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { name = generateContainerName(c) container = bsu.NewContainerURL(name) @@ -138,7 +138,7 @@ func getContainerURL(c asserter, bsu azblob.ServiceURL) (container azblob.Contai return container, name } -// nolint +//nolint func getFilesystemURL(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { name = generateFilesystemName(c) filesystem = bfssu.NewFileSystemURL(name) @@ -146,7 +146,7 @@ func getFilesystemURL(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.File return } -// nolint +//nolint func getBlockBlobURL(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { name = prefix + generateBlobName(c) blob = container.NewBlockBlobURL(name) @@ -154,7 +154,7 @@ func getBlockBlobURL(c asserter, container azblob.ContainerURL, prefix string) ( return blob, name } -// nolint +//nolint func getBfsFileURL(c asserter, filesystemURL azbfs.FileSystemURL, prefix string) (file azbfs.FileURL, name string) { name = prefix + generateBfsFileName(c) file = filesystemURL.NewRootDirectoryURL().NewFileURL(name) @@ -162,7 +162,7 @@ func getBfsFileURL(c asserter, 
filesystemURL azbfs.FileSystemURL, prefix string) return } -// nolint +//nolint func getAppendBlobURL(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { name = generateBlobName(c) blob = container.NewAppendBlobURL(prefix + name) @@ -170,7 +170,7 @@ func getAppendBlobURL(c asserter, container azblob.ContainerURL, prefix string) return blob, name } -// nolint +//nolint func getPageBlobURL(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { name = generateBlobName(c) blob = container.NewPageBlobURL(prefix + name) @@ -178,7 +178,7 @@ func getPageBlobURL(c asserter, container azblob.ContainerURL, prefix string) (b return } -// nolint +//nolint func getAzureFileURL(c asserter, shareURL azfile.ShareURL, prefix string) (fileURL azfile.FileURL, name string) { name = prefix + generateAzureFileName(c) fileURL = shareURL.NewRootDirectoryURL().NewFileURL(name) @@ -186,7 +186,7 @@ func getAzureFileURL(c asserter, shareURL azfile.ShareURL, prefix string) (fileU return } -// nolint +//nolint func getReaderToRandomBytes(n int) *bytes.Reader { r, _ := getRandomDataAndReader(n) return r @@ -200,7 +200,7 @@ func getRandomDataAndReader(n int) (*bytes.Reader, []byte) { return bytes.NewReader(data), data } -// nolint +//nolint func createNewContainer(c asserter, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { container, name = getContainerURL(c, bsu) @@ -210,7 +210,7 @@ func createNewContainer(c asserter, bsu azblob.ServiceURL) (container azblob.Con return container, name } -// nolint +//nolint func createNewFilesystem(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { filesystem, name = getFilesystemURL(c, bfssu) @@ -220,7 +220,7 @@ func createNewFilesystem(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.F return } -// nolint +//nolint func createNewBfsFile(c asserter, filesystem azbfs.FileSystemURL, prefix string) (file azbfs.FileURL, name string) { file, name = getBfsFileURL(c, filesystem, prefix) @@ -239,7 +239,7 @@ func createNewBfsFile(c asserter, filesystem azbfs.FileSystemURL, prefix string) return } -// nolint +//nolint func createNewBlockBlob(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { blob, name = getBlockBlobURL(c, container, prefix) @@ -252,7 +252,7 @@ func createNewBlockBlob(c asserter, container azblob.ContainerURL, prefix string return } -// nolint +//nolint func createNewAzureShare(c asserter, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { share, name = getShareURL(c, fsu) @@ -262,7 +262,7 @@ func createNewAzureShare(c asserter, fsu azfile.ServiceURL) (share azfile.ShareU return share, name } -// nolint +//nolint func createNewAzureFile(c asserter, share azfile.ShareURL, prefix string) (file azfile.FileURL, name string) { file, name = getAzureFileURL(c, share, prefix) @@ -287,7 +287,7 @@ func generateParentsForAzureFile(c asserter, fileURL azfile.FileURL) { c.AssertNoErr(err) } -// nolint +//nolint func createNewAppendBlob(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { blob, name = getAppendBlobURL(c, container, prefix) @@ -298,7 +298,7 @@ func createNewAppendBlob(c asserter, container azblob.ContainerURL, prefix strin return } -// nolint +//nolint func createNewPageBlob(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { blob, name = getPageBlobURL(c, container, prefix) 
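The one-character change repeated through these hunks is load-bearing: golangci-lint only honors "nolint" as a machine-readable directive when nothing separates the slashes from the keyword, so the spaced form is treated as an ordinary comment and suppresses nothing. A minimal sketch of the difference (the function names are placeholders, not code from this repository):

    package demo

    // With a space this is just a comment; the unused checker still reports it.
    // nolint
    func spacedFormDoesNothing() {}

    // Without the space it is a real directive; later commits in this series
    // narrow such suppressions to a single linter with //nolint:unused.
    //nolint
    func bareDirective() {}

    //nolint:unused
    func scopedDirective() {}
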
@@ -309,26 +309,26 @@ func createNewPageBlob(c asserter, container azblob.ContainerURL, prefix string)
 	return
 }
 
-// nolint
+//nolint
 func deleteContainer(c asserter, container azblob.ContainerURL) {
 	resp, err := container.Delete(ctx, azblob.ContainerAccessConditions{})
 	c.AssertNoErr(err)
 	c.Assert(resp.StatusCode(), equals(), 202)
 }
 
-// nolint
+//nolint
 func deleteFilesystem(c asserter, filesystem azbfs.FileSystemURL) {
 	resp, err := filesystem.Delete(ctx)
 	c.AssertNoErr(err)
 	c.Assert(resp.StatusCode(), equals(), 202)
 }
 
-// nolint
+//nolint
 type createS3ResOptions struct {
 	Location string
 }
 
-// nolint
+//nolint
 func createS3ClientWithMinio(c asserter, o createS3ResOptions) (*minio.Client, error) {
 	skipIfS3Disabled(c)
@@ -347,7 +347,7 @@ func createS3ClientWithMinio(c asserter, o createS3ResOptions) (*minio.Client, e
 	return s3Client, nil
 }
 
-// nolint
+//nolint
 func createNewBucket(c asserter, client *minio.Client, o createS3ResOptions) string {
 	bucketName := generateBucketName(c)
 	err := client.MakeBucket(bucketName, o.Location)
@@ -356,13 +356,13 @@ func createNewBucket(c asserter, client *minio.Client, o createS3ResOptions) str
 	return bucketName
 }
 
-// nolint
+//nolint
 func createNewBucketWithName(c asserter, client *minio.Client, bucketName string, o createS3ResOptions) {
 	err := client.MakeBucket(bucketName, o.Location)
 	c.AssertNoErr(err)
 }
 
-// nolint
+//nolint
 func createNewObject(c asserter, client *minio.Client, bucketName string, prefix string) (objectKey string) {
 	objectKey = prefix + generateObjectName(c)
@@ -375,7 +375,7 @@ func createNewObject(c asserter, client *minio.Client, bucketName string, prefix
 	return
 }
 
-// nolint
+//nolint
 func deleteBucket(_ asserter, client *minio.Client, bucketName string, waitQuarterMinute bool) {
 	// If we error out in this function, simply just skip over deleting the bucket.
 	// Some of our buckets have become "ghost" buckets in the past.
@@ -420,7 +420,7 @@ func deleteBucket(_ asserter, client *minio.Client, bucketName string, waitQuart
 	}
 }
 
-// nolint
+//nolint
 func cleanS3Account(c asserter, client *minio.Client) {
 	buckets, err := client.ListBuckets()
 	if err != nil {
@@ -437,7 +437,7 @@ func cleanS3Account(c asserter, client *minio.Client) {
 	time.Sleep(time.Minute)
 }
 
-// nolint
+//nolint
 func cleanBlobAccount(c asserter, serviceURL azblob.ServiceURL) {
 	marker := azblob.Marker{}
 	for marker.NotDone() {
@@ -453,7 +453,7 @@ func cleanBlobAccount(c asserter, serviceURL azblob.ServiceURL) {
 	}
 }
 
-// nolint
+//nolint
 func cleanFileAccount(c asserter, serviceURL azfile.ServiceURL) {
 	marker := azfile.Marker{}
 	for marker.NotDone() {
@@ -471,7 +471,7 @@ func cleanFileAccount(c asserter, serviceURL azfile.ServiceURL) {
 	time.Sleep(time.Minute)
 }
 
-// nolint
+//nolint
 func getGenericCredentialForFile(accountType string) (*azfile.SharedKeyCredential, error) {
 	accountNameEnvVar := accountType + "ACCOUNT_NAME"
 	accountKeyEnvVar := accountType + "ACCOUNT_KEY"
@@ -492,7 +492,7 @@ func deleteShare(c asserter, share azfile.ShareURL) {
 // those changes not being reflected yet, we will wait 30 seconds and try the test again. If it fails this time for any reason,
 // we fail the test. It is the responsibility of the testImplFunc to determine which error string indicates the test should be retried.
 // There can only be one such string. All errors that cannot be due to this detail should be asserted and not returned as an error string.
-// nolint +//nolint func runTestRequiringServiceProperties(c asserter, bsu azblob.ServiceURL, code string, enableServicePropertyFunc func(asserter, azblob.ServiceURL), testImplFunc func(asserter, azblob.ServiceURL) error, @@ -508,7 +508,7 @@ func runTestRequiringServiceProperties(c asserter, bsu azblob.ServiceURL, code s } } -// nolint +//nolint func getContainerURLWithSAS(c asserter, credential azblob.SharedKeyCredential, containerName string) azblob.ContainerURL { sasQueryParams, err := azblob.BlobSASSignatureValues{ Protocol: azblob.SASProtocolHTTPS, @@ -531,7 +531,7 @@ func getContainerURLWithSAS(c asserter, credential azblob.SharedKeyCredential, c return azblob.NewContainerURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) } -// nolint +//nolint func getBlobServiceURLWithSAS(c asserter, credential azblob.SharedKeyCredential) azblob.ServiceURL { sasQueryParams, err := azblob.AccountSASSignatureValues{ Protocol: azblob.SASProtocolHTTPS, @@ -554,7 +554,7 @@ func getBlobServiceURLWithSAS(c asserter, credential azblob.SharedKeyCredential) return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) } -// nolint +//nolint func getFileServiceURLWithSAS(c asserter, credential azfile.SharedKeyCredential) azfile.ServiceURL { sasQueryParams, err := azfile.AccountSASSignatureValues{ Protocol: azfile.SASProtocolHTTPS, @@ -574,7 +574,7 @@ func getFileServiceURLWithSAS(c asserter, credential azfile.SharedKeyCredential) return azfile.NewServiceURL(*fullURL, azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})) } -// nolint +//nolint func getShareURLWithSAS(c asserter, credential azfile.SharedKeyCredential, shareName string) azfile.ShareURL { sasQueryParams, err := azfile.FileSASSignatureValues{ Protocol: azfile.SASProtocolHTTPS, @@ -597,7 +597,7 @@ func getShareURLWithSAS(c asserter, credential azfile.SharedKeyCredential, share return azfile.NewShareURL(*fullURL, azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})) } -// nolint +//nolint func getAdlsServiceURLWithSAS(c asserter, credential azbfs.SharedKeyCredential) azbfs.ServiceURL { sasQueryParams, err := azbfs.AccountSASSignatureValues{ Protocol: azbfs.SASProtocolHTTPS, @@ -621,13 +621,13 @@ func getAdlsServiceURLWithSAS(c asserter, credential azbfs.SharedKeyCredential) } // check.v1 style "StringContains" checker -// nolint +//nolint type stringContainsChecker struct { *chk.CheckerInfo } // Check -// nolint +//nolint func (checker *stringContainsChecker) Check(params []interface{}, _ []string) (result bool, error string) { if len(params) < 2 { return false, "StringContains requires two parameters" diff --git a/e2etest/scenario_helpers.go b/e2etest/scenario_helpers.go index 25402884e1..7a3c4a5efa 100644 --- a/e2etest/scenario_helpers.go +++ b/e2etest/scenario_helpers.go @@ -49,7 +49,7 @@ const defaultStringFileSize = "1k" type scenarioHelper struct{} -// nolint +//nolint var specialNames = []string{ "打麻将.txt", "wow such space so much space", @@ -65,7 +65,7 @@ var specialNames = []string{ } // note: this is to emulate the list-of-files flag -// nolint +//nolint func (scenarioHelper) generateListOfFiles(c asserter, fileList []string) (path string) { parentDirName, err := os.MkdirTemp("", "AzCopyLocalTest") c.AssertNoErr(err) @@ -82,7 +82,7 @@ func (scenarioHelper) generateListOfFiles(c asserter, fileList []string) (path s return } -// nolint +//nolint func (scenarioHelper) generateLocalDirectory(c 
asserter) (dstDirName string) { dstDirName, err := os.MkdirTemp("", "AzCopyLocalTest") c.AssertNoErr(err) @@ -198,7 +198,7 @@ func (s scenarioHelper) enumerateLocalProperties(a asserter, dirpath string) map return result } -// nolint +//nolint func (s scenarioHelper) generateCommonRemoteScenarioForLocal(c asserter, dirPath string, prefix string) (fileList []string) { fileList = make([]string, 50) for i := 0; i < 10; i++ { @@ -222,7 +222,7 @@ func (s scenarioHelper) generateCommonRemoteScenarioForLocal(c asserter, dirPath return } -// nolint +//nolint func (scenarioHelper) generateCommonRemoteScenarioForBlob(c asserter, containerURL azblob.ContainerURL, prefix string) (blobList []string) { // make 50 blobs with random names // 10 of them at the top level @@ -251,7 +251,7 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlob(c asserter, containerU return } -// nolint +//nolint func (scenarioHelper) generateCommonRemoteScenarioForBlobFS(c asserter, filesystemURL azbfs.FileSystemURL, prefix string) (pathList []string) { pathList = make([]string, 50) @@ -274,7 +274,7 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlobFS(c asserter, filesyst return } -// nolint +//nolint func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(c asserter, shareURL azfile.ShareURL, prefix string) (fileList []string) { fileList = make([]string, 50) @@ -297,7 +297,7 @@ func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(c asserter, share return } -// nolint +//nolint func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(c asserter, serviceURL azblob.ServiceURL, containerList []string, blobList []*testObject) { for _, containerName := range containerList { curl := serviceURL.NewContainerURL(containerName) @@ -313,7 +313,7 @@ func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(c asserter, serv } } -// nolint +//nolint func (s scenarioHelper) generateFileSharesAndFilesFromLists(c asserter, serviceURL azfile.ServiceURL, shareList []string, fileList []*testObject) { for _, shareName := range shareList { sURL := serviceURL.NewShareURL(shareName) @@ -328,7 +328,7 @@ func (s scenarioHelper) generateFileSharesAndFilesFromLists(c asserter, serviceU } } -// nolint +//nolint func (s scenarioHelper) generateFilesystemsAndFilesFromLists(c asserter, serviceURL azbfs.ServiceURL, fsList []string, fileList []string, data string) { for _, filesystemName := range fsList { fsURL := serviceURL.NewFileSystemURL(filesystemName) @@ -339,7 +339,7 @@ func (s scenarioHelper) generateFilesystemsAndFilesFromLists(c asserter, service } } -// nolint +//nolint func (s scenarioHelper) generateS3BucketsAndObjectsFromLists(c asserter, s3Client *minio.Client, bucketList []string, objectList []string, data string) { for _, bucketName := range bucketList { err := s3Client.MakeBucket(bucketName, "") @@ -534,7 +534,7 @@ func (s scenarioHelper) downloadBlobContent(a asserter, options downloadContentO return destData[:] } -// nolint +//nolint func (scenarioHelper) generatePageBlobsFromList(c asserter, containerURL azblob.ContainerURL, blobList []string, data string) { for _, blobName := range blobList { // Create the blob (PUT blob) @@ -571,7 +571,7 @@ func (scenarioHelper) generatePageBlobsFromList(c asserter, containerURL azblob. 
time.Sleep(time.Millisecond * 1050) } -// nolint +//nolint func (scenarioHelper) generateAppendBlobsFromList(c asserter, containerURL azblob.ContainerURL, blobList []string, data string) { for _, blobName := range blobList { // Create the blob (PUT blob) @@ -602,7 +602,7 @@ func (scenarioHelper) generateAppendBlobsFromList(c asserter, containerURL azblo time.Sleep(time.Millisecond * 1050) } -// nolint +//nolint func (scenarioHelper) generateBlockBlobWithAccessTier(c asserter, containerURL azblob.ContainerURL, blobName string, accessTier azblob.AccessTierType) { blob := containerURL.NewBlockBlobURL(blobName) cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, @@ -612,7 +612,7 @@ func (scenarioHelper) generateBlockBlobWithAccessTier(c asserter, containerURL a } // create the demanded objects -// nolint +//nolint func (scenarioHelper) generateObjects(c asserter, client *minio.Client, bucketName string, objectList []string) { size := int64(len(objectDefaultData)) for _, objectName := range objectList { @@ -623,7 +623,7 @@ func (scenarioHelper) generateObjects(c asserter, client *minio.Client, bucketNa } // create the demanded files -// nolint +//nolint func (scenarioHelper) generateFlatFiles(c asserter, shareURL azfile.ShareURL, fileList []string) { for _, fileName := range fileList { file := shareURL.NewRootDirectoryURL().NewFileURL(fileName) @@ -635,7 +635,7 @@ func (scenarioHelper) generateFlatFiles(c asserter, shareURL azfile.ShareURL, fi time.Sleep(time.Millisecond * 1050) } -// nolint +//nolint func (scenarioHelper) generateCommonRemoteScenarioForS3(c asserter, client *minio.Client, bucketName string, prefix string, returnObjectListWithBucketName bool) (objectList []string) { // make 50 objects with random names // 10 of them at the top level @@ -916,7 +916,7 @@ func (s scenarioHelper) downloadFileContent(a asserter, options downloadContentO return destData } -// nolint +//nolint func (scenarioHelper) generateBFSPathsFromList(c asserter, filesystemURL azbfs.FileSystemURL, fileList []string) { for _, bfsPath := range fileList { file := filesystemURL.NewRootDirectoryURL().NewFileURL(bfsPath) @@ -948,7 +948,7 @@ func (scenarioHelper) convertListToMap(list []*testObject, converter func(*testO return lookupMap } -// nolint +//nolint func (scenarioHelper) shaveOffPrefix(list []string, prefix string) []string { cleanList := make([]string, len(list)) for i, item := range list { @@ -957,7 +957,7 @@ func (scenarioHelper) shaveOffPrefix(list []string, prefix string) []string { return cleanList } -// nolint +//nolint func (scenarioHelper) addPrefix(list []string, prefix string) []string { modifiedList := make([]string, len(list)) for i, item := range list { @@ -966,7 +966,7 @@ func (scenarioHelper) addPrefix(list []string, prefix string) []string { return modifiedList } -// nolint +//nolint func (scenarioHelper) getRawContainerURLWithSAS(c asserter, containerName string) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) @@ -975,7 +975,7 @@ func (scenarioHelper) getRawContainerURLWithSAS(c asserter, containerName string return containerURLWithSAS.URL() } -// nolint +//nolint func (scenarioHelper) getRawBlobURLWithSAS(c asserter, containerName string, blobName string) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azblob.NewSharedKeyCredential(accountName, 
accountKey) @@ -985,7 +985,7 @@ func (scenarioHelper) getRawBlobURLWithSAS(c asserter, containerName string, blo return blobURLWithSAS.URL() } -// nolint +//nolint func (scenarioHelper) getRawBlobServiceURLWithSAS(c asserter) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) @@ -994,7 +994,7 @@ func (scenarioHelper) getRawBlobServiceURLWithSAS(c asserter) url.URL { return getBlobServiceURLWithSAS(c, *credential).URL() } -// nolint +//nolint func (scenarioHelper) getRawFileServiceURLWithSAS(c asserter) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) @@ -1003,7 +1003,7 @@ func (scenarioHelper) getRawFileServiceURLWithSAS(c asserter) url.URL { return getFileServiceURLWithSAS(c, *credential).URL() } -// nolint +//nolint func (scenarioHelper) getRawAdlsServiceURLWithSAS(c asserter) azbfs.ServiceURL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential := azbfs.NewSharedKeyCredential(accountName, accountKey) @@ -1011,7 +1011,7 @@ func (scenarioHelper) getRawAdlsServiceURLWithSAS(c asserter) azbfs.ServiceURL { return getAdlsServiceURLWithSAS(c, *credential) } -// nolint +//nolint func (scenarioHelper) getBlobServiceURL(c asserter) azblob.ServiceURL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) @@ -1025,7 +1025,7 @@ func (scenarioHelper) getBlobServiceURL(c asserter) azblob.ServiceURL { return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(credential, azblob.PipelineOptions{})) } -// nolint +//nolint func (s scenarioHelper) getContainerURL(c asserter, containerName string) azblob.ContainerURL { serviceURL := s.getBlobServiceURL(c) containerURL := serviceURL.NewContainerURL(containerName) @@ -1033,7 +1033,7 @@ func (s scenarioHelper) getContainerURL(c asserter, containerName string) azblob return containerURL } -// nolint +//nolint func (scenarioHelper) getRawS3AccountURL(c asserter, region string) url.URL { rawURL := fmt.Sprintf("https://s3%s.amazonaws.com", common.IffString(region == "", "", "-"+region)) @@ -1044,7 +1044,7 @@ func (scenarioHelper) getRawS3AccountURL(c asserter, region string) url.URL { } // TODO: Possibly add virtual-hosted-style and dual stack support. Currently use path style for testing. 
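Since the TODO above turns on the distinction between the two S3 addressing styles, here they are side by side; the bucket, region, and object names are invented for illustration:

    // Path-style, which these helpers currently build (bucket in the path):
    //   https://s3-us-west-2.amazonaws.com/my-bucket/dir/object.txt
    // Virtual-hosted-style, where the bucket moves into the hostname:
    //   https://my-bucket.s3-us-west-2.amazonaws.com/dir/object.txt
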
-// nolint +//nolint func (scenarioHelper) getRawS3BucketURL(c asserter, region string, bucketName string) url.URL { rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s", common.IffString(region == "", "", "-"+region), bucketName) @@ -1054,7 +1054,7 @@ func (scenarioHelper) getRawS3BucketURL(c asserter, region string, bucketName st return *fullURL } -// nolint +//nolint func (scenarioHelper) getRawS3ObjectURL(c asserter, region string, bucketName string, objectName string) url.URL { rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s/%s", common.IffString(region == "", "", "-"+region), bucketName, objectName) @@ -1064,7 +1064,7 @@ func (scenarioHelper) getRawS3ObjectURL(c asserter, region string, bucketName st return *fullURL } -// nolint +//nolint func (scenarioHelper) getRawFileURLWithSAS(c asserter, shareName string, fileName string) url.URL { credential, err := getGenericCredentialForFile("") c.AssertNoErr(err) @@ -1073,7 +1073,7 @@ func (scenarioHelper) getRawFileURLWithSAS(c asserter, shareName string, fileNam return fileURLWithSAS.URL() } -// nolint +//nolint func (scenarioHelper) getRawShareURLWithSAS(c asserter, shareName string) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) From 42434b7a0fe393063eb51f7c86e1a6f5924af333 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 4 Jan 2023 16:51:46 -0800 Subject: [PATCH 18/50] Marked or removed unused code --- azbfs/zc_credential_anonymous.go | 2 ++ azbfs/zc_mmf_unix.go | 4 ++++ azbfs/zc_sas_query_params.go | 4 ++-- azbfs/zc_uuid.go | 2 ++ common/credCacheInternal_linux.go | 1 - common/logger.go | 1 + common/singleChunkReader.go | 12 ------------ sddl/sddlHelper_linux.go | 2 ++ ste/downloader-blob.go | 5 ++--- ste/sender.go | 2 -- 10 files changed, 15 insertions(+), 20 deletions(-) diff --git a/azbfs/zc_credential_anonymous.go b/azbfs/zc_credential_anonymous.go index 729ecbc320..256c1ce5fa 100644 --- a/azbfs/zc_credential_anonymous.go +++ b/azbfs/zc_credential_anonymous.go @@ -15,11 +15,13 @@ type Credential interface { //nolint:unused type credentialFunc pipeline.FactoryFunc +//nolint:unused func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { return f(next, po) } // credentialMarker is a package-internal method that exists just to satisfy the Credential interface. 
+//nolint:unused func (credentialFunc) credentialMarker() {} ////////////////////////////// diff --git a/azbfs/zc_mmf_unix.go b/azbfs/zc_mmf_unix.go index 9c6df82895..de835d4d13 100644 --- a/azbfs/zc_mmf_unix.go +++ b/azbfs/zc_mmf_unix.go @@ -1,3 +1,4 @@ +//go:build linux || darwin // +build linux darwin package azbfs @@ -7,8 +8,10 @@ import ( "syscall" ) +//nolint:unused type mmf []byte +//nolint:unused func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only if writable { @@ -18,6 +21,7 @@ func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) return mmf(addr), err } +//nolint:unused func (m *mmf) unmap() { err := syscall.Munmap(*m) *m = nil diff --git a/azbfs/zc_sas_query_params.go b/azbfs/zc_sas_query_params.go index 57ef32d328..132a3099ad 100644 --- a/azbfs/zc_sas_query_params.go +++ b/azbfs/zc_sas_query_params.go @@ -200,7 +200,7 @@ func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool case "st": p.startTime, p.stTimeFormat, _ = parseSASTimeString(val) case "se": - p.expiryTime, p.stTimeFormat, _ = parseSASTimeString(val) + p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val) case "sip": dashIndex := strings.Index(val, "-") if dashIndex == -1 { @@ -255,7 +255,7 @@ func (p *SASQueryParameters) addToValues(v url.Values) url.Values { v.Add("st", formatSASTime(&p.startTime, p.stTimeFormat)) } if !p.expiryTime.IsZero() { - v.Add("se", formatSASTime(&p.expiryTime, p.stTimeFormat)) + v.Add("se", formatSASTime(&p.expiryTime, p.seTimeFormat)) } if len(p.ipRange.Start) > 0 { v.Add("sip", p.ipRange.String()) diff --git a/azbfs/zc_uuid.go b/azbfs/zc_uuid.go index 0f90c9fd1a..be2c279309 100644 --- a/azbfs/zc_uuid.go +++ b/azbfs/zc_uuid.go @@ -39,6 +39,7 @@ func (u uuid) String() string { // ParseUUID parses a string formatted as "003020100-0504-0706-0809-0a0b0c0d0e0f" // or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID. +//nolint:unused func parseUUID(uuidStr string) uuid { char := func(hexString string) byte { i, _ := strconv.ParseUint(hexString, 16, 8) @@ -75,6 +76,7 @@ func parseUUID(uuidStr string) uuid { return uuidVal } +//nolint:unused func (u uuid) bytes() []byte { return u[:] } diff --git a/common/credCacheInternal_linux.go b/common/credCacheInternal_linux.go index 8d6a4c4d3b..8c358f3ca4 100644 --- a/common/credCacheInternal_linux.go +++ b/common/credCacheInternal_linux.go @@ -29,7 +29,6 @@ import ( // CredCacheInternalIntegration manages credential caches with Gnome keyring. // Note: This should be only used for internal integration. type CredCacheInternalIntegration struct { - state string accountName string serviceName string keyName string diff --git a/common/logger.go b/common/logger.go index 56e708e6b8..6a5a696b38 100644 --- a/common/logger.go +++ b/common/logger.go @@ -52,6 +52,7 @@ type ILoggerResetable interface { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// TODO : Can this be deleted? func NewAppLogger(minimumLevelToLog pipeline.LogLevel, logFileFolder string) ILoggerCloser { // TODO: Put start date time in file Name // TODO: log life time management. 
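Buried among the annotations above is one behavioral fix worth a second look: in zc_sas_query_params.go the expiry time ("se") was parsed into, and re-serialized from, the start time's format field (stTimeFormat instead of seTimeFormat). The sketch below shows why a per-field format matters when round-tripping SAS times; the layout list and helper shape are assumptions modeled on the names in the diff, not the exact library code:

    package demo

    import "time"

    // SAS timestamps may legally arrive in several layouts, and a signed URL must
    // re-serialize each one byte-for-byte, so every field records its own layout.
    var sasTimeFormats = []string{
    	"2006-01-02T15:04:05Z", // seconds granularity
    	"2006-01-02T15:04Z",    // minutes granularity
    	"2006-01-02",           // date only
    }

    // parseSASTimeString returns the parsed time together with the layout that
    // matched, so "st" and "se" can each be written back in their original form.
    func parseSASTimeString(val string) (t time.Time, format string, err error) {
    	for _, layout := range sasTimeFormats {
    		if t, err = time.Parse(layout, val); err == nil {
    			return t, layout, nil
    		}
    	}
    	return time.Time{}, "", err
    }
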
diff --git a/common/singleChunkReader.go b/common/singleChunkReader.go index eddd5e01f0..4586368ea9 100644 --- a/common/singleChunkReader.go +++ b/common/singleChunkReader.go @@ -25,7 +25,6 @@ import ( "errors" "hash" "io" - "runtime" "sync" "github.com/Azure/azure-pipeline-go/pipeline" @@ -473,14 +472,3 @@ func (cr *singleChunkReader) WriteBufferTo(h hash.Hash) { panic("documentation of hash.Hash.Write says it will never return an error") } } - -func stack() []byte { - buf := make([]byte, 2048) - for { - n := runtime.Stack(buf, false) - if n < len(buf) { - return buf[:n] - } - buf = make([]byte, 2*len(buf)) - } -} diff --git a/sddl/sddlHelper_linux.go b/sddl/sddlHelper_linux.go index 874550d7fa..9d82ed355e 100644 --- a/sddl/sddlHelper_linux.go +++ b/sddl/sddlHelper_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux // Copyright Microsoft @@ -872,6 +873,7 @@ func aceRightsToString(aceRights uint32) string { // Does the aceType correspond to an object ACE? // We don't support object ACEs. +//nolint:unused func isObjectAce(aceType byte) bool { switch aceType { case ACCESS_ALLOWED_OBJECT_ACE_TYPE, diff --git a/ste/downloader-blob.go b/ste/downloader-blob.go index 335442b36e..a0475d3d76 100644 --- a/ste/downloader-blob.go +++ b/ste/downloader-blob.go @@ -41,9 +41,8 @@ type blobDownloader struct { // used to avoid re-setting file mode setMode bool - jptm IJobPartTransferMgr - txInfo TransferInfo - fileMode uint32 + jptm IJobPartTransferMgr + txInfo TransferInfo } func newBlobDownloader() downloader { diff --git a/ste/sender.go b/ste/sender.go index 446ecdc1d5..ba5ade1c2e 100644 --- a/ste/sender.go +++ b/ste/sender.go @@ -105,8 +105,6 @@ type s2sCopier interface { GenerateCopyFunc(chunkID common.ChunkID, blockIndex int32, adjustedChunkSize int64, chunkIsWholeFile bool) chunkFunc } -type s2sCopierFactory func(jptm IJobPartTransferMgr, srcInfoProvider IRemoteSourceInfoProvider, destination string, p pipeline.Pipeline, pacer pacer) (s2sCopier, error) - ///////////////////////////////////////////////////////////////////////////////////////////////// // Abstraction of the methods needed to upload one file to a remote location ///////////////////////////////////////////////////////////////////////////////////////////////// From 58d1bb575cd6ccaa524f288066c65abc05855f98 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Thu, 5 Jan 2023 10:26:52 -0800 Subject: [PATCH 19/50] Fixed some unused errors --- cmd/copyUtil.go | 25 ------------------ cmd/credentialUtil.go | 1 + cmd/login.go | 5 ---- cmd/rpc.go | 57 +---------------------------------------- common/logger.go | 4 +-- jobsAdmin/JobsAdmin.go | 20 +++++---------- ste/concurrencyTuner.go | 2 -- ste/downloader-blob.go | 3 --- ste/mgr-JobMgr.go | 3 --- 9 files changed, 10 insertions(+), 110 deletions(-) diff --git a/cmd/copyUtil.go b/cmd/copyUtil.go index f7dc3bdfaf..825f81844d 100644 --- a/cmd/copyUtil.go +++ b/cmd/copyUtil.go @@ -159,31 +159,6 @@ func (util copyHandlerUtil) urlIsAzureFileDirectory(ctx context.Context, url *ur return true } -// append a file name to the container path to generate a blob path -func (copyHandlerUtil) generateObjectPath(destinationPath, fileName string) string { - if strings.LastIndex(destinationPath, "/") == len(destinationPath)-1 { - return fmt.Sprintf("%s%s", destinationPath, fileName) - } - return fmt.Sprintf("%s/%s", destinationPath, fileName) -} - -func (util copyHandlerUtil) getBlobNameFromURL(path string) string { - // return everything after the second / - return strings.SplitAfterN(path[1:], 
common.AZCOPY_PATH_SEPARATOR_STRING, 2)[1] -} - -func (util copyHandlerUtil) firstIndexOfWildCard(name string) int { - return strings.Index(name, wildCard) -} -func (util copyHandlerUtil) getContainerURLFromString(url url.URL) url.URL { - blobParts := azblob.NewBlobURLParts(url) - blobParts.BlobName = "" - return blobParts.URL() - //containerName := strings.SplitAfterN(url.Path[1:], "/", 2)[0] - //url.Path = "/" + containerName - //return url -} - func (util copyHandlerUtil) getContainerUrl(blobParts azblob.BlobURLParts) url.URL { blobParts.BlobName = "" return blobParts.URL() diff --git a/cmd/credentialUtil.go b/cmd/credentialUtil.go index 54829fcfef..8a910ed48f 100644 --- a/cmd/credentialUtil.go +++ b/cmd/credentialUtil.go @@ -360,6 +360,7 @@ func getAzureFileCredentialType() (common.CredentialType, error) { // envVarCredentialType used for passing credential type into AzCopy through environment variable. // Note: This is only used for internal integration, and not encouraged to be used directly. +//nolint:unused const envVarCredentialType = "AZCOPY_CRED_TYPE" var stashedEnvCredType = "" diff --git a/cmd/login.go b/cmd/login.go index 1c45f742c8..bf122dabd5 100644 --- a/cmd/login.go +++ b/cmd/login.go @@ -105,11 +105,6 @@ type loginCmdArgs struct { persistToken bool } -type argValidity struct { - Required string - Invalid string -} - func (lca loginCmdArgs) validate() error { // Only support one kind of oauth login at same time. switch { diff --git a/cmd/rpc.go b/cmd/rpc.go index 53055eb7b8..49f4967d24 100644 --- a/cmd/rpc.go +++ b/cmd/rpc.go @@ -21,14 +21,9 @@ package cmd import ( - "bytes" - "encoding/json" "fmt" - "github.com/Azure/azure-storage-azcopy/v10/jobsAdmin" - "io" - "net/http" - "github.com/Azure/azure-storage-azcopy/v10/common" + "github.com/Azure/azure-storage-azcopy/v10/jobsAdmin" ) // Global singleton for sending RPC requests from the frontend to the STE @@ -72,53 +67,3 @@ func inprocSend(rpcCmd common.RpcCmd, requestData interface{}, responseData inte } return nil } - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -// NewHttpClient returns the instance of struct containing an instance of http.client and url -func NewHttpClient(url string) *HTTPClient { - return &HTTPClient{ - client: &http.Client{}, - url: url, - } -} - -// todo : use url in case of string -type HTTPClient struct { - client *http.Client - url string -} - -// Send method on HttpClient sends the data passed in the interface for given command type to the client url -func (httpClient *HTTPClient) send(rpcCmd common.RpcCmd, requestData interface{}, responseData interface{}) error { - // Create HTTP request with command in query parameter & request data as JSON payload - requestJson, err := json.Marshal(requestData) - if err != nil { - return fmt.Errorf("error marshalling request payload for command type %q", rpcCmd.String()) - } - request, err := http.NewRequest("POST", httpClient.url, bytes.NewReader(requestJson)) - if err != nil { - return err - } - // adding the commandType as a query param - q := request.URL.Query() - q.Add("commandType", rpcCmd.String()) - request.URL.RawQuery = q.Encode() - - response, err := httpClient.client.Do(request) - if err != nil { - return err - } - - // Read response data, deserialize it and return it (via out responseData parameter) & error - responseJson, err := io.ReadAll(response.Body) - response.Body.Close() - if err != nil { - return fmt.Errorf("error reading response for the request") - } - err 
= json.Unmarshal(responseJson, responseData) - common.PanicIfErr(err) - return nil -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/common/logger.go b/common/logger.go index 6a5a696b38..7ffbbd66f5 100644 --- a/common/logger.go +++ b/common/logger.go @@ -69,8 +69,8 @@ type appLogger struct { // maximum loglevel represents the maximum severity of log messages which can be logged to Job Log file. // any message with severity higher than this will be ignored. minimumLevelToLog pipeline.LogLevel // The maximum customer-desired log level for this job - file *os.File // The job's log file - logger *log.Logger // The Job's logger + // file *os.File // The job's log file + // logger *log.Logger // The Job's logger } func (al *appLogger) ShouldLog(level pipeline.LogLevel) bool { diff --git a/jobsAdmin/JobsAdmin.go b/jobsAdmin/JobsAdmin.go index a8d6852f86..8988ad9c74 100755 --- a/jobsAdmin/JobsAdmin.go +++ b/jobsAdmin/JobsAdmin.go @@ -238,13 +238,12 @@ func (ja *jobsAdmin) recordTuningCompleted(showOutput bool) { // There will be only 1 instance of the jobsAdmin type. // The coordinator uses this to manage all the running jobs and their job parts. type jobsAdmin struct { - atomicSuccessfulBytesInActiveFiles int64 - atomicBytesTransferredWhileTuning int64 - atomicTuningEndSeconds int64 - atomicCurrentMainPoolSize int32 // align 64 bit integers for 32 bit arch - concurrency ste.ConcurrencySettings - logger common.ILoggerCloser - jobIDToJobMgr jobIDToJobMgr // Thread-safe map from each JobID to its JobInfo + atomicBytesTransferredWhileTuning int64 + atomicTuningEndSeconds int64 + atomicCurrentMainPoolSize int32 // align 64 bit integers for 32 bit arch + concurrency ste.ConcurrencySettings + logger common.ILoggerCloser + jobIDToJobMgr jobIDToJobMgr // Thread-safe map from each JobID to its JobInfo // Other global state can be stored in more fields here... logDir string // Where log files are stored planDir string // Initialize to directory where Job Part Plans are stored @@ -583,13 +582,6 @@ func (ja *jobsAdmin) TryGetPerformanceAdvice(bytesInJob uint64, filesInJob uint3 return a.GetAdvice() } -//Structs for messageHandler - -/* PerfAdjustment message. 
*/ -type jaPerfAdjustmentMsg struct { - Throughput int64 `json:"cap-mbps,string"` -} - func (ja *jobsAdmin) messageHandler(inputChan <-chan *common.LCMMsg) { toBitsPerSec := func(megaBitsPerSec int64) int64 { return megaBitsPerSec * 1000 * 1000 / 8 diff --git a/ste/concurrencyTuner.go b/ste/concurrencyTuner.go index ed9b0e50b0..4eb3d59bfc 100644 --- a/ste/concurrencyTuner.go +++ b/ste/concurrencyTuner.go @@ -21,7 +21,6 @@ package ste import ( - "github.com/Azure/azure-storage-azcopy/v10/common" "sync" "sync/atomic" ) @@ -73,7 +72,6 @@ type autoConcurrencyTuner struct { } initialConcurrency int maxConcurrency int - cpuMonitor common.CPUMonitor callbacksWhenStable chan func() finalReason string finalConcurrency int diff --git a/ste/downloader-blob.go b/ste/downloader-blob.go index a0475d3d76..1dbe4f9f11 100644 --- a/ste/downloader-blob.go +++ b/ste/downloader-blob.go @@ -38,9 +38,6 @@ type blobDownloader struct { // used to avoid downloading zero ranges of page blobs pageRangeOptimizer *pageRangeOptimizer - // used to avoid re-setting file mode - setMode bool - jptm IJobPartTransferMgr txInfo TransferInfo } diff --git a/ste/mgr-JobMgr.go b/ste/mgr-JobMgr.go index 92aebe3db6..3d3a64cdeb 100755 --- a/ste/mgr-JobMgr.go +++ b/ste/mgr-JobMgr.go @@ -319,9 +319,6 @@ type jobMgr struct { // only a single instance of the prompter is needed for all transfers overwritePrompter *overwritePrompter - // must have a single instance of this, for the whole job - folderCreationTracker FolderCreationTracker - initMu *sync.Mutex initState *jobMgrInitState From 46c107d3c074addfe4289972e3de16145c10f60e Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Thu, 5 Jan 2023 11:17:11 -0800 Subject: [PATCH 20/50] Fixed a few more unused errors --- cmd/copyUtil.go | 2 -- e2etest/declarativeHelpers.go | 1 + e2etest/declarativeTestFiles.go | 1 + e2etest/declarativeWithPropertyProviders.go | 11 +++++++++-- 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/cmd/copyUtil.go b/cmd/copyUtil.go index 825f81844d..b5b7d3e98d 100644 --- a/cmd/copyUtil.go +++ b/cmd/copyUtil.go @@ -44,8 +44,6 @@ type copyHandlerUtil struct{} // TODO: Need be replaced with anonymous embedded field technique. var gCopyUtil = copyHandlerUtil{} -const wildCard = "*" - // checks if a given url points to a container or virtual directory, as opposed to a blob or prefix match func (util copyHandlerUtil) urlIsContainerOrVirtualDirectory(url *url.URL) bool { if azblob.NewBlobURLParts(*url).IPEndpointStyleInfo.AccountName == "" { diff --git a/e2etest/declarativeHelpers.go b/e2etest/declarativeHelpers.go index 169cb684c2..754e6d9fe9 100644 --- a/e2etest/declarativeHelpers.go +++ b/e2etest/declarativeHelpers.go @@ -233,6 +233,7 @@ func (o Operation) includes(item Operation) bool { // /////////// +//nolint:unused var eTestFromTo = TestFromTo{} // TestFromTo is similar to common/FromTo, except that it can have cases where one value represents many possibilities diff --git a/e2etest/declarativeTestFiles.go b/e2etest/declarativeTestFiles.go index 49529072fd..e38435d4a7 100644 --- a/e2etest/declarativeTestFiles.go +++ b/e2etest/declarativeTestFiles.go @@ -285,6 +285,7 @@ func f(n string, properties ...withPropertyProvider) *testObject { } // define a folder, in the expectations lists on a testFiles struct +//nolint:unused func folder(n string, properties ...withPropertyProvider) *testObject { name := strings.TrimLeft(n, "/") result := f(name, properties...) 
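Since f() and folder() annotated above are the entry points of the declarative e2e DSL, a usage sketch may help; the field values are invented, and the shouldTransfer field is inferred from the cloneShouldTransfers helper rather than quoted from the source:

    // Hypothetical expectation list in the style the comments above describe:
    // f() declares a file, folder() a folder, and "with" attaches properties.
    var files = testFiles{
    	defaultSize: "1K",
    	shouldTransfer: []interface{}{
    		folder(""), // the root of the transfer
    		f("file1.txt"),
    		folder("subdir"),
    		f("subdir/file2.txt", with{size: "4M"}),
    	},
    }
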
diff --git a/e2etest/declarativeWithPropertyProviders.go b/e2etest/declarativeWithPropertyProviders.go index 801ec4c120..18e1acef95 100644 --- a/e2etest/declarativeWithPropertyProviders.go +++ b/e2etest/declarativeWithPropertyProviders.go @@ -65,6 +65,7 @@ func (with) appliesToVerification() bool { // maps non-nillable fields (which are easy to create in the tests) to nillable ones, which have clearer meaning in // the resourceManagers. +//nolint:unused func (w with) createObjectProperties() *objectProperties { result := &objectProperties{} populated := false @@ -173,12 +174,12 @@ func (w with) createObjectProperties() *objectProperties { // use createOnly if you want to define properties that should be used when creating an object, but not // used when verifying the state of the transferred object. Generally you'll have no use for this. // Just use "with", and the test framework will do the right thing. -//nolint +//nolint:unused type createOnly struct { with } -//nolint +//nolint:unused func (createOnly) appliesToVerification() bool { return false } @@ -198,8 +199,10 @@ func (verifyOnly) appliesToCreation() bool { //// // use withDirStubMetadata to say that file should be created with metadata that says its a directory stub, and it should have zero size +//nolint:unused type withDirStubMetadata struct{} +//nolint:unused func (withDirStubMetadata) appliesToCreation() bool { return true } @@ -225,6 +228,7 @@ func (withDirStubMetadata) createObjectProperties() *objectProperties { // It allows you to say what the error should be // TODO: as at 1 July 2020, we are not actually validating these. Should we? It could be nice. If we don't, // remove this type and its usages, and the expectedFailureProvider interface +//nolint:unused type withError struct { msg string } @@ -233,14 +237,17 @@ func (withError) appliesToCreation() bool { return false } +//nolint:unused func (withError) appliesToVerification() bool { return false } +//nolint:unused func (withError) createObjectProperties() *objectProperties { return nil // implementing withPropertyProvider is just to trick the type system into letting us pass this to f() and folder(). 
Our implementation doesn't DO anything } +//nolint:unused func (w withError) expectedFailure() string { return w.msg } From dc57b3169d6c6ff875408df6a1e63fae9d7ac1de Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Thu, 5 Jan 2023 11:20:45 -0800 Subject: [PATCH 21/50] Undo generated file changes --- azbfs/zz_generated_filesystem.go | 13 +++++++------ azbfs/zz_generated_path.go | 11 ++++++----- azbfs/zz_generated_responder_policy.go | 4 ++-- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/azbfs/zz_generated_filesystem.go b/azbfs/zz_generated_filesystem.go index 0ba2d38330..49aab19ade 100644 --- a/azbfs/zz_generated_filesystem.go +++ b/azbfs/zz_generated_filesystem.go @@ -9,6 +9,7 @@ import ( "encoding/json" "github.com/Azure/azure-pipeline-go/pipeline" "io" + "io/ioutil" "net/http" "net/url" "strconv" @@ -93,7 +94,7 @@ func (client filesystemClient) createResponder(resp pipeline.Response) (pipeline if resp == nil { return nil, err } - io.Copy(io.Discard, resp.Response().Body) + io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &FilesystemCreateResponse{rawResponse: resp.Response()}, err } @@ -176,7 +177,7 @@ func (client filesystemClient) deleteResponder(resp pipeline.Response) (pipeline if resp == nil { return nil, err } - io.Copy(io.Discard, resp.Response().Body) + io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &FilesystemDeleteResponse{rawResponse: resp.Response()}, err } @@ -243,7 +244,7 @@ func (client filesystemClient) getPropertiesResponder(resp pipeline.Response) (p if resp == nil { return nil, err } - io.Copy(io.Discard, resp.Response().Body) + io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &FilesystemGetPropertiesResponse{rawResponse: resp.Response()}, err } @@ -326,7 +327,7 @@ func (client filesystemClient) listResponder(resp pipeline.Response) (pipeline.R return result, err } defer resp.Response().Body.Close() - b, err := io.ReadAll(resp.Response().Body) + b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return result, err } @@ -435,7 +436,7 @@ func (client filesystemClient) listPathsResponder(resp pipeline.Response) (pipel return result, err } defer resp.Response().Body.Close() - b, err := io.ReadAll(resp.Response().Body) + b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return result, err } @@ -530,7 +531,7 @@ func (client filesystemClient) setPropertiesResponder(resp pipeline.Response) (p if resp == nil { return nil, err } - io.Copy(io.Discard, resp.Response().Body) + io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &FilesystemSetPropertiesResponse{rawResponse: resp.Response()}, err } diff --git a/azbfs/zz_generated_path.go b/azbfs/zz_generated_path.go index 75a607f454..83f3769108 100644 --- a/azbfs/zz_generated_path.go +++ b/azbfs/zz_generated_path.go @@ -8,6 +8,7 @@ import ( "context" "github.com/Azure/azure-pipeline-go/pipeline" "io" + "io/ioutil" "net/http" "net/url" "strconv" @@ -220,7 +221,7 @@ func (client pathClient) createResponder(resp pipeline.Response) (pipeline.Respo if resp == nil { return nil, err } - io.Copy(io.Discard, resp.Response().Body) + io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &PathCreateResponse{rawResponse: resp.Response()}, err } @@ -322,7 +323,7 @@ func (client pathClient) deleteResponder(resp pipeline.Response) (pipeline.Respo if resp == nil { return nil, err } - io.Copy(io.Discard, resp.Response().Body) + io.Copy(ioutil.Discard, 
resp.Response().Body) resp.Response().Body.Close() return &PathDeleteResponse{rawResponse: resp.Response()}, err } @@ -430,7 +431,7 @@ func (client pathClient) getPropertiesResponder(resp pipeline.Response) (pipelin if resp == nil { return nil, err } - io.Copy(io.Discard, resp.Response().Body) + io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &PathGetPropertiesResponse{rawResponse: resp.Response()}, err } @@ -547,7 +548,7 @@ func (client pathClient) leaseResponder(resp pipeline.Response) (pipeline.Respon if resp == nil { return nil, err } - io.Copy(io.Discard, resp.Response().Body) + io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &PathLeaseResponse{rawResponse: resp.Response()}, err } @@ -861,7 +862,7 @@ func (client pathClient) updateResponder(resp pipeline.Response) (pipeline.Respo if resp == nil { return nil, err } - io.Copy(io.Discard, resp.Response().Body) + io.Copy(ioutil.Discard, resp.Response().Body) resp.Response().Body.Close() return &PathUpdateResponse{rawResponse: resp.Response()}, err } diff --git a/azbfs/zz_generated_responder_policy.go b/azbfs/zz_generated_responder_policy.go index 1d54e49d33..9c35c7723e 100644 --- a/azbfs/zz_generated_responder_policy.go +++ b/azbfs/zz_generated_responder_policy.go @@ -8,7 +8,7 @@ import ( "context" "encoding/json" "github.com/Azure/azure-pipeline-go/pipeline" - "io" + "io/ioutil" ) type responder func(resp pipeline.Response) (result pipeline.Response, err error) @@ -53,7 +53,7 @@ func validateResponse(resp pipeline.Response, successStatusCodes ...int) error { // only close the body in the failure case. in the // success case responders will close the body as required. defer resp.Response().Body.Close() - b, err := io.ReadAll(resp.Response().Body) + b, err := ioutil.ReadAll(resp.Response().Body) if err != nil { return err } From 3d57e91f56977c49f06ac67c7f76ac02af1047b7 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Thu, 5 Jan 2023 11:27:23 -0800 Subject: [PATCH 22/50] resolved more unused --- e2etest/declarativeRunner.go | 2 ++ e2etest/declarativeTestFiles.go | 4 ++++ e2etest/declarativeWithPropertyProviders.go | 10 +++++++++- 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/e2etest/declarativeRunner.go b/e2etest/declarativeRunner.go index dc762ceab6..b521474e50 100644 --- a/e2etest/declarativeRunner.go +++ b/e2etest/declarativeRunner.go @@ -44,9 +44,11 @@ var validCredTypesPerLocation = map[common.Location][]common.CredentialType{ common.ELocation.GCP(): {common.ECredentialType.GoogleAppCredentials()}, } +//nolint:unused var allCredentialTypes []common.CredentialType = nil // var oAuthOnly = []common.CredentialType{common.ECredentialType.OAuthToken()} +//nolint:unused var anonymousAuthOnly = []common.CredentialType{common.ECredentialType.Anonymous()} func getValidCredCombinationsForFromTo(fromTo common.FromTo, requestedCredentialTypesSrc, requestedCredentialTypesDst []common.CredentialType, accountTypes []AccountType) [][2]common.CredentialType { diff --git a/e2etest/declarativeTestFiles.go b/e2etest/declarativeTestFiles.go index e38435d4a7..984cb7336a 100644 --- a/e2etest/declarativeTestFiles.go +++ b/e2etest/declarativeTestFiles.go @@ -220,6 +220,7 @@ func (t *testObject) isRootFolder() bool { // It is to be used ONLY as parameters to the f() and folder() methods. 
// It is not used in other parts of the code, since the other parts use the testObject instances that are created // from +//nolint:unused type withPropertyProvider interface { appliesToCreation() bool appliesToVerification() bool @@ -227,6 +228,7 @@ type withPropertyProvider interface { createObjectProperties() *objectProperties } +//nolint:unused type expectedFailureProvider interface { expectedFailure() string } @@ -241,6 +243,7 @@ type expectedFailureProvider interface { // You can also add withFailureMessage{"message"} to files that are expected to fail, to specify what the expected // failure message will be in the log. // And withStubMetadata{} to supply the metadata that indicates that an object is a directory stub. +//nolint:unused func f(n string, properties ...withPropertyProvider) *testObject { haveCreationProperties := false haveVerificationProperties := false @@ -327,6 +330,7 @@ type testFiles struct { shouldSkip []interface{} } +//nolint:unused func (tf testFiles) cloneShouldTransfers() testFiles { return testFiles{ defaultSize: tf.defaultSize, diff --git a/e2etest/declarativeWithPropertyProviders.go b/e2etest/declarativeWithPropertyProviders.go index 18e1acef95..25ff00c214 100644 --- a/e2etest/declarativeWithPropertyProviders.go +++ b/e2etest/declarativeWithPropertyProviders.go @@ -33,6 +33,7 @@ import ( // This is the main property provider, and the only one most tests will ever need. // For ease of use, and conciseness in the tests, the members of this struct are NOT pointers. // Instead, default values in these structs are mapped to nils, inside the createObjectProperties method. +//nolint:unused type with struct { size string // uses our standard K, M, G suffix @@ -55,10 +56,12 @@ type with struct { cpkByValue bool } +//nolint:unused func (with) appliesToCreation() bool { return true } +//nolint:unused func (with) appliesToVerification() bool { return true } @@ -187,11 +190,13 @@ func (createOnly) appliesToVerification() bool { //// // Use verifyOnly if you need to specify some properties that should NOT be applied to the file when it is created, -// but should be present on it afte) the transfer +// but should be present on it after) the transfer +//nolint:unused type verifyOnly struct { with } +//nolint:unused func (verifyOnly) appliesToCreation() bool { return false } @@ -207,10 +212,12 @@ func (withDirStubMetadata) appliesToCreation() bool { return true } +//nolint:unused func (withDirStubMetadata) appliesToVerification() bool { return true // since IF we ever do move these stubs, we expect them to retain their stub metadata } +//nolint:unused func (withDirStubMetadata) createObjectProperties() *objectProperties { m := map[string]string{"hdi_isfolder": "true"} // special flag that says this file is a stub size := int64(0) @@ -233,6 +240,7 @@ type withError struct { msg string } +//nolint:unused func (withError) appliesToCreation() bool { return false } From 54b0defd69709a92d0650339e641c911f12e7a62 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Thu, 5 Jan 2023 11:43:52 -0800 Subject: [PATCH 23/50] exclude S1008 --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 037d41499b..3a79166ff5 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -35,7 +35,7 @@ jobs: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin echo 'Installation complete' $(go env GOPATH)/bin/golangci-lint --version - $(go env 
GOPATH)/bin/golangci-lint run --tests=false --max-issues-per-linter=0 --skip-files=azbfs/zz_generated_* > lint.log + $(go env GOPATH)/bin/golangci-lint run --tests=false --max-issues-per-linter=0 --skip-files=azbfs/zz_generated_* --exclude=S1008 > lint.log result=$(cat lint.log | wc -l) if [ $result -ne 0 ]; then echo "-----------------------------------" From 2ffb0938290918d3c227e184baedee60b399dad1 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Thu, 5 Jan 2023 13:10:12 -0800 Subject: [PATCH 24/50] resolved all lint issues --- azbfs/zc_policy_retry.go | 3 +-- cmd/copyEnumeratorInit.go | 6 +++--- cmd/credentialUtil.go | 2 +- common/oauthTokenManager.go | 14 +++++++------- common/parallel/FileSystemCrawler.go | 2 +- ste/JobPartPlan.go | 2 +- ste/JobPartPlanFileName.go | 4 ++-- ste/concurrencyTuner.go | 6 +++--- testSuite/cmd/list.go | 2 +- 9 files changed, 20 insertions(+), 21 deletions(-) diff --git a/azbfs/zc_policy_retry.go b/azbfs/zc_policy_retry.go index 36e36f6c6e..c5f0e4f797 100644 --- a/azbfs/zc_policy_retry.go +++ b/azbfs/zc_policy_retry.go @@ -253,8 +253,7 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { action = "Retry: Secondary URL returned 404" case err != nil: // NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation - // TODO: AAdd ignore for this error SA1019 - if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) { + if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) { //nolint:SA1019 action = "Retry: net.Error and Temporary() or Timeout()" } else if err == io.ErrUnexpectedEOF { // Some of our methods under the zz_ files do use io.Copy and other related methods that can throw an unexpectedEOF. diff --git a/cmd/copyEnumeratorInit.go b/cmd/copyEnumeratorInit.go index debfb7353c..eceb07b173 100755 --- a/cmd/copyEnumeratorInit.go +++ b/cmd/copyEnumeratorInit.go @@ -76,7 +76,7 @@ func (cca *CookedCopyCmdArgs) initEnumerator(jobPartOrder common.CopyJobPartOrde (cca.FromTo.From() == common.ELocation.File() && !cca.FromTo.To().IsRemote()) || // If download, we still need LMT and MD5 from files. (cca.FromTo.From() == common.ELocation.File() && cca.FromTo.To().IsRemote() && (cca.s2sSourceChangeValidation || cca.IncludeAfter != nil || cca.IncludeBefore != nil)) || // If S2S from File to *, and sourceChangeValidation is enabled, we get properties so that we have LMTs. Likewise, if we are using includeAfter or includeBefore, which require LMTs. (cca.FromTo.From().IsRemote() && cca.FromTo.To().IsRemote() && cca.s2sPreserveProperties && !cca.s2sGetPropertiesInBackend) // If S2S and preserve properties AND get properties in backend is on, turn this off, as properties will be obtained in the backend. - jobPartOrder.S2SGetPropertiesInBackend = cca.s2sPreserveProperties && !getRemoteProperties && cca.s2sGetPropertiesInBackend // Infer GetProperties if GetPropertiesInBackend is enabled. + jobPartOrder.S2SGetPropertiesInBackend = cca.s2sPreserveProperties && !getRemoteProperties && cca.s2sGetPropertiesInBackend // Infer GetProperties if GetPropertiesInBackend is enabled. 
jobPartOrder.S2SSourceChangeValidation = cca.s2sSourceChangeValidation jobPartOrder.DestLengthValidation = cca.CheckLength jobPartOrder.S2SInvalidMetadataHandleOption = cca.s2sInvalidMetadataHandleOption @@ -455,7 +455,8 @@ func (cca *CookedCopyCmdArgs) createDstContainer(containerName string, dstWithSA var dstCredInfo common.CredentialInfo // 3minutes is enough time to list properties of a container, and create new if it does not exist. - ctx, _ := context.WithTimeout(parentCtx, time.Minute*3) + ctx, cancel := context.WithTimeout(parentCtx, time.Minute*3) + defer cancel() if dstCredInfo, _, err = GetCredentialInfoForLocation(ctx, cca.FromTo.To(), cca.Destination.Value, cca.Destination.SAS, false, cca.CpkOptions); err != nil { return err } @@ -537,7 +538,6 @@ func (cca *CookedCopyCmdArgs) createDstContainer(containerName string, dstWithSA default: panic(fmt.Sprintf("cannot create a destination container at location %s.", cca.FromTo.To())) } - return } diff --git a/cmd/credentialUtil.go b/cmd/credentialUtil.go index 8a910ed48f..060f03f90b 100644 --- a/cmd/credentialUtil.go +++ b/cmd/credentialUtil.go @@ -343,7 +343,7 @@ func oAuthTokenExists() (oauthTokenExists bool) { if hasCachedToken, err := uotm.HasCachedToken(); hasCachedToken { oauthTokenExists = true // TODO : Add lint ignore for this SA9003 - } else if err != nil { + } else if err != nil { //nolint:SA9003 // Log the error if fail to get cached token, as these are unhandled errors, and should not influence the logic flow. // Uncomment for debugging. // glcm.Info(fmt.Sprintf("No cached token found, %v", err)) diff --git a/common/oauthTokenManager.go b/common/oauthTokenManager.go index 69656ba421..6a78366930 100644 --- a/common/oauthTokenManager.go +++ b/common/oauthTokenManager.go @@ -91,7 +91,7 @@ func newAzcopyHTTPClient() *http.Client { Timeout: 10 * time.Second, KeepAlive: 10 * time.Second, DualStack: true, - }).Dial, /*Context*/ + }).Dial, /*Context*/ MaxIdleConns: 0, // No limit MaxIdleConnsPerHost: 1000, IdleConnTimeout: 180 * time.Second, @@ -257,8 +257,8 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromSecret(ctx context.Context) (*ada // Read a potentially encrypted PKCS block func readPKCSBlock(block *pem.Block, secret []byte, parseFunc func([]byte) (interface{}, error)) (pk interface{}, err error) { // Reduce code duplication by baking the parse functions into this - if x509.IsEncryptedPEMBlock(block) { - data, err := x509.DecryptPEMBlock(block, secret) + if x509.IsEncryptedPEMBlock(block) { //nolint:SA1019 + data, err := x509.DecryptPEMBlock(block, secret) //nolint:SA1019 if err == nil { pk, err = parseFunc(data) @@ -804,7 +804,7 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.T req, resp, errArcVM := credInfo.queryIMDS(ctx, MSIEndpointArcVM, targetResource, IMDSAPIVersionArcVM) if errArcVM != nil { // Try Azure VM since there was an error in trying Arc VM - reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) + reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) //nolint:SA4006 if errAzureVM != nil { var serr syscall.Errno if errors.As(errArcVM, &serr) { @@ -832,17 +832,17 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.T } // Arc IMDS failed with error, but Azure IMDS succeeded - req, resp = reqAzureVM, respAzureVM + req, resp = reqAzureVM, respAzureVM //nolint:SA4006 } else if !isValidArcResponse(resp) { // Not 
valid response from ARC IMDS endpoint. Perhaps some other process listening on it. Try Azure IMDS endpoint as fallback option. - reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) + reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) //nolint:SA4006 if errAzureVM != nil { // Neither Arc nor Azure VM IMDS endpoint available. Can't use MSI. return nil, fmt.Errorf("invalid response received from Arc IMDS endpoint (%s), probably some unknown process listening. If this an Azure VM, please check whether MSI is enabled, to enable MSI please refer to https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-identity-on-an-existing-vm: %v", MSIEndpointArcVM, errAzureVM) } // Azure VM IMDS endpoint ok! - req, resp = reqAzureVM, respAzureVM + req, resp = reqAzureVM, respAzureVM //nolint:SA4006 } else { // Valid response received from ARC IMDS endpoint. Proceed with the next step. challengeTokenPath := strings.Split(resp.Header["Www-Authenticate"][0], "=")[1] diff --git a/common/parallel/FileSystemCrawler.go b/common/parallel/FileSystemCrawler.go index f8cf4a4a33..61f2c69bbd 100644 --- a/common/parallel/FileSystemCrawler.go +++ b/common/parallel/FileSystemCrawler.go @@ -100,6 +100,7 @@ func Walk(appCtx context.Context, root string, parallelism int, parallelStat boo defer reader.Close() ctx, cancel = context.WithCancel(appCtx) + defer cancel() ch := CrawlLocalDirectory(ctx, root, remainingParallelism, reader) for crawlResult := range ch { entry, err := crawlResult.Item() @@ -116,7 +117,6 @@ func Walk(appCtx context.Context, root string, parallelism int, parallelStat boo } } if err != nil { - cancel() return } } diff --git a/ste/JobPartPlan.go b/ste/JobPartPlan.go index bf9bbd975a..ba00395f65 100644 --- a/ste/JobPartPlan.go +++ b/ste/JobPartPlan.go @@ -262,7 +262,7 @@ func (jpph *JobPartPlanHeader) TransferSrcPropertiesAndMetadata(transferIndex ui if t.SrcBlobTagsLength != 0 { blobTagsString := jpph.getString(offset, t.SrcBlobTagsLength) blobTags = common.ToCommonBlobTagsMap(blobTagsString) - offset += int64(t.SrcBlobTagsLength) + offset += int64(t.SrcBlobTagsLength) //nolint:ineffassign } return } diff --git a/ste/JobPartPlanFileName.go b/ste/JobPartPlanFileName.go index ece41b9962..f568bd2fe4 100644 --- a/ste/JobPartPlanFileName.go +++ b/ste/JobPartPlanFileName.go @@ -43,7 +43,7 @@ func (jpfn JobPartPlanFileName) Parse() (jobID common.JobID, partNumber common.P jobId, err := common.ParseJobID(jpfnSplit[0]) if err != nil { err = fmt.Errorf("failed to parse the JobId from JobPartFileName %s. Failed with error %s", string(jpfn), err.Error()) - // TODO: return here on error? + // TODO: return here on error? 
or ignore } jobID = jobId n, err := fmt.Sscanf(jpfnSplit[1], "%05d.steV%d", &partNumber, &dataSchemaVersion) @@ -120,7 +120,7 @@ func (jpfn JobPartPlanFileName) Create(order common.CopyJobPartOrderRequest) { rv := reflect.ValueOf(v) structSize := reflect.TypeOf(v).Elem().Size() slice := reflect.SliceHeader{Data: rv.Pointer(), Len: int(structSize), Cap: int(structSize)} - byteSlice := *(*[]byte)(unsafe.Pointer(&slice)) + byteSlice := *(*[]byte)(unsafe.Pointer(&slice)) //nolint:govet err := binary.Write(writer, binary.LittleEndian, byteSlice) common.PanicIfErr(err) return int64(structSize) diff --git a/ste/concurrencyTuner.go b/ste/concurrencyTuner.go index 4eb3d59bfc..5d96c09c68 100644 --- a/ste/concurrencyTuner.go +++ b/ste/concurrencyTuner.go @@ -231,10 +231,10 @@ func (t *autoConcurrencyTuner) worker() { } if multiplier < minMulitplier { - break // no point in tuning any more + break // no point in tuning anymore } else { - lastReason = t.setConcurrency(concurrency, concurrencyReasonBackoff) - lastSpeed, _ = t.getCurrentSpeed() // must re-measure immediately after backing off + lastReason = t.setConcurrency(concurrency, concurrencyReasonBackoff) //nolint:SA4006 + lastSpeed, _ = t.getCurrentSpeed() // must re-measure immediately after backing off } } } diff --git a/testSuite/cmd/list.go b/testSuite/cmd/list.go index 878e9c4aec..92dd9d8fdb 100644 --- a/testSuite/cmd/list.go +++ b/testSuite/cmd/list.go @@ -117,7 +117,7 @@ func listContainer(resourceUrl string, numberOfresource int64) { if len(searchPrefix) > 0 { // strip away search prefix from the blob name. // TODO: Ignore this in lint ineffassign - blobName = strings.Replace(blobName, searchPrefix, "", 1) + blobName = strings.Replace(blobName, searchPrefix, "", 1) //nolint:ineffassign } numberOfblobs++ } From fc9dc208d990d5a17704489b4a76da885163a6e2 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Thu, 5 Jan 2023 13:15:36 -0800 Subject: [PATCH 25/50] Fix static check --- azbfs/zc_policy_retry.go | 2 +- cmd/credentialUtil.go | 2 +- common/oauthTokenManager.go | 12 ++++++------ ste/concurrencyTuner.go | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/azbfs/zc_policy_retry.go b/azbfs/zc_policy_retry.go index c5f0e4f797..6fa214bc39 100644 --- a/azbfs/zc_policy_retry.go +++ b/azbfs/zc_policy_retry.go @@ -253,7 +253,7 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { action = "Retry: Secondary URL returned 404" case err != nil: // NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation - if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) { //nolint:SA1019 + if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) { //nolint:staticcheck action = "Retry: net.Error and Temporary() or Timeout()" } else if err == io.ErrUnexpectedEOF { // Some of our methods under the zz_ files do use io.Copy and other related methods that can throw an unexpectedEOF. diff --git a/cmd/credentialUtil.go b/cmd/credentialUtil.go index 060f03f90b..7a2090841d 100644 --- a/cmd/credentialUtil.go +++ b/cmd/credentialUtil.go @@ -343,7 +343,7 @@ func oAuthTokenExists() (oauthTokenExists bool) { if hasCachedToken, err := uotm.HasCachedToken(); hasCachedToken { oauthTokenExists = true // TODO : Add lint ignore for this SA9003 - } else if err != nil { //nolint:SA9003 + } else if err != nil { //nolint:staticcheck // Log the error if fail to get cached token, as these are unhandled errors, and should not influence the logic flow. 
// Uncomment for debugging. // glcm.Info(fmt.Sprintf("No cached token found, %v", err)) diff --git a/common/oauthTokenManager.go b/common/oauthTokenManager.go index 6a78366930..784e801a65 100644 --- a/common/oauthTokenManager.go +++ b/common/oauthTokenManager.go @@ -257,8 +257,8 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromSecret(ctx context.Context) (*ada // Read a potentially encrypted PKCS block func readPKCSBlock(block *pem.Block, secret []byte, parseFunc func([]byte) (interface{}, error)) (pk interface{}, err error) { // Reduce code duplication by baking the parse functions into this - if x509.IsEncryptedPEMBlock(block) { //nolint:SA1019 - data, err := x509.DecryptPEMBlock(block, secret) //nolint:SA1019 + if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck + data, err := x509.DecryptPEMBlock(block, secret) //nolint:staticcheck if err == nil { pk, err = parseFunc(data) @@ -804,7 +804,7 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.T req, resp, errArcVM := credInfo.queryIMDS(ctx, MSIEndpointArcVM, targetResource, IMDSAPIVersionArcVM) if errArcVM != nil { // Try Azure VM since there was an error in trying Arc VM - reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) //nolint:SA4006 + reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) //nolint:staticcheck if errAzureVM != nil { var serr syscall.Errno if errors.As(errArcVM, &serr) { @@ -832,17 +832,17 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.T } // Arc IMDS failed with error, but Azure IMDS succeeded - req, resp = reqAzureVM, respAzureVM //nolint:SA4006 + req, resp = reqAzureVM, respAzureVM //nolint:staticcheck } else if !isValidArcResponse(resp) { // Not valid response from ARC IMDS endpoint. Perhaps some other process listening on it. Try Azure IMDS endpoint as fallback option. - reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) //nolint:SA4006 + reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) //nolint:staticcheck if errAzureVM != nil { // Neither Arc nor Azure VM IMDS endpoint available. Can't use MSI. return nil, fmt.Errorf("invalid response received from Arc IMDS endpoint (%s), probably some unknown process listening. If this an Azure VM, please check whether MSI is enabled, to enable MSI please refer to https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-identity-on-an-existing-vm: %v", MSIEndpointArcVM, errAzureVM) } // Azure VM IMDS endpoint ok! - req, resp = reqAzureVM, respAzureVM //nolint:SA4006 + req, resp = reqAzureVM, respAzureVM //nolint:staticcheck } else { // Valid response received from ARC IMDS endpoint. Proceed with the next step. 
challengeTokenPath := strings.Split(resp.Header["Www-Authenticate"][0], "=")[1] diff --git a/ste/concurrencyTuner.go b/ste/concurrencyTuner.go index 5d96c09c68..3b03dad6f1 100644 --- a/ste/concurrencyTuner.go +++ b/ste/concurrencyTuner.go @@ -233,7 +233,7 @@ func (t *autoConcurrencyTuner) worker() { if multiplier < minMulitplier { break // no point in tuning anymore } else { - lastReason = t.setConcurrency(concurrency, concurrencyReasonBackoff) //nolint:SA4006 + lastReason = t.setConcurrency(concurrency, concurrencyReasonBackoff) //nolint:staticcheck lastSpeed, _ = t.getCurrentSpeed() // must re-measure immediately after backing off } } From 30b38c20025746b2807363f3e6116194934f15b5 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Thu, 5 Jan 2023 13:19:32 -0800 Subject: [PATCH 26/50] resolve last couple checks --- ste/JobPartPlanFileName.go | 2 +- testSuite/cmd/list.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/ste/JobPartPlanFileName.go b/ste/JobPartPlanFileName.go index f568bd2fe4..0da1e5f60d 100644 --- a/ste/JobPartPlanFileName.go +++ b/ste/JobPartPlanFileName.go @@ -42,7 +42,7 @@ func (jpfn JobPartPlanFileName) Parse() (jobID common.JobID, partNumber common.P jpfnSplit := strings.Split(string(jpfn), "--") jobId, err := common.ParseJobID(jpfnSplit[0]) if err != nil { - err = fmt.Errorf("failed to parse the JobId from JobPartFileName %s. Failed with error %s", string(jpfn), err.Error()) + err = fmt.Errorf("failed to parse the JobId from JobPartFileName %s. Failed with error %s", string(jpfn), err.Error()) //nolint:staticcheck // TODO: return here on error? or ignore } jobID = jobId diff --git a/testSuite/cmd/list.go b/testSuite/cmd/list.go index 92dd9d8fdb..b1f27c7c80 100644 --- a/testSuite/cmd/list.go +++ b/testSuite/cmd/list.go @@ -116,8 +116,7 @@ func listContainer(resourceUrl string, numberOfresource int64) { blobName := blobInfo.Name if len(searchPrefix) > 0 { // strip away search prefix from the blob name. - // TODO: Ignore this in lint ineffassign - blobName = strings.Replace(blobName, searchPrefix, "", 1) //nolint:ineffassign + blobName = strings.Replace(blobName, searchPrefix, "", 1) //nolint:ineffassign,staticcheck } numberOfblobs++ } From 2e1f3cd2f27e0d9613d55fa0656f02b82cc3ad29 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Thu, 5 Jan 2023 13:20:19 -0800 Subject: [PATCH 27/50] remove workflow --- .github/workflows/golangci-lint.yml | 38 ----------------------------- 1 file changed, 38 deletions(-) delete mode 100644 .github/workflows/golangci-lint.yml diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml deleted file mode 100644 index 05aa008891..0000000000 --- a/.github/workflows/golangci-lint.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: golangci-lint -on: - push: - branches: - - dev - - main - pull_request: - branches: - - dev - - main -permissions: - contents: read - # Optional: allow read access to pull request. Use with `only-new-issues` option. - # pull-requests: read -jobs: - golangci: - strategy: - matrix: - go: [1.17] - os: [macos-latest, windows-latest] - name: lint - runs-on: ${{ matrix.os }} - steps: - - uses: actions/setup-go@v3 - with: - go-version: ${{ matrix.go }} - - uses: actions/checkout@v3 - - name: golangci-lint - uses: golangci/golangci-lint-action@v3 - with: - # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. - version: v1.29 - - # Optional: golangci-lint command line arguments. 
- # args: --issues-exit-code=0 - - # Optional: show only new issues if it's a pull request. The default value is `false`. - # only-new-issues: true \ No newline at end of file From ca2ea3d9ded87d5ffd61a9c0dbc9393fe5a3eaf4 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Fri, 6 Jan 2023 09:30:33 -0800 Subject: [PATCH 28/50] Added github file --- .github/workflows/golangci-lint.yml | 40 +++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 .github/workflows/golangci-lint.yml diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000000..b608f1a693 --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,40 @@ +name: golangci-lint +on: + push: + branches: + - dev + - main + pull_request: + branches: + - dev + - main +permissions: + contents: read + # Optional: allow read access to pull request. Use with `only-new-issues` option. + # pull-requests: read +jobs: + golangci: + strategy: + matrix: + go: [1.17] + os: [macos-latest, windows-latest] + name: lint + runs-on: ${{ matrix.os }} + steps: + - uses: actions/setup-go@v3 + with: + go-version: ${{ matrix.go }} + - uses: actions/checkout@v3 + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. + version: v1.29 + # Optional: working directory, useful for monorepos + # working-directory: somedir + + # Optional: golangci-lint command line arguments. + args: --tests=false --max-issues-per-linter=0 --skip-files=azbfs/zz_generated_* --exclude=S1008 + + # Optional: show only new issues if it's a pull request. The default value is `false`. + # only-new-issues: true \ No newline at end of file From 35ba21d17fcc67f9369413a00010367e3a0f08bb Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Fri, 6 Jan 2023 09:35:38 -0800 Subject: [PATCH 29/50] v2 --- .github/workflows/golangci-lint.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index b608f1a693..4a1b756900 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -24,12 +24,12 @@ jobs: - uses: actions/setup-go@v3 with: go-version: ${{ matrix.go }} - - uses: actions/checkout@v3 + - uses: actions/checkout@v2 - name: golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v2 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. 
- version: v1.29 + version: v1.46 # Optional: working directory, useful for monorepos # working-directory: somedir From e75eaaabab3695c948310b30620fdef78351d742 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Fri, 6 Jan 2023 09:38:29 -0800 Subject: [PATCH 30/50] ubuntu latest --- .github/workflows/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 4a1b756900..bc9f6d4531 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -17,7 +17,7 @@ jobs: strategy: matrix: go: [1.17] - os: [macos-latest, windows-latest] + os: [ubuntu-latest] name: lint runs-on: ${{ matrix.os }} steps: From d99b38a064d79bc481ac24d502a1ed4eb4795eb8 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Fri, 6 Jan 2023 11:23:25 -0800 Subject: [PATCH 31/50] deadcode --- azbfs/zc_mmf_unix.go | 2 +- azbfs/zc_uuid.go | 3 ++- cmd/credentialUtil.go | 2 +- common/uuid.go | 1 + sddl/sddlHelper_linux.go | 2 +- 5 files changed, 6 insertions(+), 4 deletions(-) diff --git a/azbfs/zc_mmf_unix.go b/azbfs/zc_mmf_unix.go index de835d4d13..5a4349c199 100644 --- a/azbfs/zc_mmf_unix.go +++ b/azbfs/zc_mmf_unix.go @@ -11,7 +11,7 @@ import ( //nolint:unused type mmf []byte -//nolint:unused +//nolint:unused,deadcode func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only if writable { diff --git a/azbfs/zc_uuid.go b/azbfs/zc_uuid.go index be2c279309..8ff5c1e7fe 100644 --- a/azbfs/zc_uuid.go +++ b/azbfs/zc_uuid.go @@ -7,6 +7,7 @@ import ( ) // The UUID reserved variants. +//nolint:deadcode const ( reservedNCS byte = 0x80 reservedRFC4122 byte = 0x40 @@ -39,7 +40,7 @@ func (u uuid) String() string { // ParseUUID parses a string formatted as "003020100-0504-0706-0809-0a0b0c0d0e0f" // or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID. -//nolint:unused +//nolint:unused,deadcode func parseUUID(uuidStr string) uuid { char := func(hexString string) byte { i, _ := strconv.ParseUint(hexString, 16, 8) diff --git a/cmd/credentialUtil.go b/cmd/credentialUtil.go index 5784391c0f..5b4395a811 100644 --- a/cmd/credentialUtil.go +++ b/cmd/credentialUtil.go @@ -360,7 +360,7 @@ func getAzureFileCredentialType() (common.CredentialType, error) { // envVarCredentialType used for passing credential type into AzCopy through environment variable. // Note: This is only used for internal integration, and not encouraged to be used directly. -//nolint:unused +//nolint:unused,deadcode const envVarCredentialType = "AZCOPY_CRED_TYPE" var stashedEnvCredType = "" diff --git a/common/uuid.go b/common/uuid.go index 8c4dcf9c95..e29dd8e225 100644 --- a/common/uuid.go +++ b/common/uuid.go @@ -8,6 +8,7 @@ import ( ) // The JobID reserved variants. +//nolint:deadcode const ( reservedNCS byte = 0x80 reservedRFC4122 byte = 0x40 diff --git a/sddl/sddlHelper_linux.go b/sddl/sddlHelper_linux.go index 9d82ed355e..4db0e88af7 100644 --- a/sddl/sddlHelper_linux.go +++ b/sddl/sddlHelper_linux.go @@ -873,7 +873,7 @@ func aceRightsToString(aceRights uint32) string { // Does the aceType correspond to an object ACE? // We don't support object ACEs. 
-//nolint:unused +//nolint:unused,deadcode func isObjectAce(aceType byte) bool { switch aceType { case ACCESS_ALLOWED_OBJECT_ACE_TYPE, From 25cd90fc18f355ab2cec4846b5cbc186c9bf7632 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Fri, 6 Jan 2023 11:27:22 -0800 Subject: [PATCH 32/50] Added deadcode --- .github/workflows/golangci-lint.yml | 2 +- azbfs/zc_uuid.go | 2 +- e2etest/declarativeHelpers.go | 2 +- e2etest/declarativeRunner.go | 16 ++++++++-------- e2etest/declarativeTestFiles.go | 2 +- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index bc9f6d4531..3e41510353 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -34,7 +34,7 @@ jobs: # working-directory: somedir # Optional: golangci-lint command line arguments. - args: --tests=false --max-issues-per-linter=0 --skip-files=azbfs/zz_generated_* --exclude=S1008 + args: --tests=false --max-issues-per-linter=0 --skip-files=azbfs/zz_generated_* --exclude=S1008 --max-same-issues=0 # Optional: show only new issues if it's a pull request. The default value is `false`. # only-new-issues: true \ No newline at end of file diff --git a/azbfs/zc_uuid.go b/azbfs/zc_uuid.go index 8ff5c1e7fe..75e0476996 100644 --- a/azbfs/zc_uuid.go +++ b/azbfs/zc_uuid.go @@ -7,7 +7,7 @@ import ( ) // The UUID reserved variants. -//nolint:deadcode +//nolint:deadcode,varcheck const ( reservedNCS byte = 0x80 reservedRFC4122 byte = 0x40 diff --git a/e2etest/declarativeHelpers.go b/e2etest/declarativeHelpers.go index 754e6d9fe9..8830a05e58 100644 --- a/e2etest/declarativeHelpers.go +++ b/e2etest/declarativeHelpers.go @@ -233,7 +233,7 @@ func (o Operation) includes(item Operation) bool { // /////////// -//nolint:unused +//nolint:unused,deadcode var eTestFromTo = TestFromTo{} // TestFromTo is similar to common/FromTo, except that it can have cases where one value represents many possibilities diff --git a/e2etest/declarativeRunner.go b/e2etest/declarativeRunner.go index e7bbe15c73..eafcd4fce0 100644 --- a/e2etest/declarativeRunner.go +++ b/e2etest/declarativeRunner.go @@ -44,11 +44,11 @@ var validCredTypesPerLocation = map[common.Location][]common.CredentialType{ common.ELocation.GCP(): {common.ECredentialType.GoogleAppCredentials()}, } -//nolint:unused +//nolint:unused,deadcode var allCredentialTypes []common.CredentialType = nil // var oAuthOnly = []common.CredentialType{common.ECredentialType.OAuthToken()} -//nolint:unused +//nolint:unused,deadcode var anonymousAuthOnly = []common.CredentialType{common.ECredentialType.Anonymous()} func getValidCredCombinationsForFromTo(fromTo common.FromTo, requestedCredentialTypesSrc, requestedCredentialTypesDst []common.CredentialType, accountTypes []AccountType) [][2]common.CredentialType { @@ -111,18 +111,18 @@ func RunScenarios( operations Operation, testFromTo TestFromTo, validate Validate, // TODO: do we really want the test author to have to nominate which validation should happen? Pros: better perf of tests. Cons: they have to tell us, and if they tell us wrong test may not test what they think it tests - // _ interface{}, // TODO if we want it??, blockBlobsOnly or specific/all blob types +// _ interface{}, // TODO if we want it??, blockBlobsOnly or specific/all blob types - // It would be a pain to list out every combo by hand, - // In addition to the fact that not every credential type is sensible. 
- // Thus, the E2E framework takes in a requested set of credential types, and applies them where sensible. - // This allows you to make tests use OAuth only, SAS only, etc. +// It would be a pain to list out every combo by hand, +// In addition to the fact that not every credential type is sensible. +// Thus, the E2E framework takes in a requested set of credential types, and applies them where sensible. +// This allows you to make tests use OAuth only, SAS only, etc. requestedCredentialTypesSrc []common.CredentialType, requestedCredentialTypesDst []common.CredentialType, p params, hs *hooks, fs testFiles, - // TODO: do we need something here to explicitly say that we expect success or failure? For now, we are just inferring that from the elements of sourceFiles +// TODO: do we need something here to explicitly say that we expect success or failure? For now, we are just inferring that from the elements of sourceFiles destAccountType AccountType, srcAccountType AccountType, scenarioSuffix string) { diff --git a/e2etest/declarativeTestFiles.go b/e2etest/declarativeTestFiles.go index 984cb7336a..fe4df620ee 100644 --- a/e2etest/declarativeTestFiles.go +++ b/e2etest/declarativeTestFiles.go @@ -288,7 +288,7 @@ func f(n string, properties ...withPropertyProvider) *testObject { } // define a folder, in the expectations lists on a testFiles struct -//nolint:unused +//nolint:unused,deadcode func folder(n string, properties ...withPropertyProvider) *testObject { name := strings.TrimLeft(n, "/") result := f(name, properties...) From a410848c90ec756912925d8f8d08b94c391ea0c7 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Fri, 6 Jan 2023 11:33:25 -0800 Subject: [PATCH 33/50] Varcheck --- cmd/credentialUtil.go | 2 +- common/uuid.go | 2 +- e2etest/declarativeHelpers.go | 2 +- e2etest/declarativeRunner.go | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/credentialUtil.go b/cmd/credentialUtil.go index 5b4395a811..2790708fc9 100644 --- a/cmd/credentialUtil.go +++ b/cmd/credentialUtil.go @@ -360,7 +360,7 @@ func getAzureFileCredentialType() (common.CredentialType, error) { // envVarCredentialType used for passing credential type into AzCopy through environment variable. // Note: This is only used for internal integration, and not encouraged to be used directly. -//nolint:unused,deadcode +//nolint:unused,deadcode,varcheck const envVarCredentialType = "AZCOPY_CRED_TYPE" var stashedEnvCredType = "" diff --git a/common/uuid.go b/common/uuid.go index e29dd8e225..6ecfc236ef 100644 --- a/common/uuid.go +++ b/common/uuid.go @@ -8,7 +8,7 @@ import ( ) // The JobID reserved variants. 
-//nolint:deadcode +//nolint:deadcode,varcheck const ( reservedNCS byte = 0x80 reservedRFC4122 byte = 0x40 diff --git a/e2etest/declarativeHelpers.go b/e2etest/declarativeHelpers.go index 8830a05e58..9e87d1fb1c 100644 --- a/e2etest/declarativeHelpers.go +++ b/e2etest/declarativeHelpers.go @@ -233,7 +233,7 @@ func (o Operation) includes(item Operation) bool { // /////////// -//nolint:unused,deadcode +//nolint:unused,deadcode,varcheck var eTestFromTo = TestFromTo{} // TestFromTo is similar to common/FromTo, except that it can have cases where one value represents many possibilities diff --git a/e2etest/declarativeRunner.go b/e2etest/declarativeRunner.go index eafcd4fce0..b7dc10dab8 100644 --- a/e2etest/declarativeRunner.go +++ b/e2etest/declarativeRunner.go @@ -44,11 +44,11 @@ var validCredTypesPerLocation = map[common.Location][]common.CredentialType{ common.ELocation.GCP(): {common.ECredentialType.GoogleAppCredentials()}, } -//nolint:unused,deadcode +//nolint:unused,deadcode,varcheck var allCredentialTypes []common.CredentialType = nil // var oAuthOnly = []common.CredentialType{common.ECredentialType.OAuthToken()} -//nolint:unused,deadcode +//nolint:unused,deadcode,varcheck var anonymousAuthOnly = []common.CredentialType{common.ECredentialType.Anonymous()} func getValidCredCombinationsForFromTo(fromTo common.FromTo, requestedCredentialTypesSrc, requestedCredentialTypesDst []common.CredentialType, accountTypes []AccountType) [][2]common.CredentialType { From af442a6aa8d3a18a682740ed07d1b8a7abeb7a7b Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Fri, 6 Jan 2023 11:35:46 -0800 Subject: [PATCH 34/50] Added windows and macos --- .github/workflows/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 3e41510353..d896ce68a5 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -17,7 +17,7 @@ jobs: strategy: matrix: go: [1.17] - os: [ubuntu-latest] + os: [ubuntu-latest, windows-latest, macos-latest] name: lint runs-on: ${{ matrix.os }} steps: From 97d8ef86a2519ad5fb5003442a2be2fa06cf434e Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Fri, 6 Jan 2023 11:39:36 -0800 Subject: [PATCH 35/50] increase timeout --- .github/workflows/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index d896ce68a5..da5cbc3db1 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -34,7 +34,7 @@ jobs: # working-directory: somedir # Optional: golangci-lint command line arguments. - args: --tests=false --max-issues-per-linter=0 --skip-files=azbfs/zz_generated_* --exclude=S1008 --max-same-issues=0 + args: --tests=false --max-issues-per-linter=0 --skip-files=azbfs/zz_generated_* --exclude=S1008 --max-same-issues=0 --timeout 5m0s # Optional: show only new issues if it's a pull request. The default value is `false`. 
# only-new-issues: true \ No newline at end of file From 51cdbd00cadbec7289e7bd85bf0b1497371a65bc Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Fri, 6 Jan 2023 11:52:57 -0800 Subject: [PATCH 36/50] windows lint --- azbfs/zc_mmf_windows.go | 5 ++++- common/credCache_windows.go | 4 ++-- common/mmf_windows.go | 4 ++-- main_windows.go | 14 +------------- testSuite/cmd/mmap_windows.go | 2 +- 5 files changed, 10 insertions(+), 19 deletions(-) diff --git a/azbfs/zc_mmf_windows.go b/azbfs/zc_mmf_windows.go index 550ab85b5a..4e3b6a57a5 100644 --- a/azbfs/zc_mmf_windows.go +++ b/azbfs/zc_mmf_windows.go @@ -7,8 +7,10 @@ import ( "unsafe" ) +//nolint:unused type mmf []byte +//nolint:deadcode func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only if writable { @@ -19,7 +21,7 @@ func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) if hMMF == 0 { return nil, os.NewSyscallError("CreateFileMapping", errno) } - defer syscall.CloseHandle(hMMF) + defer syscall.CloseHandle(hMMF) //nolint:errcheck addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) m := mmf{} h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) @@ -29,6 +31,7 @@ func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) return m, nil } +//nolint:unused func (m *mmf) unmap() { addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) *m = mmf{} diff --git a/common/credCache_windows.go b/common/credCache_windows.go index db55074bcc..8df60cf81e 100644 --- a/common/credCache_windows.go +++ b/common/credCache_windows.go @@ -249,7 +249,7 @@ func encrypt(data []byte, entropy *dataBlob) ([]byte, error) { var outblob dataBlob defer func() { if outblob.pbData != nil { - mLocalFree.Call(uintptr(unsafe.Pointer(outblob.pbData))) + _, _, _ = mLocalFree.Call(uintptr(unsafe.Pointer(outblob.pbData))) } }() @@ -276,7 +276,7 @@ func decrypt(data []byte, entropy *dataBlob) ([]byte, error) { var outblob dataBlob defer func() { if outblob.pbData != nil { - mLocalFree.Call(uintptr(unsafe.Pointer(outblob.pbData))) + _, _, _ = mLocalFree.Call(uintptr(unsafe.Pointer(outblob.pbData))) } }() diff --git a/common/mmf_windows.go b/common/mmf_windows.go index bf1192a4e8..1ee20d8982 100644 --- a/common/mmf_windows.go +++ b/common/mmf_windows.go @@ -60,7 +60,7 @@ func NewMMF(file *os.File, writable bool, offset int64, length int64) (*MMF, err if hMMF == 0 { return nil, os.NewSyscallError("CreateFileMapping", errno) } - defer syscall.CloseHandle(hMMF) + defer syscall.CloseHandle(hMMF) //nolint:errcheck addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) if !writable { @@ -92,7 +92,7 @@ func (m *MMF) Unmap() { // "lazily" to disk; that is, modifications may be cached in memory and written to disk // at a later time. To avoid modifications to be cached in memory,explicitly flushing // modified pages using the FlushViewOfFile function. 
- syscall.FlushViewOfFile(addr, uintptr(m.length)) + _ = syscall.FlushViewOfFile(addr, uintptr(m.length)) err := syscall.UnmapViewOfFile(addr) PanicIfErr(err) m.isMapped = false diff --git a/main_windows.go b/main_windows.go index dd05e37edd..c3ca18496c 100644 --- a/main_windows.go +++ b/main_windows.go @@ -21,27 +21,15 @@ package main import ( + "github.com/minio/minio-go" "math" "net/http" - "os/exec" "path" "strings" - "syscall" - - "github.com/minio/minio-go" "github.com/Azure/azure-storage-azcopy/v10/common" ) -func osModifyProcessCommand(cmd *exec.Cmd) *exec.Cmd { - // On Windows, create the child process in new process group to avoid receiving signals - // (Ctrl+C, Ctrl+Break) from the console - cmd.SysProcAttr = &syscall.SysProcAttr{ - CreationFlags: syscall.CREATE_NEW_PROCESS_GROUP, - } - return cmd -} - // ProcessOSSpecificInitialization changes the soft limit for filedescriptor for process // return the filedescriptor limit for process. If the function fails with some, it returns // the error diff --git a/testSuite/cmd/mmap_windows.go b/testSuite/cmd/mmap_windows.go index 628c62bb17..d1a58e0891 100644 --- a/testSuite/cmd/mmap_windows.go +++ b/testSuite/cmd/mmap_windows.go @@ -18,7 +18,7 @@ func NewMMF(file *os.File, writable bool, offset int64, length int64) (MMF, erro if hMMF == 0 { return nil, os.NewSyscallError("CreateFileMapping", errno) } - defer syscall.CloseHandle(hMMF) + defer syscall.CloseHandle(hMMF) //nolint:errcheck addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) m := MMF{} h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) From 15a59978466cbe2629e7e91737d71f7069644898 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Fri, 6 Jan 2023 12:03:14 -0800 Subject: [PATCH 37/50] more windows errors --- azbfs/zc_mmf_windows.go | 4 ++-- cmd/zc_attr_filter_windows.go | 2 +- common/writeThoughFile_windows.go | 7 ------- ste/downloader-azureFiles_windows.go | 3 ++- testSuite/cmd/mmap_windows.go | 2 +- 5 files changed, 6 insertions(+), 12 deletions(-) diff --git a/azbfs/zc_mmf_windows.go b/azbfs/zc_mmf_windows.go index 4e3b6a57a5..7acbcff972 100644 --- a/azbfs/zc_mmf_windows.go +++ b/azbfs/zc_mmf_windows.go @@ -10,7 +10,7 @@ import ( //nolint:unused type mmf []byte -//nolint:deadcode +//nolint:deadcode,unused func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only if writable { @@ -22,7 +22,7 @@ func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) return nil, os.NewSyscallError("CreateFileMapping", errno) } defer syscall.CloseHandle(hMMF) //nolint:errcheck - addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) + addr, _ := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) m := mmf{} h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) h.Data = addr diff --git a/cmd/zc_attr_filter_windows.go b/cmd/zc_attr_filter_windows.go index 8f26941c17..f90bf835c2 100644 --- a/cmd/zc_attr_filter_windows.go +++ b/cmd/zc_attr_filter_windows.go @@ -46,7 +46,7 @@ func (f *attrFilter) AppliesOnlyToFiles() bool { func (f *attrFilter) DoesPass(storedObject StoredObject) bool { fileName := "" - if strings.Index(f.filePath, "*") == -1 { + if !strings.Contains(f.filePath, "*") { fileName = common.GenerateFullPath(f.filePath, storedObject.relativePath) } else { basePath := 
getPathBeforeFirstWildcard(f.filePath) diff --git a/common/writeThoughFile_windows.go b/common/writeThoughFile_windows.go index d218bdf5db..c1ca1e2bbc 100644 --- a/common/writeThoughFile_windows.go +++ b/common/writeThoughFile_windows.go @@ -125,13 +125,6 @@ func CreateFileOfSizeWithWriteThroughOption(destinationPath string, fileSize int fd, err := doOpen() if err != nil { - // Because a hidden file isn't necessarily a intentional lock on a file, we choose to make it a default override. - toMatchSet := FILE_ATTRIBUTE_HIDDEN - // But, by the opposite nature, readonly is a intentional lock, so we make it a required option. - if forceIfReadOnly { - toMatchSet |= FILE_ATTRIBUTE_READONLY - } - // Let's check what we might need to clear, and if we should retry toClearFlagSet, allFlags, toRetry := getFlagMatches(FILE_ATTRIBUTE_READONLY | FILE_ATTRIBUTE_HIDDEN) diff --git a/ste/downloader-azureFiles_windows.go b/ste/downloader-azureFiles_windows.go index 6da4845e6a..d703f3655e 100644 --- a/ste/downloader-azureFiles_windows.go +++ b/ste/downloader-azureFiles_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ste @@ -77,7 +78,7 @@ func (*azureFilesDownloader) PutSMBProperties(sip ISMBPropertyBearingSourceInfoP err = windows.SetFileTime(fd, &smbCreationFileTime, nil, pLastWriteTime) if err != nil { - err = fmt.Errorf("attempted update file times: %w", err) + return fmt.Errorf("attempted update file times: %w", err) } return nil } diff --git a/testSuite/cmd/mmap_windows.go b/testSuite/cmd/mmap_windows.go index d1a58e0891..d9b28cfe18 100644 --- a/testSuite/cmd/mmap_windows.go +++ b/testSuite/cmd/mmap_windows.go @@ -19,7 +19,7 @@ func NewMMF(file *os.File, writable bool, offset int64, length int64) (MMF, erro return nil, os.NewSyscallError("CreateFileMapping", errno) } defer syscall.CloseHandle(hMMF) //nolint:errcheck - addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) + addr, _ := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) m := MMF{} h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) h.Data = addr From 375e1416e3a9c38bc3920f45ac560e057b5b3f34 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Fri, 6 Jan 2023 12:07:18 -0800 Subject: [PATCH 38/50] ineffassign --- common/mmf_windows.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/mmf_windows.go b/common/mmf_windows.go index 1ee20d8982..0b3f79cf7d 100644 --- a/common/mmf_windows.go +++ b/common/mmf_windows.go @@ -61,7 +61,7 @@ func NewMMF(file *os.File, writable bool, offset int64, length int64) (*MMF, err return nil, os.NewSyscallError("CreateFileMapping", errno) } defer syscall.CloseHandle(hMMF) //nolint:errcheck - addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) + addr, _ := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) if !writable { // pre-fetch the memory mapped file so that performance is better when it is read From 398e1c4f8a901de6bbcffcce242c2cafd631b43d Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Fri, 6 Jan 2023 12:13:08 -0800 Subject: [PATCH 39/50] replace the ci job with github action --- azure-pipelines.yml | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 3a79166ff5..94e022f190 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -31,27 +31,6 @@ jobs: inputs: version: '1.19.2' 
- - script: | - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin - echo 'Installation complete' - $(go env GOPATH)/bin/golangci-lint --version - $(go env GOPATH)/bin/golangci-lint run --tests=false --max-issues-per-linter=0 --skip-files=azbfs/zz_generated_* --exclude=S1008 > lint.log - result=$(cat lint.log | wc -l) - if [ $result -ne 0 ]; then - echo "-----------------------------------" - echo "Below issues are found in static analysis" - cat lint.log - echo "-----------------------------------" - exit 1 - else - echo "-----------------------------------" - echo "No issues are found in static analysis" - echo "-----------------------------------" - fi - displayName: 'Golang Lint Check - Linux' - workingDirectory: $(System.DefaultWorkingDirectory) - condition: eq(variables.type, 'linux') - - script: | echo 'Running GO Vet' go vet From 944a992e9a355092d04503b0627ff66467bd9caf Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Mon, 9 Jan 2023 09:28:24 -0800 Subject: [PATCH 40/50] remove todo --- cmd/credentialUtil.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/credentialUtil.go b/cmd/credentialUtil.go index 2790708fc9..2f1885ffd0 100644 --- a/cmd/credentialUtil.go +++ b/cmd/credentialUtil.go @@ -342,7 +342,6 @@ func oAuthTokenExists() (oauthTokenExists bool) { if hasCachedToken, err := uotm.HasCachedToken(); hasCachedToken { oauthTokenExists = true - // TODO : Add lint ignore for this SA9003 } else if err != nil { //nolint:staticcheck // Log the error if fail to get cached token, as these are unhandled errors, and should not influence the logic flow. // Uncomment for debugging. From 67e42ffc9c3efc5e95c30793b5d0631ec39f6c23 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Tue, 7 Feb 2023 13:23:03 -0800 Subject: [PATCH 41/50] Update the linting --- azbfs/zc_credential_anonymous.go | 7 ++- azbfs/zc_uuid.go | 51 +-------------------- cmd/zc_traverser_blob.go | 2 - common/uuid.go | 8 +--- e2etest/declarativeHelpers.go | 2 +- e2etest/declarativeTestFiles.go | 8 ++-- e2etest/declarativeWithPropertyProviders.go | 34 +++++++------- sddl/sddlHelper_linux.go | 20 -------- ste/downloader-azureFiles_windows.go | 3 +- ste/mgr-JobPartMgr.go | 1 - ste/sender.go | 4 -- 11 files changed, 32 insertions(+), 108 deletions(-) diff --git a/azbfs/zc_credential_anonymous.go b/azbfs/zc_credential_anonymous.go index 256c1ce5fa..1b330aeb3d 100644 --- a/azbfs/zc_credential_anonymous.go +++ b/azbfs/zc_credential_anonymous.go @@ -15,8 +15,11 @@ type Credential interface { //nolint:unused type credentialFunc pipeline.FactoryFunc +// New creates a credentialFunc object. //nolint:unused func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + // Note: We are not deleting this "unused" code since this is a publicly exported function, we do not want to break + // anyone that has a dependency on the azbfs library (like blobfuse). return f(next, po) } @@ -39,14 +42,14 @@ type anonymousCredentialPolicyFactory struct { } // New creates a credential policy object. -// //nolint:unused func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + // Note: We are not deleting this "unused" code since this is a publicly exported function, we do not want to break + // anyone that has a dependency on the azbfs library (like blobfuse). 
return &anonymousCredentialPolicy{next: next} } // credentialMarker is a package-internal method that exists just to satisfy the Credential interface. -// //nolint:unused func (*anonymousCredentialPolicyFactory) credentialMarker() {} diff --git a/azbfs/zc_uuid.go b/azbfs/zc_uuid.go index 75e0476996..f14972d2f8 100644 --- a/azbfs/zc_uuid.go +++ b/azbfs/zc_uuid.go @@ -3,16 +3,11 @@ package azbfs import ( "crypto/rand" "fmt" - "strconv" ) // The UUID reserved variants. -//nolint:deadcode,varcheck const ( - reservedNCS byte = 0x80 - reservedRFC4122 byte = 0x40 - reservedMicrosoft byte = 0x20 - reservedFuture byte = 0x00 + reservedRFC4122 byte = 0x40 ) // A UUID representation compliant with specification in RFC 4122 document. @@ -37,47 +32,3 @@ func newUUID() (u uuid) { func (u uuid) String() string { return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) } - -// ParseUUID parses a string formatted as "003020100-0504-0706-0809-0a0b0c0d0e0f" -// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID. -//nolint:unused,deadcode -func parseUUID(uuidStr string) uuid { - char := func(hexString string) byte { - i, _ := strconv.ParseUint(hexString, 16, 8) - return byte(i) - } - if uuidStr[0] == '{' { - uuidStr = uuidStr[1:] // Skip over the '{' - } - // 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f - // 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33 - // 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45 - uuidVal := uuid{ - char(uuidStr[0:2]), - char(uuidStr[2:4]), - char(uuidStr[4:6]), - char(uuidStr[6:8]), - - char(uuidStr[9:11]), - char(uuidStr[11:13]), - - char(uuidStr[14:16]), - char(uuidStr[16:18]), - - char(uuidStr[19:21]), - char(uuidStr[21:23]), - - char(uuidStr[24:26]), - char(uuidStr[26:28]), - char(uuidStr[28:30]), - char(uuidStr[30:32]), - char(uuidStr[32:34]), - char(uuidStr[34:36]), - } - return uuidVal -} - -//nolint:unused -func (u uuid) bytes() []byte { - return u[:] -} diff --git a/cmd/zc_traverser_blob.go b/cmd/zc_traverser_blob.go index 0ee0f0fe03..ec16f01df7 100644 --- a/cmd/zc_traverser_blob.go +++ b/cmd/zc_traverser_blob.go @@ -61,8 +61,6 @@ type blobTraverser struct { includeSnapshot bool includeVersion bool - - stripTopDir bool } func (t *blobTraverser) IsDirectory(isSource bool) (bool, error) { diff --git a/common/uuid.go b/common/uuid.go index 6ecfc236ef..fd1099a8d2 100644 --- a/common/uuid.go +++ b/common/uuid.go @@ -8,13 +8,9 @@ import ( ) // The JobID reserved variants. -//nolint:deadcode,varcheck const ( - reservedNCS byte = 0x80 - reservedRFC4122 byte = 0x40 - reservedMicrosoft byte = 0x20 - reservedFuture byte = 0x00 - guidFormat = "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" + reservedRFC4122 byte = 0x40 + guidFormat = "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" ) // A UUID representation compliant with specification in RFC 4122 document. 
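
For reference, the bare //nolint that this patch converges on behaves differently from the scoped //nolint:<linter> form it replaces: the scoped form silences only the named linters, while the bare form silences every linter for the marked line or declaration. A minimal compilable sketch of the two forms (the package and identifier names are made up for illustration and are not part of this repo):

    package lintdemo

    // Bare form: every linter is silenced for this declaration. This is
    // the form the e2etest property providers end up with.
    //nolint
    func silencedForAllLinters() {}

    // Scoped form: only unused, deadcode and varcheck are silenced; any
    // other linter still reports. The comment must begin exactly with
    // "//nolint" (no space after //) to be machine-readable.
    //nolint:unused,deadcode,varcheck
    var keptForFutureUse = 42
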
diff --git a/e2etest/declarativeHelpers.go b/e2etest/declarativeHelpers.go index 17558fe504..5d206b17fa 100644 --- a/e2etest/declarativeHelpers.go +++ b/e2etest/declarativeHelpers.go @@ -234,7 +234,7 @@ func (o Operation) includes(item Operation) bool { // /////////// -//nolint:unused,deadcode,varcheck +//nolint var eTestFromTo = TestFromTo{} // TestFromTo is similar to common/FromTo, except that it can have cases where one value represents many possibilities diff --git a/e2etest/declarativeTestFiles.go b/e2etest/declarativeTestFiles.go index 706cab37f0..d2d87713b0 100644 --- a/e2etest/declarativeTestFiles.go +++ b/e2etest/declarativeTestFiles.go @@ -229,7 +229,7 @@ func (t *testObject) isRootFolder() bool { // It is to be used ONLY as parameters to the f() and folder() methods. // It is not used in other parts of the code, since the other parts use the testObject instances that are created // from -//nolint:unused +//nolint type withPropertyProvider interface { appliesToCreation() bool appliesToVerification() bool @@ -237,7 +237,7 @@ type withPropertyProvider interface { createObjectProperties() *objectProperties } -//nolint:unused +//nolint type expectedFailureProvider interface { expectedFailure() string } @@ -252,7 +252,7 @@ type expectedFailureProvider interface { // You can also add withFailureMessage{"message"} to files that are expected to fail, to specify what the expected // failure message will be in the log. // And withStubMetadata{} to supply the metadata that indicates that an object is a directory stub. -//nolint:unused +//nolint func f(n string, properties ...withPropertyProvider) *testObject { haveCreationProperties := false haveVerificationProperties := false @@ -297,7 +297,7 @@ func f(n string, properties ...withPropertyProvider) *testObject { } // define a folder, in the expectations lists on a testFiles struct -//nolint:unused,deadcode +//nolint func folder(n string, properties ...withPropertyProvider) *testObject { name := strings.TrimLeft(n, "/") result := f(name, properties...) diff --git a/e2etest/declarativeWithPropertyProviders.go b/e2etest/declarativeWithPropertyProviders.go index 25ff00c214..8556c450a0 100644 --- a/e2etest/declarativeWithPropertyProviders.go +++ b/e2etest/declarativeWithPropertyProviders.go @@ -33,7 +33,7 @@ import ( // This is the main property provider, and the only one most tests will ever need. // For ease of use, and conciseness in the tests, the members of this struct are NOT pointers. // Instead, default values in these structs are mapped to nils, inside the createObjectProperties method. -//nolint:unused +//nolint type with struct { size string // uses our standard K, M, G suffix @@ -56,19 +56,19 @@ type with struct { cpkByValue bool } -//nolint:unused +//nolint func (with) appliesToCreation() bool { return true } -//nolint:unused +//nolint func (with) appliesToVerification() bool { return true } // maps non-nillable fields (which are easy to create in the tests) to nillable ones, which have clearer meaning in // the resourceManagers. -//nolint:unused +//nolint func (w with) createObjectProperties() *objectProperties { result := &objectProperties{} populated := false @@ -177,12 +177,12 @@ func (w with) createObjectProperties() *objectProperties { // use createOnly if you want to define properties that should be used when creating an object, but not // used when verifying the state of the transferred object. Generally you'll have no use for this. // Just use "with", and the test framework will do the right thing. 
-//nolint:unused +//nolint type createOnly struct { with } -//nolint:unused +//nolint func (createOnly) appliesToVerification() bool { return false } @@ -191,12 +191,12 @@ func (createOnly) appliesToVerification() bool { // Use verifyOnly if you need to specify some properties that should NOT be applied to the file when it is created, // but should be present on it after) the transfer -//nolint:unused +//nolint type verifyOnly struct { with } -//nolint:unused +//nolint func (verifyOnly) appliesToCreation() bool { return false } @@ -204,20 +204,20 @@ func (verifyOnly) appliesToCreation() bool { //// // use withDirStubMetadata to say that file should be created with metadata that says its a directory stub, and it should have zero size -//nolint:unused +//nolint type withDirStubMetadata struct{} -//nolint:unused +//nolint func (withDirStubMetadata) appliesToCreation() bool { return true } -//nolint:unused +//nolint func (withDirStubMetadata) appliesToVerification() bool { return true // since IF we ever do move these stubs, we expect them to retain their stub metadata } -//nolint:unused +//nolint func (withDirStubMetadata) createObjectProperties() *objectProperties { m := map[string]string{"hdi_isfolder": "true"} // special flag that says this file is a stub size := int64(0) @@ -235,27 +235,27 @@ func (withDirStubMetadata) createObjectProperties() *objectProperties { // It allows you to say what the error should be // TODO: as at 1 July 2020, we are not actually validating these. Should we? It could be nice. If we don't, // remove this type and its usages, and the expectedFailureProvider interface -//nolint:unused +//nolint type withError struct { msg string } -//nolint:unused +//nolint func (withError) appliesToCreation() bool { return false } -//nolint:unused +//nolint func (withError) appliesToVerification() bool { return false } -//nolint:unused +//nolint func (withError) createObjectProperties() *objectProperties { return nil // implementing withPropertyProvider is just to trick the type system into letting us pass this to f() and folder(). Our implementation doesn't DO anything } -//nolint:unused +//nolint func (w withError) expectedFailure() string { return w.msg } diff --git a/sddl/sddlHelper_linux.go b/sddl/sddlHelper_linux.go index 4db0e88af7..4d378b39e4 100644 --- a/sddl/sddlHelper_linux.go +++ b/sddl/sddlHelper_linux.go @@ -871,26 +871,6 @@ func aceRightsToString(aceRights uint32) string { return fmt.Sprintf("0x%x", aceRights) } -// Does the aceType correspond to an object ACE? -// We don't support object ACEs. -//nolint:unused,deadcode -func isObjectAce(aceType byte) bool { - switch aceType { - case ACCESS_ALLOWED_OBJECT_ACE_TYPE, - ACCESS_DENIED_OBJECT_ACE_TYPE, - SYSTEM_AUDIT_OBJECT_ACE_TYPE, - SYSTEM_ALARM_OBJECT_ACE_TYPE, - ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE, - ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE, - SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE, - SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE: - return true - - default: - return false - } -} - // Returns true for aceTypes that we support. // TODO: Allow SACL ACE type, conditional ACE Types. 
func isUnsupportedAceType(aceType byte) bool { diff --git a/ste/downloader-azureFiles_windows.go b/ste/downloader-azureFiles_windows.go index d703f3655e..13952ac782 100644 --- a/ste/downloader-azureFiles_windows.go +++ b/ste/downloader-azureFiles_windows.go @@ -78,7 +78,8 @@ func (*azureFilesDownloader) PutSMBProperties(sip ISMBPropertyBearingSourceInfoP err = windows.SetFileTime(fd, &smbCreationFileTime, nil, pLastWriteTime) if err != nil { - return fmt.Errorf("attempted update file times: %w", err) + err = fmt.Errorf("attempted update file times: %w", err) //nolint:staticcheck + // TODO: return here on error? or ignore } return nil } diff --git a/ste/mgr-JobPartMgr.go b/ste/mgr-JobPartMgr.go index f96a3b8a55..1ee840ba2a 100644 --- a/ste/mgr-JobPartMgr.go +++ b/ste/mgr-JobPartMgr.go @@ -525,7 +525,6 @@ func (jpm *jobPartMgr) createPipelines(ctx context.Context, sourceBlobToken azbl if jpm.credInfo.CredentialType == common.ECredentialType.Unknown() { credInfo = jpm.jobMgr.getInMemoryTransitJobState().CredentialInfo } - // TODO: Double check this fix var userAgent string if fromTo.From() == common.ELocation.S3() { userAgent = common.S3ImportUserAgent diff --git a/ste/sender.go b/ste/sender.go index dc9613a3a4..336df5a51f 100644 --- a/ste/sender.go +++ b/ste/sender.go @@ -116,10 +116,6 @@ type s2sCopier interface { GenerateCopyFunc(chunkID common.ChunkID, blockIndex int32, adjustedChunkSize int64, chunkIsWholeFile bool) chunkFunc } -///////////////////////////////////////////////////////////////////////////////////////////////// - -type s2sCopierFactory func(jptm IJobPartTransferMgr, srcInfoProvider IRemoteSourceInfoProvider, destination string, p pipeline.Pipeline, pacer pacer) (s2sCopier, error) - // Abstraction of the methods needed to upload one file to a remote location // /////////////////////////////////////////////////////////////////////////////////////////////// type uploader interface { From 7bb68ec671f0e07334b5faff11d702c7afd7f427 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 8 Feb 2023 11:05:49 -0800 Subject: [PATCH 42/50] more lint fix --- cmd/copy.go | 8 +++----- ste/sender-blobFolders.go | 6 ------ 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/cmd/copy.go b/cmd/copy.go index 646bebdba0..12dc8d8867 100644 --- a/cmd/copy.go +++ b/cmd/copy.go @@ -1379,11 +1379,9 @@ func (cca *CookedCopyCmdArgs) processRedirectionUpload(blobResource common.Resou // get source credential - if there is a token it will be used to get passed along our pipeline func (cca *CookedCopyCmdArgs) getSrcCredential(ctx context.Context, jpo *common.CopyJobPartOrderRequest) (common.CredentialInfo, error) { - srcCredInfo := common.CredentialInfo{} - var err error - var isPublic bool - if srcCredInfo, isPublic, err = GetCredentialInfoForLocation(ctx, cca.FromTo.From(), cca.Source.Value, cca.Source.SAS, true, cca.CpkOptions); err != nil { + srcCredInfo, isPublic, err := GetCredentialInfoForLocation(ctx, cca.FromTo.From(), cca.Source.Value, cca.Source.SAS, true, cca.CpkOptions) + if err != nil { return srcCredInfo, err // If S2S and source takes OAuthToken as its cred type (OR) source takes anonymous as its cred type, but it's not public and there's no SAS } else if cca.FromTo.IsS2S() && @@ -1393,7 +1391,7 @@ func (cca *CookedCopyCmdArgs) getSrcCredential(ctx context.Context, jpo *common. 
} if cca.Source.SAS != "" && cca.FromTo.IsS2S() && jpo.CredentialInfo.CredentialType == common.ECredentialType.OAuthToken() { - //glcm.Info("Authentication: If the source and destination accounts are in the same AAD tenant & the user/spn/msi has appropriate permissions on both, the source SAS token is not required and OAuth can be used round-trip.") + glcm.Info("Authentication: If the source and destination accounts are in the same AAD tenant & the user/spn/msi has appropriate permissions on both, the source SAS token is not required and OAuth can be used round-trip.") } if cca.FromTo.IsS2S() { diff --git a/ste/sender-blobFolders.go b/ste/sender-blobFolders.go index 4b49040559..225c5fc8cc 100644 --- a/ste/sender-blobFolders.go +++ b/ste/sender-blobFolders.go @@ -177,12 +177,6 @@ func (b *blobFolderSender) EnsureFolderExists() error { } return nil - - if err != nil { - return err - } - - return folderPropertiesSetInCreation{} } func (b *blobFolderSender) SetFolderProperties() error { From 38030f1cbd4600b3e87b79050170e27ca08f355e Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Wed, 8 Feb 2023 12:58:57 -0800 Subject: [PATCH 43/50] Addressed some review comments --- azbfs/zc_credential_anonymous.go | 15 --------- azbfs/zc_mmf_unix.go | 31 ------------------ azbfs/zc_mmf_windows.go | 42 ------------------------ cmd/credentialUtil.go | 5 --- cmd/zc_traverser_blob.go | 5 +-- cmd/zc_traverser_blobfs_account.go | 6 +++- common/logger.go | 51 ------------------------------ 7 files changed, 6 insertions(+), 149 deletions(-) delete mode 100644 azbfs/zc_mmf_unix.go delete mode 100644 azbfs/zc_mmf_windows.go diff --git a/azbfs/zc_credential_anonymous.go b/azbfs/zc_credential_anonymous.go index 1b330aeb3d..2e9c891c4d 100644 --- a/azbfs/zc_credential_anonymous.go +++ b/azbfs/zc_credential_anonymous.go @@ -12,21 +12,6 @@ type Credential interface { credentialMarker() } -//nolint:unused -type credentialFunc pipeline.FactoryFunc - -// New creates a credentialFunc object. -//nolint:unused -func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { - // Note: We are not deleting this "unused" code since this is a publicly exported function, we do not want to break - // anyone that has a dependency on the azbfs library (like blobfuse). - return f(next, po) -} - -// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. 
-//nolint:unused -func (credentialFunc) credentialMarker() {} - ////////////////////////////// // NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resource diff --git a/azbfs/zc_mmf_unix.go b/azbfs/zc_mmf_unix.go deleted file mode 100644 index 5a4349c199..0000000000 --- a/azbfs/zc_mmf_unix.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build linux || darwin -// +build linux darwin - -package azbfs - -import ( - "os" - "syscall" -) - -//nolint:unused -type mmf []byte - -//nolint:unused,deadcode -func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { - prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only - if writable { - prot, flags = syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED - } - addr, err := syscall.Mmap(int(file.Fd()), offset, length, prot, flags) - return mmf(addr), err -} - -//nolint:unused -func (m *mmf) unmap() { - err := syscall.Munmap(*m) - *m = nil - if err != nil { - panic(err) - } -} diff --git a/azbfs/zc_mmf_windows.go b/azbfs/zc_mmf_windows.go deleted file mode 100644 index 7acbcff972..0000000000 --- a/azbfs/zc_mmf_windows.go +++ /dev/null @@ -1,42 +0,0 @@ -package azbfs - -import ( - "os" - "reflect" - "syscall" - "unsafe" -) - -//nolint:unused -type mmf []byte - -//nolint:deadcode,unused -func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { - prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only - if writable { - prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) - } - maxSize := int64(offset + int64(length)) - hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(maxSize>>32), uint32(maxSize&0xffffffff), nil) - if hMMF == 0 { - return nil, os.NewSyscallError("CreateFileMapping", errno) - } - defer syscall.CloseHandle(hMMF) //nolint:errcheck - addr, _ := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) - m := mmf{} - h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) - h.Data = addr - h.Len = length - h.Cap = h.Len - return m, nil -} - -//nolint:unused -func (m *mmf) unmap() { - addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) - *m = mmf{} - err := syscall.UnmapViewOfFile(addr) - if err != nil { - panic(err) - } -} diff --git a/cmd/credentialUtil.go b/cmd/credentialUtil.go index d92c315475..06948b92a8 100644 --- a/cmd/credentialUtil.go +++ b/cmd/credentialUtil.go @@ -357,11 +357,6 @@ func getAzureFileCredentialType() (common.CredentialType, error) { return common.ECredentialType.Anonymous(), nil } -// envVarCredentialType used for passing credential type into AzCopy through environment variable. -// Note: This is only used for internal integration, and not encouraged to be used directly. -//nolint:unused,deadcode,varcheck -const envVarCredentialType = "AZCOPY_CRED_TYPE" - var stashedEnvCredType = "" // GetCredTypeFromEnvVar tries to get credential type from environment variable defined by envVarCredentialType. 
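Editor's note: the deleted envVarCredentialType constant named AZCOPY_CRED_TYPE, and the surviving comment above still refers to it by name. A hedged sketch of the lookup that comment describes — GetCredTypeFromEnvVar's body is outside this diff, so the parsing shown here is an assumption, not AzCopy's actual implementation:

    // credTypeFromEnv is illustrative only; apart from the env var name (taken
    // from the removed constant) and common.CredentialType, names are hypothetical.
    func credTypeFromEnv() (common.CredentialType, bool) {
        raw := os.Getenv("AZCOPY_CRED_TYPE") // the value the removed constant held
        if raw == "" {
            return common.ECredentialType.Unknown(), false
        }
        var ct common.CredentialType
        if err := ct.Parse(raw); err != nil { // Parse is assumed from AzCopy's enum convention
            return common.ECredentialType.Unknown(), false
        }
        return ct, true
    }
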
diff --git a/cmd/zc_traverser_blob.go b/cmd/zc_traverser_blob.go index ec16f01df7..7718758a86 100644 --- a/cmd/zc_traverser_blob.go +++ b/cmd/zc_traverser_blob.go @@ -368,12 +368,12 @@ func (t *blobTraverser) parallelList(containerURL azblob.ContainerURL, container // initiate parallel scanning, starting at the root path workerContext, cancelWorkers := context.WithCancel(t.ctx) + defer cancelWorkers() cCrawled := parallel.Crawl(workerContext, searchPrefix+extraSearchPrefix, enumerateOneDir, EnumerationParallelism) for x := range cCrawled { item, workerError := x.Item() if workerError != nil { - cancelWorkers() return workerError } @@ -385,13 +385,10 @@ func (t *blobTraverser) parallelList(containerURL azblob.ContainerURL, container processErr := processIfPassedFilters(filters, object, processor) _, processErr = getProcessingError(processErr) if processErr != nil { - cancelWorkers() return processErr } } - // TODO : Double check this - cancelWorkers() return nil } diff --git a/cmd/zc_traverser_blobfs_account.go b/cmd/zc_traverser_blobfs_account.go index 6bb95eb238..9c13fb2e63 100644 --- a/cmd/zc_traverser_blobfs_account.go +++ b/cmd/zc_traverser_blobfs_account.go @@ -101,7 +101,11 @@ func (t *BlobFSAccountTraverser) listContainers() ([]string, error) { func (t *BlobFSAccountTraverser) Traverse(preprocessor objectMorpher, processor objectProcessor, filters []ObjectFilter) error { // listContainers will return the cached filesystem list if filesystems have already been listed by this traverser. - fsList, _ := t.listContainers() + fsList, err := t.listContainers() + + if err != nil { + return err + } for _, v := range fsList { fileSystemURL := t.accountURL.NewFileSystemURL(v).URL() diff --git a/common/logger.go b/common/logger.go index 7ffbbd66f5..6b30fda39e 100644 --- a/common/logger.go +++ b/common/logger.go @@ -52,57 +52,6 @@ type ILoggerResetable interface { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// TODO : Can this be deleted? -func NewAppLogger(minimumLevelToLog pipeline.LogLevel, logFileFolder string) ILoggerCloser { - // TODO: Put start date time in file Name - // TODO: log life time management. - // appLogFile, err := os.OpenFile(path.Join(logFileFolder, "azcopy.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) // TODO: Make constant for 0666 - // PanicIfErr(err) - return &appLogger{ - minimumLevelToLog: minimumLevelToLog, - // file: appLogFile, - // logger: log.New(appLogFile, "", log.LstdFlags|log.LUTC), - } -} - -type appLogger struct { - // maximum loglevel represents the maximum severity of log messages which can be logged to Job Log file. - // any message with severity higher than this will be ignored. 
- minimumLevelToLog pipeline.LogLevel // The maximum customer-desired log level for this job - // file *os.File // The job's log file - // logger *log.Logger // The Job's logger -} - -func (al *appLogger) ShouldLog(level pipeline.LogLevel) bool { - if level == pipeline.LogNone { - return false - } - return level <= al.minimumLevelToLog -} - -func (al *appLogger) CloseLog() { - // TODO consider delete completely to get rid of app logger - // al.logger.Println("Closing Log") - // err := al.file.Close() - // PanicIfErr(err) -} - -func (al *appLogger) Log(loglevel pipeline.LogLevel, msg string) { - // TODO consider delete completely to get rid of app logger - // TODO: see also the workaround in jobsAdmin.LogToJobLog - // TODO: if we DON'T delete, use azCopyLogSanitizer - // if al.ShouldLog(loglevel) { - // al.logger.Println(msg) - // } -} - -func (al *appLogger) Panic(err error) { - // TODO consider delete completely to get rid of app logger - // al.logger.Panic(err) -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - type jobLogger struct { // maximum loglevel represents the maximum severity of log messages which can be logged to Job Log file. // any message with severity higher than this will be ignored. From 6391c3a140e99f83cbd3681075d600c739902b09 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Tue, 21 Feb 2023 08:47:37 -0800 Subject: [PATCH 44/50] clean up nolint in e2etest --- .github/workflows/golangci-lint.yml | 2 +- e2etest/declarativeHelpers.go | 2 - e2etest/declarativeResourceManagers.go | 1 - e2etest/declarativeRunner.go | 2 - e2etest/declarativeScenario.go | 3 +- e2etest/declarativeTestFiles.go | 5 --- e2etest/declarativeWithPropertyProviders.go | 17 -------- e2etest/factory.go | 2 - e2etest/helpers.go | 48 --------------------- e2etest/scenario_helpers.go | 32 -------------- e2etest/scenario_os_helpers.go | 8 +--- e2etest/scenario_os_helpers_for_windows.go | 4 +- ste/downloader-blob.go | 2 +- ste/sourceInfoProvider-Blob.go | 4 +- 14 files changed, 9 insertions(+), 123 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index da5cbc3db1..e001b570b8 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -34,7 +34,7 @@ jobs: # working-directory: somedir # Optional: golangci-lint command line arguments. - args: --tests=false --max-issues-per-linter=0 --skip-files=azbfs/zz_generated_* --exclude=S1008 --max-same-issues=0 --timeout 5m0s + args: --tests=false --max-issues-per-linter=0 --skip-files=azbfs/zz_generated_* --skip-dirs=e2etest --exclude=S1008 --max-same-issues=0 --timeout 5m0s # Optional: show only new issues if it's a pull request. The default value is `false`. 
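        # A note on the args set above, for readers of this patch: --tests=false
        # skips *_test.go files; --skip-dirs=e2etest now leaves the e2e suite out
        # of CI linting entirely (its nolint directives are cleaned up below);
        # --exclude=S1008 suppresses gosimple's "simplify boolean return" check;
        # setting --max-issues-per-linter and --max-same-issues to 0 lifts
        # golangci-lint's default reporting caps (50 and 3) so every issue is
        # printed; --timeout raises the default 1m analysis deadline to 5m.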
# only-new-issues: true \ No newline at end of file diff --git a/e2etest/declarativeHelpers.go b/e2etest/declarativeHelpers.go index 51fc32362c..d1443c83c1 100644 --- a/e2etest/declarativeHelpers.go +++ b/e2etest/declarativeHelpers.go @@ -52,7 +52,6 @@ func equals() comparison { return comparison{true} } -//nolint func notEquals() comparison { return comparison{false} } @@ -235,7 +234,6 @@ func (o Operation) includes(item Operation) bool { // /////////// -//nolint var eTestFromTo = TestFromTo{} // TestFromTo is similar to common/FromTo, except that it can have cases where one value represents many possibilities diff --git a/e2etest/declarativeResourceManagers.go b/e2etest/declarativeResourceManagers.go index 9710d84c7c..482ff7b9d0 100644 --- a/e2etest/declarativeResourceManagers.go +++ b/e2etest/declarativeResourceManagers.go @@ -44,7 +44,6 @@ type downloadContentOptions struct { downloadFileContentOptions } -//nolint type downloadBlobContentOptions struct { containerURL azblob.ContainerURL cpkInfo common.CpkInfo diff --git a/e2etest/declarativeRunner.go b/e2etest/declarativeRunner.go index b7dc10dab8..0fc1384ac8 100644 --- a/e2etest/declarativeRunner.go +++ b/e2etest/declarativeRunner.go @@ -44,11 +44,9 @@ var validCredTypesPerLocation = map[common.Location][]common.CredentialType{ common.ELocation.GCP(): {common.ECredentialType.GoogleAppCredentials()}, } -//nolint:unused,deadcode,varcheck var allCredentialTypes []common.CredentialType = nil // var oAuthOnly = []common.CredentialType{common.ECredentialType.OAuthToken()} -//nolint:unused,deadcode,varcheck var anonymousAuthOnly = []common.CredentialType{common.ECredentialType.Anonymous()} func getValidCredCombinationsForFromTo(fromTo common.FromTo, requestedCredentialTypesSrc, requestedCredentialTypesDst []common.CredentialType, accountTypes []AccountType) [][2]common.CredentialType { diff --git a/e2etest/declarativeScenario.go b/e2etest/declarativeScenario.go index 1e37ff19ec..dadbd3634b 100644 --- a/e2etest/declarativeScenario.go +++ b/e2etest/declarativeScenario.go @@ -256,7 +256,7 @@ func (s *scenario) runAzCopy(logDirectory string) { result, wasClean, err := r.ExecuteAzCopyCommand( s.operation, s.state.source.getParam(s.stripTopDir, needsSAS(s.credTypes[0]), tf.objectTarget), - s.state.dest.getParam(false, needsSAS(s.credTypes[1]), common.IffString(tf.destTarget != "", tf.destTarget, tf.objectTarget)), + s.state.dest.getParam(false, needsSAS(s.credTypes[1]), common.IffString(tf.destTarget != "", tf.destTarget, tf.objectTarget)), s.credTypes[0] == common.ECredentialType.OAuthToken() || s.credTypes[1] == common.ECredentialType.OAuthToken(), // needsOAuth afterStart, s.chToStdin, logDirectory) @@ -626,7 +626,6 @@ func (s *scenario) validateLastWriteTime(expected, actual *time.Time) { expected, actual)) } -//nolint func (s *scenario) validateSMBAttrs(expected, actual *uint32) { if expected == nil { // These properties were not explicitly stated for verification diff --git a/e2etest/declarativeTestFiles.go b/e2etest/declarativeTestFiles.go index 25222c9d74..a13e136d59 100644 --- a/e2etest/declarativeTestFiles.go +++ b/e2etest/declarativeTestFiles.go @@ -244,7 +244,6 @@ func (t *testObject) isRootFolder() bool { // It is to be used ONLY as parameters to the f() and folder() methods. 
// It is not used in other parts of the code, since the other parts use the testObject instances that are created // from -//nolint type withPropertyProvider interface { appliesToCreation() bool appliesToVerification() bool @@ -252,7 +251,6 @@ type withPropertyProvider interface { createObjectProperties() *objectProperties } -//nolint type expectedFailureProvider interface { expectedFailure() string } @@ -267,7 +265,6 @@ type expectedFailureProvider interface { // You can also add withFailureMessage{"message"} to files that are expected to fail, to specify what the expected // failure message will be in the log. // And withStubMetadata{} to supply the metadata that indicates that an object is a directory stub. -//nolint func f(n string, properties ...withPropertyProvider) *testObject { haveCreationProperties := false haveVerificationProperties := false @@ -327,7 +324,6 @@ func symlink(new, target string) *testObject { } // define a folder, in the expectations lists on a testFiles struct -//nolint func folder(n string, properties ...withPropertyProvider) *testObject { name := strings.TrimLeft(n, "/") result := f(name, properties...) @@ -369,7 +365,6 @@ type testFiles struct { shouldSkip []interface{} } -//nolint:unused func (tf testFiles) cloneShouldTransfers() testFiles { return testFiles{ defaultSize: tf.defaultSize, diff --git a/e2etest/declarativeWithPropertyProviders.go b/e2etest/declarativeWithPropertyProviders.go index 3e1aa906d2..2e7ec12885 100644 --- a/e2etest/declarativeWithPropertyProviders.go +++ b/e2etest/declarativeWithPropertyProviders.go @@ -33,7 +33,6 @@ import ( // This is the main property provider, and the only one most tests will ever need. // For ease of use, and conciseness in the tests, the members of this struct are NOT pointers. // Instead, default values in these structs are mapped to nils, inside the createObjectProperties method. -//nolint type with struct { size string // uses our standard K, M, G suffix @@ -58,19 +57,16 @@ type with struct { cpkByValue bool } -//nolint func (with) appliesToCreation() bool { return true } -//nolint func (with) appliesToVerification() bool { return true } // maps non-nillable fields (which are easy to create in the tests) to nillable ones, which have clearer meaning in // the resourceManagers. -//nolint func (w with) createObjectProperties() *objectProperties { result := &objectProperties{} populated := false @@ -183,12 +179,10 @@ func (w with) createObjectProperties() *objectProperties { // use createOnly if you want to define properties that should be used when creating an object, but not // used when verifying the state of the transferred object. Generally you'll have no use for this. // Just use "with", and the test framework will do the right thing. 
-// nolint type createOnly struct { with } -// nolint func (createOnly) appliesToVerification() bool { return false } @@ -197,12 +191,10 @@ func (createOnly) appliesToVerification() bool { // Use verifyOnly if you need to specify some properties that should NOT be applied to the file when it is created, // but should be present on it after) the transfer -//nolint type verifyOnly struct { with } -//nolint func (verifyOnly) appliesToCreation() bool { return false } @@ -210,20 +202,16 @@ func (verifyOnly) appliesToCreation() bool { //// // use withDirStubMetadata to say that file should be created with metadata that says its a directory stub, and it should have zero size -//nolint type withDirStubMetadata struct{} -//nolint func (withDirStubMetadata) appliesToCreation() bool { return true } -//nolint func (withDirStubMetadata) appliesToVerification() bool { return true // since IF we ever do move these stubs, we expect them to retain their stub metadata } -//nolint func (withDirStubMetadata) createObjectProperties() *objectProperties { m := map[string]string{"hdi_isfolder": "true"} // special flag that says this file is a stub size := int64(0) @@ -241,27 +229,22 @@ func (withDirStubMetadata) createObjectProperties() *objectProperties { // It allows you to say what the error should be // TODO: as at 1 July 2020, we are not actually validating these. Should we? It could be nice. If we don't, // remove this type and its usages, and the expectedFailureProvider interface -//nolint type withError struct { msg string } -//nolint func (withError) appliesToCreation() bool { return false } -//nolint func (withError) appliesToVerification() bool { return false } -//nolint func (withError) createObjectProperties() *objectProperties { return nil // implementing withPropertyProvider is just to trick the type system into letting us pass this to f() and folder(). Our implementation doesn't DO anything } -//nolint func (w withError) expectedFailure() string { return w.msg } diff --git a/e2etest/factory.go b/e2etest/factory.go index af28b0c43b..c8fb9c2b3b 100644 --- a/e2etest/factory.go +++ b/e2etest/factory.go @@ -255,7 +255,6 @@ func getTestName(t *testing.T) (pseudoSuite, test string) { // them, and determine the order in which they were created. // Will truncate the end of the test name, if there is not enough room for it, followed by the time-based suffix, // with a non-zero maxLen. -//nolint func generateName(c asserter, prefix string, maxLen int) string { name := c.CompactScenarioName() // don't want to just use test name here, because each test contains multiple scenarios with the declarative runner @@ -280,7 +279,6 @@ func (TestResourceNameGenerator) GenerateContainerName(c asserter) string { return uuid.New().String() } -//nolint func (TestResourceNameGenerator) generateBlobName(c asserter) string { return generateName(c, blobPrefix, 0) } diff --git a/e2etest/helpers.go b/e2etest/helpers.go index 20cefee30d..9e9e0cdf95 100644 --- a/e2etest/helpers.go +++ b/e2etest/helpers.go @@ -65,54 +65,44 @@ const ( ) // if S3_TESTS_OFF is set at all, S3 tests are disabled. 
-//nolint func isS3Disabled() bool { return strings.ToLower(os.Getenv("S3_TESTS_OFF")) != "" } -//nolint func skipIfS3Disabled(c asserter) { if isS3Disabled() { c.Skip("S3 testing is disabled for this unit test suite run.") } } -//nolint func generateContainerName(c asserter) string { return generateName(c, containerPrefix, 63) } -//nolint func generateBlobName(c asserter) string { return generateName(c, blobPrefix, 0) } -//nolint func generateBucketName(c asserter) string { return generateName(c, bucketPrefix, 63) } -//nolint func generateBucketNameWithCustomizedPrefix(c asserter, customizedPrefix string) string { return generateName(c, customizedPrefix, 63) } -//nolint func generateObjectName(c asserter) string { return generateName(c, objectPrefix, 0) } -//nolint func generateShareName(c asserter) string { return generateName(c, sharePrefix, 63) } -//nolint func generateFilesystemName(c asserter) string { return generateName(c, blobfsPrefix, 63) } -//nolint func getShareURL(c asserter, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { name = generateShareName(c) share = fsu.NewShareURL(name) @@ -120,17 +110,14 @@ func getShareURL(c asserter, fsu azfile.ServiceURL) (share azfile.ShareURL, name return share, name } -//nolint func generateAzureFileName(c asserter) string { return generateName(c, azureFilePrefix, 0) } -//nolint func generateBfsFileName(c asserter) string { return generateName(c, blobfsPrefix, 0) } -//nolint func getContainerURL(c asserter, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { name = generateContainerName(c) container = bsu.NewContainerURL(name) @@ -138,7 +125,6 @@ func getContainerURL(c asserter, bsu azblob.ServiceURL) (container azblob.Contai return container, name } -//nolint func getFilesystemURL(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { name = generateFilesystemName(c) filesystem = bfssu.NewFileSystemURL(name) @@ -146,7 +132,6 @@ func getFilesystemURL(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.File return } -//nolint func getBlockBlobURL(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { name = prefix + generateBlobName(c) blob = container.NewBlockBlobURL(name) @@ -154,7 +139,6 @@ func getBlockBlobURL(c asserter, container azblob.ContainerURL, prefix string) ( return blob, name } -//nolint func getBfsFileURL(c asserter, filesystemURL azbfs.FileSystemURL, prefix string) (file azbfs.FileURL, name string) { name = prefix + generateBfsFileName(c) file = filesystemURL.NewRootDirectoryURL().NewFileURL(name) @@ -162,7 +146,6 @@ func getBfsFileURL(c asserter, filesystemURL azbfs.FileSystemURL, prefix string) return } -//nolint func getAppendBlobURL(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { name = generateBlobName(c) blob = container.NewAppendBlobURL(prefix + name) @@ -170,7 +153,6 @@ func getAppendBlobURL(c asserter, container azblob.ContainerURL, prefix string) return blob, name } -//nolint func getPageBlobURL(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { name = generateBlobName(c) blob = container.NewPageBlobURL(prefix + name) @@ -178,7 +160,6 @@ func getPageBlobURL(c asserter, container azblob.ContainerURL, prefix string) (b return } -//nolint func getAzureFileURL(c asserter, shareURL azfile.ShareURL, prefix string) (fileURL azfile.FileURL, name string) { name = prefix + generateAzureFileName(c) fileURL = 
shareURL.NewRootDirectoryURL().NewFileURL(name) @@ -186,7 +167,6 @@ func getAzureFileURL(c asserter, shareURL azfile.ShareURL, prefix string) (fileU return } -//nolint func getReaderToRandomBytes(n int) *bytes.Reader { r, _ := getRandomDataAndReader(n) return r @@ -200,7 +180,6 @@ func getRandomDataAndReader(n int) (*bytes.Reader, []byte) { return bytes.NewReader(data), data } -//nolint func createNewContainer(c asserter, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { container, name = getContainerURL(c, bsu) @@ -210,7 +189,6 @@ func createNewContainer(c asserter, bsu azblob.ServiceURL) (container azblob.Con return container, name } -//nolint func createNewFilesystem(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { filesystem, name = getFilesystemURL(c, bfssu) @@ -220,7 +198,6 @@ func createNewFilesystem(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.F return } -//nolint func createNewBfsFile(c asserter, filesystem azbfs.FileSystemURL, prefix string) (file azbfs.FileURL, name string) { file, name = getBfsFileURL(c, filesystem, prefix) @@ -239,7 +216,6 @@ func createNewBfsFile(c asserter, filesystem azbfs.FileSystemURL, prefix string) return } -//nolint func createNewBlockBlob(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { blob, name = getBlockBlobURL(c, container, prefix) @@ -252,7 +228,6 @@ func createNewBlockBlob(c asserter, container azblob.ContainerURL, prefix string return } -//nolint func createNewAzureShare(c asserter, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { share, name = getShareURL(c, fsu) @@ -262,7 +237,6 @@ func createNewAzureShare(c asserter, fsu azfile.ServiceURL) (share azfile.ShareU return share, name } -//nolint func createNewAzureFile(c asserter, share azfile.ShareURL, prefix string) (file azfile.FileURL, name string) { file, name = getAzureFileURL(c, share, prefix) @@ -287,7 +261,6 @@ func generateParentsForAzureFile(c asserter, fileURL azfile.FileURL) { c.AssertNoErr(err) } -//nolint func createNewAppendBlob(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { blob, name = getAppendBlobURL(c, container, prefix) @@ -298,7 +271,6 @@ func createNewAppendBlob(c asserter, container azblob.ContainerURL, prefix strin return } -//nolint func createNewPageBlob(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { blob, name = getPageBlobURL(c, container, prefix) @@ -309,26 +281,22 @@ func createNewPageBlob(c asserter, container azblob.ContainerURL, prefix string) return } -//nolint func deleteContainer(c asserter, container azblob.ContainerURL) { resp, err := container.Delete(ctx, azblob.ContainerAccessConditions{}) c.AssertNoErr(err) c.Assert(resp.StatusCode(), equals(), 202) } -//nolint func deleteFilesystem(c asserter, filesystem azbfs.FileSystemURL) { resp, err := filesystem.Delete(ctx) c.AssertNoErr(err) c.Assert(resp.StatusCode(), equals(), 202) } -//nolint type createS3ResOptions struct { Location string } -//nolint func createS3ClientWithMinio(c asserter, o createS3ResOptions) (*minio.Client, error) { skipIfS3Disabled(c) @@ -347,7 +315,6 @@ func createS3ClientWithMinio(c asserter, o createS3ResOptions) (*minio.Client, e return s3Client, nil } -//nolint func createNewBucket(c asserter, client *minio.Client, o createS3ResOptions) string { bucketName := generateBucketName(c) err := client.MakeBucket(bucketName, o.Location) @@ -356,13 +323,11 @@ 
func createNewBucket(c asserter, client *minio.Client, o createS3ResOptions) str return bucketName } -//nolint func createNewBucketWithName(c asserter, client *minio.Client, bucketName string, o createS3ResOptions) { err := client.MakeBucket(bucketName, o.Location) c.AssertNoErr(err) } -//nolint func createNewObject(c asserter, client *minio.Client, bucketName string, prefix string) (objectKey string) { objectKey = prefix + generateObjectName(c) @@ -375,7 +340,6 @@ func createNewObject(c asserter, client *minio.Client, bucketName string, prefix return } -//nolint func deleteBucket(_ asserter, client *minio.Client, bucketName string, waitQuarterMinute bool) { // If we error out in this function, simply just skip over deleting the bucket. // Some of our buckets have become "ghost" buckets in the past. @@ -420,7 +384,6 @@ func deleteBucket(_ asserter, client *minio.Client, bucketName string, waitQuart } } -//nolint func cleanS3Account(c asserter, client *minio.Client) { buckets, err := client.ListBuckets() if err != nil { @@ -437,7 +400,6 @@ func cleanS3Account(c asserter, client *minio.Client) { time.Sleep(time.Minute) } -//nolint func cleanBlobAccount(c asserter, serviceURL azblob.ServiceURL) { marker := azblob.Marker{} for marker.NotDone() { @@ -453,7 +415,6 @@ func cleanBlobAccount(c asserter, serviceURL azblob.ServiceURL) { } } -//nolint func cleanFileAccount(c asserter, serviceURL azfile.ServiceURL) { marker := azfile.Marker{} for marker.NotDone() { @@ -471,7 +432,6 @@ func cleanFileAccount(c asserter, serviceURL azfile.ServiceURL) { time.Sleep(time.Minute) } -//nolint func getGenericCredentialForFile(accountType string) (*azfile.SharedKeyCredential, error) { accountNameEnvVar := accountType + "ACCOUNT_NAME" accountKeyEnvVar := accountType + "ACCOUNT_KEY" @@ -492,7 +452,6 @@ func deleteShare(c asserter, share azfile.ShareURL) { // those changes not being reflected yet, we will wait 30 seconds and try the test again. If it fails this time for any reason, // we fail the test. It is the responsibility of the the testImplFunc to determine which error string indicates the test should be retried. // There can only be one such string. All errors that cannot be due to this detail should be asserted and not returned as an error string. 
-//nolint func runTestRequiringServiceProperties(c asserter, bsu azblob.ServiceURL, code string, enableServicePropertyFunc func(asserter, azblob.ServiceURL), testImplFunc func(asserter, azblob.ServiceURL) error, @@ -508,7 +467,6 @@ func runTestRequiringServiceProperties(c asserter, bsu azblob.ServiceURL, code s } } -//nolint func getContainerURLWithSAS(c asserter, credential azblob.SharedKeyCredential, containerName string) azblob.ContainerURL { sasQueryParams, err := azblob.BlobSASSignatureValues{ Protocol: azblob.SASProtocolHTTPS, @@ -531,7 +489,6 @@ func getContainerURLWithSAS(c asserter, credential azblob.SharedKeyCredential, c return azblob.NewContainerURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) } -//nolint func getBlobServiceURLWithSAS(c asserter, credential azblob.SharedKeyCredential) azblob.ServiceURL { sasQueryParams, err := azblob.AccountSASSignatureValues{ Protocol: azblob.SASProtocolHTTPS, @@ -554,7 +511,6 @@ func getBlobServiceURLWithSAS(c asserter, credential azblob.SharedKeyCredential) return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) } -//nolint func getFileServiceURLWithSAS(c asserter, credential azfile.SharedKeyCredential) azfile.ServiceURL { sasQueryParams, err := azfile.AccountSASSignatureValues{ Protocol: azfile.SASProtocolHTTPS, @@ -574,7 +530,6 @@ func getFileServiceURLWithSAS(c asserter, credential azfile.SharedKeyCredential) return azfile.NewServiceURL(*fullURL, azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})) } -//nolint func getShareURLWithSAS(c asserter, credential azfile.SharedKeyCredential, shareName string) azfile.ShareURL { sasQueryParams, err := azfile.FileSASSignatureValues{ Protocol: azfile.SASProtocolHTTPS, @@ -597,7 +552,6 @@ func getShareURLWithSAS(c asserter, credential azfile.SharedKeyCredential, share return azfile.NewShareURL(*fullURL, azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})) } -//nolint func getAdlsServiceURLWithSAS(c asserter, credential azbfs.SharedKeyCredential) azbfs.ServiceURL { sasQueryParams, err := azbfs.AccountSASSignatureValues{ Protocol: azbfs.SASProtocolHTTPS, @@ -621,13 +575,11 @@ func getAdlsServiceURLWithSAS(c asserter, credential azbfs.SharedKeyCredential) } // check.v1 style "StringContains" checker -//nolint type stringContainsChecker struct { *chk.CheckerInfo } // Check -//nolint func (checker *stringContainsChecker) Check(params []interface{}, _ []string) (result bool, error string) { if len(params) < 2 { return false, "StringContains requires two parameters" diff --git a/e2etest/scenario_helpers.go b/e2etest/scenario_helpers.go index 7ce9ce4519..0686f1a7ac 100644 --- a/e2etest/scenario_helpers.go +++ b/e2etest/scenario_helpers.go @@ -52,7 +52,6 @@ const defaultStringFileSize = "1k" type scenarioHelper struct{} -//nolint var specialNames = []string{ "打麻将.txt", "wow such space so much space", @@ -68,7 +67,6 @@ var specialNames = []string{ } // note: this is to emulate the list-of-files flag -//nolint func (scenarioHelper) generateListOfFiles(c asserter, fileList []string) (path string) { parentDirName, err := os.MkdirTemp("", "AzCopyLocalTest") c.AssertNoErr(err) @@ -85,7 +83,6 @@ func (scenarioHelper) generateListOfFiles(c asserter, fileList []string) (path s return } -//nolint func (scenarioHelper) generateLocalDirectory(c asserter) (dstDirName string) { dstDirName, err := os.MkdirTemp("", "AzCopyLocalTest") c.AssertNoErr(err) @@ -218,7 +215,6 @@ 
func (s scenarioHelper) enumerateLocalProperties(a asserter, dirpath string) map return result } -//nolint func (s scenarioHelper) generateCommonRemoteScenarioForLocal(c asserter, dirPath string, prefix string) (fileList []string) { fileList = make([]string, 50) for i := 0; i < 10; i++ { @@ -242,7 +238,6 @@ func (s scenarioHelper) generateCommonRemoteScenarioForLocal(c asserter, dirPath return } -//nolint func (scenarioHelper) generateCommonRemoteScenarioForBlob(c asserter, containerURL azblob.ContainerURL, prefix string) (blobList []string) { // make 50 blobs with random names // 10 of them at the top level @@ -271,7 +266,6 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlob(c asserter, containerU return } -//nolint func (scenarioHelper) generateCommonRemoteScenarioForBlobFS(c asserter, filesystemURL azbfs.FileSystemURL, prefix string) (pathList []string) { pathList = make([]string, 50) @@ -294,7 +288,6 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlobFS(c asserter, filesyst return } -//nolint func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(c asserter, shareURL azfile.ShareURL, prefix string) (fileList []string) { fileList = make([]string, 50) @@ -317,7 +310,6 @@ func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(c asserter, share return } -//nolint func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(c asserter, serviceURL azblob.ServiceURL, containerList []string, blobList []*testObject) { for _, containerName := range containerList { curl := serviceURL.NewContainerURL(containerName) @@ -333,7 +325,6 @@ func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(c asserter, serv } } -//nolint func (s scenarioHelper) generateFileSharesAndFilesFromLists(c asserter, serviceURL azfile.ServiceURL, shareList []string, fileList []*testObject) { for _, shareName := range shareList { sURL := serviceURL.NewShareURL(shareName) @@ -348,7 +339,6 @@ func (s scenarioHelper) generateFileSharesAndFilesFromLists(c asserter, serviceU } } -//nolint func (s scenarioHelper) generateFilesystemsAndFilesFromLists(c asserter, serviceURL azbfs.ServiceURL, fsList []string, fileList []string, data string) { for _, filesystemName := range fsList { fsURL := serviceURL.NewFileSystemURL(filesystemName) @@ -359,7 +349,6 @@ func (s scenarioHelper) generateFilesystemsAndFilesFromLists(c asserter, service } } -//nolint func (s scenarioHelper) generateS3BucketsAndObjectsFromLists(c asserter, s3Client *minio.Client, bucketList []string, objectList []string, data string) { for _, bucketName := range bucketList { err := s3Client.MakeBucket(bucketName, "") @@ -591,7 +580,6 @@ func (s scenarioHelper) downloadBlobContent(a asserter, options downloadContentO return destData[:] } -//nolint func (scenarioHelper) generatePageBlobsFromList(c asserter, containerURL azblob.ContainerURL, blobList []string, data string) { for _, blobName := range blobList { // Create the blob (PUT blob) @@ -628,7 +616,6 @@ func (scenarioHelper) generatePageBlobsFromList(c asserter, containerURL azblob. 
time.Sleep(time.Millisecond * 1050) } -//nolint func (scenarioHelper) generateAppendBlobsFromList(c asserter, containerURL azblob.ContainerURL, blobList []string, data string) { for _, blobName := range blobList { // Create the blob (PUT blob) @@ -659,7 +646,6 @@ func (scenarioHelper) generateAppendBlobsFromList(c asserter, containerURL azblo time.Sleep(time.Millisecond * 1050) } -//nolint func (scenarioHelper) generateBlockBlobWithAccessTier(c asserter, containerURL azblob.ContainerURL, blobName string, accessTier azblob.AccessTierType) { blob := containerURL.NewBlockBlobURL(blobName) cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, @@ -669,7 +655,6 @@ func (scenarioHelper) generateBlockBlobWithAccessTier(c asserter, containerURL a } // create the demanded objects -//nolint func (scenarioHelper) generateObjects(c asserter, client *minio.Client, bucketName string, objectList []string) { size := int64(len(objectDefaultData)) for _, objectName := range objectList { @@ -680,7 +665,6 @@ func (scenarioHelper) generateObjects(c asserter, client *minio.Client, bucketNa } // create the demanded files -//nolint func (scenarioHelper) generateFlatFiles(c asserter, shareURL azfile.ShareURL, fileList []string) { for _, fileName := range fileList { file := shareURL.NewRootDirectoryURL().NewFileURL(fileName) @@ -692,7 +676,6 @@ func (scenarioHelper) generateFlatFiles(c asserter, shareURL azfile.ShareURL, fi time.Sleep(time.Millisecond * 1050) } -//nolint func (scenarioHelper) generateCommonRemoteScenarioForS3(c asserter, client *minio.Client, bucketName string, prefix string, returnObjectListWithBucketName bool) (objectList []string) { // make 50 objects with random names // 10 of them at the top level @@ -981,7 +964,6 @@ func (s scenarioHelper) downloadFileContent(a asserter, options downloadContentO return destData } -//nolint func (scenarioHelper) generateBFSPathsFromList(c asserter, filesystemURL azbfs.FileSystemURL, fileList []string) { for _, bfsPath := range fileList { file := filesystemURL.NewRootDirectoryURL().NewFileURL(bfsPath) @@ -1013,7 +995,6 @@ func (scenarioHelper) convertListToMap(list []*testObject, converter func(*testO return lookupMap } -//nolint func (scenarioHelper) shaveOffPrefix(list []string, prefix string) []string { cleanList := make([]string, len(list)) for i, item := range list { @@ -1022,7 +1003,6 @@ func (scenarioHelper) shaveOffPrefix(list []string, prefix string) []string { return cleanList } -//nolint func (scenarioHelper) addPrefix(list []string, prefix string) []string { modifiedList := make([]string, len(list)) for i, item := range list { @@ -1031,7 +1011,6 @@ func (scenarioHelper) addPrefix(list []string, prefix string) []string { return modifiedList } -//nolint func (scenarioHelper) getRawContainerURLWithSAS(c asserter, containerName string) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) @@ -1040,7 +1019,6 @@ func (scenarioHelper) getRawContainerURLWithSAS(c asserter, containerName string return containerURLWithSAS.URL() } -//nolint func (scenarioHelper) getRawBlobURLWithSAS(c asserter, containerName string, blobName string) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) @@ -1050,7 +1028,6 @@ func (scenarioHelper) getRawBlobURLWithSAS(c asserter, containerName string, 
blo return blobURLWithSAS.URL() } -//nolint func (scenarioHelper) getRawBlobServiceURLWithSAS(c asserter) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) @@ -1059,7 +1036,6 @@ func (scenarioHelper) getRawBlobServiceURLWithSAS(c asserter) url.URL { return getBlobServiceURLWithSAS(c, *credential).URL() } -//nolint func (scenarioHelper) getRawFileServiceURLWithSAS(c asserter) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) @@ -1068,7 +1044,6 @@ func (scenarioHelper) getRawFileServiceURLWithSAS(c asserter) url.URL { return getFileServiceURLWithSAS(c, *credential).URL() } -//nolint func (scenarioHelper) getRawAdlsServiceURLWithSAS(c asserter) azbfs.ServiceURL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential := azbfs.NewSharedKeyCredential(accountName, accountKey) @@ -1076,7 +1051,6 @@ func (scenarioHelper) getRawAdlsServiceURLWithSAS(c asserter) azbfs.ServiceURL { return getAdlsServiceURLWithSAS(c, *credential) } -//nolint func (scenarioHelper) getBlobServiceURL(c asserter) azblob.ServiceURL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) @@ -1090,7 +1064,6 @@ func (scenarioHelper) getBlobServiceURL(c asserter) azblob.ServiceURL { return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(credential, azblob.PipelineOptions{})) } -//nolint func (s scenarioHelper) getContainerURL(c asserter, containerName string) azblob.ContainerURL { serviceURL := s.getBlobServiceURL(c) containerURL := serviceURL.NewContainerURL(containerName) @@ -1098,7 +1071,6 @@ func (s scenarioHelper) getContainerURL(c asserter, containerName string) azblob return containerURL } -//nolint func (scenarioHelper) getRawS3AccountURL(c asserter, region string) url.URL { rawURL := fmt.Sprintf("https://s3%s.amazonaws.com", common.IffString(region == "", "", "-"+region)) @@ -1109,7 +1081,6 @@ func (scenarioHelper) getRawS3AccountURL(c asserter, region string) url.URL { } // TODO: Possibly add virtual-hosted-style and dual stack support. Currently use path style for testing. 
-//nolint func (scenarioHelper) getRawS3BucketURL(c asserter, region string, bucketName string) url.URL { rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s", common.IffString(region == "", "", "-"+region), bucketName) @@ -1119,7 +1090,6 @@ func (scenarioHelper) getRawS3BucketURL(c asserter, region string, bucketName st return *fullURL } -//nolint func (scenarioHelper) getRawS3ObjectURL(c asserter, region string, bucketName string, objectName string) url.URL { rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s/%s", common.IffString(region == "", "", "-"+region), bucketName, objectName) @@ -1129,7 +1099,6 @@ func (scenarioHelper) getRawS3ObjectURL(c asserter, region string, bucketName st return *fullURL } -//nolint func (scenarioHelper) getRawFileURLWithSAS(c asserter, shareName string, fileName string) url.URL { credential, err := getGenericCredentialForFile("") c.AssertNoErr(err) @@ -1138,7 +1107,6 @@ func (scenarioHelper) getRawFileURLWithSAS(c asserter, shareName string, fileNam return fileURLWithSAS.URL() } -//nolint func (scenarioHelper) getRawShareURLWithSAS(c asserter, shareName string) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) diff --git a/e2etest/scenario_os_helpers.go b/e2etest/scenario_os_helpers.go index 57038bd0c9..1f055fc4cb 100644 --- a/e2etest/scenario_os_helpers.go +++ b/e2etest/scenario_os_helpers.go @@ -1,4 +1,6 @@ +//go:build !windows // +build !windows + // Copyright © Microsoft // // Permission is hereby granted, free of charge, to any person obtaining a copy @@ -30,34 +32,28 @@ import ( type osScenarioHelper struct{} // set file attributes to test file -//nolint func (osScenarioHelper) setAttributesForLocalFile() error { panic("should never be called") } -//nolint func (osScenarioHelper) setAttributesForLocalFiles(c asserter, dirPath string, fileList []string, attrList []string) { panic("should never be called") } -//nolint func (osScenarioHelper) getFileDates(c asserter, filePath string) (createdTime, lastWriteTime time.Time) { panic("should never be called") } -//nolint func (osScenarioHelper) getFileAttrs(c asserter, filepath string) *uint32 { var ret uint32 return &ret } -//nolint func (osScenarioHelper) getFileSDDLString(c asserter, filepath string) *string { ret := "" return &ret } -//nolint func (osScenarioHelper) setFileSDDLString(c asserter, filepath string, sddldata string) { panic("should never be called") } diff --git a/e2etest/scenario_os_helpers_for_windows.go b/e2etest/scenario_os_helpers_for_windows.go index ad7fde9ae0..7a2206e355 100644 --- a/e2etest/scenario_os_helpers_for_windows.go +++ b/e2etest/scenario_os_helpers_for_windows.go @@ -39,7 +39,7 @@ import ( type osScenarioHelper struct{} // set file attributes to test file -func (osScenarioHelper) setAttributesForLocalFile(filePath string, attrList []string) error { //nolint:golint,unused +func (osScenarioHelper) setAttributesForLocalFile(filePath string, attrList []string) error { lpFilePath, err := syscall.UTF16PtrFromString(filePath) if err != nil { return err @@ -65,7 +65,7 @@ func (osScenarioHelper) setAttributesForLocalFile(filePath string, attrList []st return err } -func (s osScenarioHelper) setAttributesForLocalFiles(c asserter, dirPath string, fileList []string, attrList []string) { //nolint:golint,unused +func (s osScenarioHelper) setAttributesForLocalFiles(c asserter, dirPath string, fileList []string, attrList []string) { for _, fileName := range 
fileList { err := s.setAttributesForLocalFile(filepath.Join(dirPath, fileName), attrList) c.AssertNoErr(err) diff --git a/ste/downloader-blob.go b/ste/downloader-blob.go index 456b62a0b1..02b141f195 100644 --- a/ste/downloader-blob.go +++ b/ste/downloader-blob.go @@ -49,7 +49,7 @@ func (bd *blobDownloader) CreateSymlink(jptm IJobPartTransferMgr) error { return err } symsip := sip.(ISymlinkBearingSourceInfoProvider) // blob always implements this - symlinkInfo, err := symsip.ReadLink() + symlinkInfo, _ := symsip.ReadLink() // create the link err = os.Symlink(symlinkInfo, jptm.Info().Destination) diff --git a/ste/sourceInfoProvider-Blob.go b/ste/sourceInfoProvider-Blob.go index 3f21f71f90..7f3aa96447 100644 --- a/ste/sourceInfoProvider-Blob.go +++ b/ste/sourceInfoProvider-Blob.go @@ -21,7 +21,7 @@ package ste import ( - "io/ioutil" + "io" "strings" "time" @@ -57,7 +57,7 @@ func (p *blobSourceInfoProvider) ReadLink() (string, error) { return "", err } - symlinkBuf, err := ioutil.ReadAll(resp.Body(azblob.RetryReaderOptions{ + symlinkBuf, err := io.ReadAll(resp.Body(azblob.RetryReaderOptions{ MaxRetryRequests: 5, NotifyFailedRead: common.NewReadLogFunc(p.jptm, uri), })) From 1f5e00237b47bb53ad521ecab4a86940dfc12e0e Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Tue, 21 Feb 2023 08:55:32 -0800 Subject: [PATCH 45/50] add another lint flag --- ste/downloader-azureFiles_windows.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ste/downloader-azureFiles_windows.go b/ste/downloader-azureFiles_windows.go index 13952ac782..021b65b16c 100644 --- a/ste/downloader-azureFiles_windows.go +++ b/ste/downloader-azureFiles_windows.go @@ -78,7 +78,7 @@ func (*azureFilesDownloader) PutSMBProperties(sip ISMBPropertyBearingSourceInfoP err = windows.SetFileTime(fd, &smbCreationFileTime, nil, pLastWriteTime) if err != nil { - err = fmt.Errorf("attempted update file times: %w", err) //nolint:staticcheck + err = fmt.Errorf("attempted update file times: %w", err) //nolint:staticcheck,ineffassign // TODO: return here on error? or ignore } return nil From c79479ecffd60d4303c436603db3206d6d07e524 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Tue, 21 Feb 2023 10:13:47 -0800 Subject: [PATCH 46/50] Add log for seek on error in azbfs --- azbfs/zc_util_validate.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/azbfs/zc_util_validate.go b/azbfs/zc_util_validate.go index 149e6eee04..4841bf42ed 100644 --- a/azbfs/zc_util_validate.go +++ b/azbfs/zc_util_validate.go @@ -58,7 +58,10 @@ func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) int64 { if err != nil { panic("failed to seek stream") } - _, _ = body.Seek(0, io.SeekStart) + _, err = body.Seek(0, io.SeekStart) + if err != nil { + logf("error seeking stream (%s)", err.Error()) + } return count } From 6b93690ca6d7b97e88af2f614a2aa3d1ad287a9d Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Mon, 27 Feb 2023 10:20:36 -0800 Subject: [PATCH 47/50] Brought back isObjectAce --- sddl/sddlHelper_linux.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/sddl/sddlHelper_linux.go b/sddl/sddlHelper_linux.go index 4d378b39e4..db67bf608b 100644 --- a/sddl/sddlHelper_linux.go +++ b/sddl/sddlHelper_linux.go @@ -871,6 +871,25 @@ func aceRightsToString(aceRights uint32) string { return fmt.Sprintf("0x%x", aceRights) } +// Does the aceType correspond to an object ACE? +// We don't support object ACEs. 
+func isObjectAce(aceType byte) bool { + switch aceType { + case ACCESS_ALLOWED_OBJECT_ACE_TYPE, + ACCESS_DENIED_OBJECT_ACE_TYPE, + SYSTEM_AUDIT_OBJECT_ACE_TYPE, + SYSTEM_ALARM_OBJECT_ACE_TYPE, + ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE, + ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE, + SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE, + SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE: + return true + + default: + return false + } +} + // Returns true for aceTypes that we support. // TODO: Allow SACL ACE type, conditional ACE Types. func isUnsupportedAceType(aceType byte) bool { From 2435b8fd8857a2881f7202edc7b0e9df4d97e888 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Mon, 27 Feb 2023 10:23:24 -0800 Subject: [PATCH 48/50] add unused label --- sddl/sddlHelper_linux.go | 1 + 1 file changed, 1 insertion(+) diff --git a/sddl/sddlHelper_linux.go b/sddl/sddlHelper_linux.go index db67bf608b..9d82ed355e 100644 --- a/sddl/sddlHelper_linux.go +++ b/sddl/sddlHelper_linux.go @@ -873,6 +873,7 @@ func aceRightsToString(aceRights uint32) string { // Does the aceType correspond to an object ACE? // We don't support object ACEs. +//nolint:unused func isObjectAce(aceType byte) bool { switch aceType { case ACCESS_ALLOWED_OBJECT_ACE_TYPE, From 90aeba6c5750d9fdecb65d7eb70401cda2913d46 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Mon, 27 Feb 2023 10:27:53 -0800 Subject: [PATCH 49/50] fixed nolint tag --- sddl/sddlHelper_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sddl/sddlHelper_linux.go b/sddl/sddlHelper_linux.go index 9d82ed355e..e4c586878b 100644 --- a/sddl/sddlHelper_linux.go +++ b/sddl/sddlHelper_linux.go @@ -873,7 +873,7 @@ func aceRightsToString(aceRights uint32) string { // Does the aceType correspond to an object ACE? // We don't support object ACEs. -//nolint:unused +//nolint:deadcode func isObjectAce(aceType byte) bool { switch aceType { case ACCESS_ALLOWED_OBJECT_ACE_TYPE, From ac396cd7b0e926a3689805c0b82a01408a44f667 Mon Sep 17 00:00:00 2001 From: Gauri Prasad Date: Mon, 27 Feb 2023 10:31:03 -0800 Subject: [PATCH 50/50] added unused --- sddl/sddlHelper_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sddl/sddlHelper_linux.go b/sddl/sddlHelper_linux.go index e4c586878b..5d5384ee8f 100644 --- a/sddl/sddlHelper_linux.go +++ b/sddl/sddlHelper_linux.go @@ -873,7 +873,7 @@ func aceRightsToString(aceRights uint32) string { // Does the aceType correspond to an object ACE? // We don't support object ACEs. -//nolint:deadcode +//nolint:deadcode,unused func isObjectAce(aceType byte) bool { switch aceType { case ACCESS_ALLOWED_OBJECT_ACE_TYPE,