
Commit a75f510

tmp
1 parent 1320784 commit a75f510

84 files changed: +659 −1060 lines changed


WORKSPACE

Lines changed: 9 additions & 9 deletions
@@ -167,14 +167,14 @@ load(
 go_download_sdk(
     name = "go_sdk",
     sdks = {
-        "darwin_amd64": ("go1.23.12.darwin-amd64.tar.gz", "4917e3cd04f67975d1f29a97ca773689eec7192529b5ac3cb75d8f1e962a9e5b"),
-        "darwin_arm64": ("go1.23.12.darwin-arm64.tar.gz", "52ed7281c4f7d72510ee52251ecdac79ac9b54328043233b798c319ee76c1661"),
-        "linux_amd64": ("go1.23.12.linux-amd64.tar.gz", "1caa6570a988766707ca5cd42eb9a3f5f05082d9d1e322782cadb600332d859e"),
-        "linux_arm64": ("go1.23.12.linux-arm64.tar.gz", "a148957a75f61c5c745097e657ce2f1742dd7d513075d445546817035a601650"),
-        "linux_s390x": ("go1.23.12.linux-s390x.tar.gz", "458cc3169bdaf49c67e1e8e3c9ba532243256fab61761209f61869c0cc6a7ec1"),
-        "windows_amd64": ("go1.23.12.windows-amd64.tar.gz", "57783b115bc5f4301d5aadbf723c8abce458e48f405ddcdaf6f94bb435b97dab"),
+        "darwin_amd64": ("go1.23.12.darwin-amd64.tar.gz", "34457131f14281e21e25493d68e7519ccf26342d176dac36a4fc5dbf5ef122d9"),
+        "darwin_arm64": ("go1.23.12.darwin-arm64.tar.gz", "30e0735ab9ccda203946536d24afe895abd1a1d3f35ad199f9768ccbdd5d60bc"),
+        "linux_amd64": ("go1.23.12.linux-amd64.tar.gz", "0cac0ac930ecb9458b8a0a7969cbf735c5884d24c879c97eb28a8997eca986fa"),
+        "linux_arm64": ("go1.23.12.linux-arm64.tar.gz", "528601fc8fb2c7e5ce8b7ae7651fd4fce2450bbef687beb96616edc5a9effa41"),
+        "linux_s390x": ("go1.23.12.linux-s390x.tar.gz", "f3f11bbb731da6716776d1c29a2db3d1063fa0a9f8c00636e6a77793ba79e2e3"),
+        "windows_amd64": ("go1.23.12.windows-amd64.tar.gz", "71b5b5b86b3a5ff9f124e21984abd874a6bfeb438f368de2eee7c60a25a19c94"),
     },
-    urls = ["https://storage.googleapis.com/public-bazel-artifacts/go/20251015-212755/{}"],
+    urls = ["https://storage.googleapis.com/public-bazel-artifacts/go/20250818-202337/{}"],
     version = "1.23.12",
 )

@@ -658,8 +658,8 @@ go_download_sdk(
     # able to provide additional diagnostic information such as the expected version of OpenSSL.
     experiments = ["boringcrypto"],
     sdks = {
-        "linux_amd64": ("go1.23.12fips.linux-amd64.tar.gz", "8c3472d399280c511e36f4abc6673d4464674f1e1291a33c39b0a2264fe19271"),
+        "linux_amd64": ("go1.23.12fips.linux-amd64.tar.gz", "9c58fd7137b4c9d387a5c37fd2e728bc5d39357c7f8ba3358bcae513704c2983"),
     },
-    urls = ["https://storage.googleapis.com/public-bazel-artifacts/go/20251015-212755/{}"],
+    urls = ["https://storage.googleapis.com/public-bazel-artifacts/go/20250818-202337/{}"],
     version = "1.23.12fips",
 )
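Each `sdks` entry pairs a tarball name with its SHA-256 digest; `go_download_sdk` recomputes the digest of the downloaded archive and fails the build on a mismatch, which is why every URL bump above must be paired with fresh hashes. A minimal Go sketch of the equivalent check — the local file path is hypothetical, the expected digest is the pinned linux_amd64 value above:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// Hypothetical local copy of one of the pinned SDK tarballs.
	const path = "go1.23.12.linux-amd64.tar.gz"
	// The pinned digest from the sdks dict above.
	const want = "0cac0ac930ecb9458b8a0a7969cbf735c5884d24c879c97eb28a8997eca986fa"

	f, err := os.Open(path)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Stream the file through SHA-256 rather than reading it into memory.
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}
	got := hex.EncodeToString(h.Sum(nil))
	if got != want {
		log.Fatalf("checksum mismatch: got %s, want %s", got, want)
	}
	fmt.Println("checksum OK")
}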

build/bazelutil/distdir_files.bzl

Lines changed: 7 additions & 7 deletions
@@ -1250,13 +1250,13 @@ DISTDIR_FILES = {
     "https://storage.googleapis.com/public-bazel-artifacts/c-deps/20241202-211651/libproj_foreign.macos.20241202-211651.tar.gz": "b0a672f24748c24c941884c04413fd8e20afd6bca95e0451f3cfb15dd2cc1eb9",
     "https://storage.googleapis.com/public-bazel-artifacts/c-deps/20241202-211651/libproj_foreign.macosarm.20241202-211651.tar.gz": "a35706bf0112e9652d6e41501113ee0f0f392b67e495979c867f71f17cf6ce7e",
     "https://storage.googleapis.com/public-bazel-artifacts/c-deps/20241202-211651/libproj_foreign.windows.20241202-211651.tar.gz": "a33b64a784e856dbb3c84a52075227c48f0ff52a858639c3737a8911201d0c65",
-    "https://storage.googleapis.com/public-bazel-artifacts/go/20251015-212755/go1.23.12.darwin-amd64.tar.gz": "4917e3cd04f67975d1f29a97ca773689eec7192529b5ac3cb75d8f1e962a9e5b",
-    "https://storage.googleapis.com/public-bazel-artifacts/go/20251015-212755/go1.23.12.darwin-arm64.tar.gz": "52ed7281c4f7d72510ee52251ecdac79ac9b54328043233b798c319ee76c1661",
-    "https://storage.googleapis.com/public-bazel-artifacts/go/20251015-212755/go1.23.12.linux-amd64.tar.gz": "1caa6570a988766707ca5cd42eb9a3f5f05082d9d1e322782cadb600332d859e",
-    "https://storage.googleapis.com/public-bazel-artifacts/go/20251015-212755/go1.23.12.linux-arm64.tar.gz": "a148957a75f61c5c745097e657ce2f1742dd7d513075d445546817035a601650",
-    "https://storage.googleapis.com/public-bazel-artifacts/go/20251015-212755/go1.23.12.linux-s390x.tar.gz": "458cc3169bdaf49c67e1e8e3c9ba532243256fab61761209f61869c0cc6a7ec1",
-    "https://storage.googleapis.com/public-bazel-artifacts/go/20251015-212755/go1.23.12.windows-amd64.tar.gz": "57783b115bc5f4301d5aadbf723c8abce458e48f405ddcdaf6f94bb435b97dab",
-    "https://storage.googleapis.com/public-bazel-artifacts/go/20251015-212755/go1.23.12fips.linux-amd64.tar.gz": "8c3472d399280c511e36f4abc6673d4464674f1e1291a33c39b0a2264fe19271",
+    "https://storage.googleapis.com/public-bazel-artifacts/go/20250818-202337/go1.23.12.darwin-amd64.tar.gz": "34457131f14281e21e25493d68e7519ccf26342d176dac36a4fc5dbf5ef122d9",
+    "https://storage.googleapis.com/public-bazel-artifacts/go/20250818-202337/go1.23.12.darwin-arm64.tar.gz": "30e0735ab9ccda203946536d24afe895abd1a1d3f35ad199f9768ccbdd5d60bc",
+    "https://storage.googleapis.com/public-bazel-artifacts/go/20250818-202337/go1.23.12.linux-amd64.tar.gz": "0cac0ac930ecb9458b8a0a7969cbf735c5884d24c879c97eb28a8997eca986fa",
+    "https://storage.googleapis.com/public-bazel-artifacts/go/20250818-202337/go1.23.12.linux-arm64.tar.gz": "528601fc8fb2c7e5ce8b7ae7651fd4fce2450bbef687beb96616edc5a9effa41",
+    "https://storage.googleapis.com/public-bazel-artifacts/go/20250818-202337/go1.23.12.linux-s390x.tar.gz": "f3f11bbb731da6716776d1c29a2db3d1063fa0a9f8c00636e6a77793ba79e2e3",
+    "https://storage.googleapis.com/public-bazel-artifacts/go/20250818-202337/go1.23.12.windows-amd64.tar.gz": "71b5b5b86b3a5ff9f124e21984abd874a6bfeb438f368de2eee7c60a25a19c94",
+    "https://storage.googleapis.com/public-bazel-artifacts/go/20250818-202337/go1.23.12fips.linux-amd64.tar.gz": "9c58fd7137b4c9d387a5c37fd2e728bc5d39357c7f8ba3358bcae513704c2983",
     "https://storage.googleapis.com/public-bazel-artifacts/java/railroad/rr-1.63-java8.zip": "d2791cd7a44ea5be862f33f5a9b3d40aaad9858455828ebade7007ad7113fb41",
     "https://storage.googleapis.com/public-bazel-artifacts/js/rules_jest-v0.18.4.tar.gz": "d3bb833f74b8ad054e6bff5e41606ff10a62880cc99e4d480f4bdfa70add1ba7",
     "https://storage.googleapis.com/public-bazel-artifacts/js/rules_js-v1.42.3.tar.gz": "2cfb3875e1231cefd3fada6774f2c0c5a99db0070e0e48ea398acbff7c6c765b",
(file name hidden in the large-commit view)

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-961e9c3dd4c58115ca7466104670e5ddc79150e2
+309f11146b97839ffbba1ac245b7aa901e3dbbcb

dev

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@ fi
 set -euo pipefail

 # Bump this counter to force rebuilding `dev` on all machines.
-DEV_VERSION=25201
+DEV_VERSION=110

 THIS_DIR=$(cd "$(dirname "$0")" && pwd)
 BINARY_DIR=$THIS_DIR/bin/dev-versions

pkg/backup/backup_test.go

Lines changed: 5 additions & 36 deletions
@@ -1294,12 +1294,8 @@ func TestRestoreCheckpointing(t *testing.T) {
 	defer jobs.TestingSetProgressThresholds()()

 	// totalEntries represents the number of entries to appear in the persisted frontier.
-	const totalEntries = 7
-	const entriesBeforePause = 4
-	processedSpans := struct {
-		syncutil.Mutex
-		spans roachpb.Spans
-	}{}
+	totalEntries := 7
+	entriesBeforePause := 4
 	entriesCount := 0
 	var alreadyPaused atomic.Bool
 	postResumeCount := 0
@@ -1310,7 +1306,7 @@ func TestRestoreCheckpointing(t *testing.T) {
 	knobs := base.TestingKnobs{
 		DistSQL: &execinfra.TestingKnobs{
 			BackupRestoreTestingKnobs: &sql.BackupRestoreTestingKnobs{
-				RunAfterProcessingRestoreSpanEntry: func(_ context.Context, entry *execinfrapb.RestoreSpanEntry) error {
+				RunAfterProcessingRestoreSpanEntry: func(_ context.Context, _ *execinfrapb.RestoreSpanEntry) error {
 					// Because the restore processor has several workers that
 					// concurrently send addsstable requests and because all workers will
 					// wait on the lock below, when one flush gets blocked on the
@@ -1322,20 +1318,12 @@ func TestRestoreCheckpointing(t *testing.T) {
 					// checking if the job was paused in each request before it began
 					// waiting for the lock.
 					wasPausedBeforeWaiting := alreadyPaused.Load()
-
 					mu.Lock()
 					defer mu.Unlock()
 					if entriesCount == entriesBeforePause {
 						close(waitForProgress)
 						<-blockDBRestore
-					} else if entriesCount < entriesBeforePause {
-						// We save all spans from before the pause to ensure that they have
-						// been checkpointed and saved in the job progress.
-						processedSpans.Lock()
-						defer processedSpans.Unlock()
-						processedSpans.spans = append(processedSpans.spans, entry.Span)
 					}
-
 					entriesCount++
 					if wasPausedBeforeWaiting {
 						postResumeCount++
@@ -1378,25 +1366,8 @@ func TestRestoreCheckpointing(t *testing.T) {
 	// Pause the job after some progress has been logged.
 	<-waitForProgress

-	// To ensure that progress has been persisted, we wait until all processed
-	// spans from before the pause are stored in the job progress.
-	testutils.SucceedsSoon(t, func() error {
-		jobProgress := jobutils.GetJobProgress(t, sqlDB, jobID)
-		checkpointedSpans := jobProgress.GetRestore().Checkpoint
-		checkpointedSpanGroup := roachpb.SpanGroup{}
-		for _, span := range checkpointedSpans {
-			checkpointedSpanGroup.Add(span.Span)
-		}
-
-		processedSpans.Lock()
-		defer processedSpans.Unlock()
-		for _, span := range processedSpans.spans {
-			if !checkpointedSpanGroup.Encloses(span) {
-				return errors.Newf("span %s was processed but not saved in job progress yet")
-			}
-		}
-		return nil
-	})
+	// To ensure that progress gets persisted, sleep well beyond the test-only job update interval.
+	time.Sleep(time.Second)

 	sqlDB.Exec(t, `PAUSE JOB $1`, &jobID)
 	jobutils.WaitForJobToPause(t, sqlDB, jobID)
@@ -7586,8 +7557,6 @@ func TestBackupExportRequestTimeout(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)

-	skip.UnderDeadlock(t)
-
 	allowRequest := make(chan struct{})
 	defer close(allowRequest)
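The new version trades the deleted checkpoint-enclosure poll for a flat one-second sleep. For contrast, a retry-until-deadline helper in the spirit of testutils.SucceedsSoon — a minimal sketch under assumed timings, not CockroachDB's actual implementation — looks like this:

package main

import (
	"fmt"
	"time"
)

// succeedsSoon retries fn with exponential backoff until it returns nil
// or the deadline expires, returning the last error on timeout.
func succeedsSoon(timeout time.Duration, fn func() error) error {
	deadline := time.Now().Add(timeout)
	wait := 5 * time.Millisecond
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("condition never met: %w", err)
		}
		time.Sleep(wait)
		if wait < 500*time.Millisecond {
			wait *= 2 // back off, capped at 500ms between attempts
		}
	}
}

func main() {
	start := time.Now()
	// Simulate a condition (e.g. "progress persisted") that becomes true
	// after 50ms; the poll returns as soon as it does.
	err := succeedsSoon(2*time.Second, func() error {
		if time.Since(start) < 50*time.Millisecond {
			return fmt.Errorf("not yet persisted")
		}
		return nil
	})
	fmt.Println("result:", err)
}

The tradeoff is the usual one: polling returns as soon as the condition holds and fails loudly with the last error, while a fixed sleep is simpler but always pays the full wait and silently passes if the interval was merely long enough.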

pkg/backup/backupsink/file_sst_sink_test.go

Lines changed: 16 additions & 28 deletions
@@ -9,7 +9,6 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"math/rand"
 	"reflect"
 	"strconv"
 	"strings"
@@ -123,15 +122,6 @@ func TestFileSSTSinkExtendOneFile(t *testing.T) {
 	require.Equal(t, 1, len(progDetails.Files))
 }

-func randomValue(n int64) []byte {
-	// Create random data so that it does not compress well.
-	b := make([]byte, n)
-	for i := range b {
-		b[i] = byte(rand.Int())
-	}
-	return b
-}
-
 // TestFileSSTSinkWrite tests the contents of flushed files and the internal
 // unflushed files of the FileSSTSink under different write scenarios. Each test
 // writes a sequence of exportedSpans into a FileSSTSink. The test then verifies
@@ -143,12 +133,6 @@ func TestFileSSTSinkWrite(t *testing.T) {
 	defer log.Scope(t).Close(t)

 	ctx := context.Background()
-	testTargetFileSize := int64(10 << 10)
-
-	// Override the fileSpanByteLimit so we can test going over the limit without
-	// needing large buffers that may oom the test node.
-	defer func(oldLimit int64) { fileSpanByteLimit = oldLimit }(fileSpanByteLimit)
-	fileSpanByteLimit = testTargetFileSize / 2

 	type testCase struct {
 		name        string
@@ -161,7 +145,8 @@ func TestFileSSTSinkWrite(t *testing.T) {
 		//
 		// TODO (msbutler): we currently don't test expected error handling. If this
 		// is non-empty, we just skip the test.
-		errorExplanation string
+		errorExplanation  string
+		noSSTSizeOverride bool
 	}

 	for _, tt := range []testCase{{name: "out-of-order-key-boundary",
@@ -293,7 +278,7 @@ func TestFileSSTSinkWrite(t *testing.T) {
 		{
 			name: "size-flush",
 			exportSpans: []ExportedSpan{
-				newExportedSpanBuilder("a", "c").withKVs([]kvAndTS{{key: "a", timestamp: 10, value: randomValue(testTargetFileSize)}, {key: "b", timestamp: 10}}).build(),
+				newExportedSpanBuilder("a", "c").withKVs([]kvAndTS{{key: "a", timestamp: 10, value: make([]byte, 20<<20)}, {key: "b", timestamp: 10}}).build(),
 				newExportedSpanBuilder("d", "f").withKVs([]kvAndTS{{key: "d", timestamp: 10}, {key: "e", timestamp: 10}}).build(),
 			},
 			flushedSpans: []roachpb.Spans{
@@ -307,7 +292,7 @@ func TestFileSSTSinkWrite(t *testing.T) {
 			// No flush can occur between two versions of the same key. Further, we must combine flushes which split a row.
 			name: "no-size-flush-if-mid-mvcc",
 			exportSpans: []ExportedSpan{
-				newRawExportedSpanBuilder(s2k0("a"), s2k0("c"), s2k0("c")).withKVs([]kvAndTS{{key: "a", timestamp: 10, value: randomValue(testTargetFileSize)}, {key: "c", timestamp: 10}}).build(),
+				newRawExportedSpanBuilder(s2k0("a"), s2k0("c"), s2k0("c")).withKVs([]kvAndTS{{key: "a", timestamp: 10, value: make([]byte, 20<<20)}, {key: "c", timestamp: 10}}).build(),
 				newRawExportedSpanBuilder(s2k0("c"), s2k0("f"), s2k0("f")).withKVs([]kvAndTS{{key: "c", timestamp: 8}, {key: "f", timestamp: 10}}).build(),
 			},
 			flushedSpans: []roachpb.Spans{},
@@ -320,9 +305,9 @@ func TestFileSSTSinkWrite(t *testing.T) {
 			name: "no-size-flush-mid-col-family",
 			exportSpans: []ExportedSpan{
 				newRawExportedSpanBuilder(s2kWithColFamily("c", 0), s2kWithColFamily("c", 1), s2kWithColFamily("c", 1)).withKVs([]kvAndTS{
-					{key: "c", timestamp: 10, value: randomValue(testTargetFileSize)}}).build(),
+					{key: "c", timestamp: 10, value: make([]byte, 20<<20)}}).build(),
 				newRawExportedSpanBuilder(s2kWithColFamily("c", 1), s2kWithColFamily("c", 2), s2kWithColFamily("c", 2)).withKVs([]kvAndTS{
-					{key: "c", timestamp: 10, value: randomValue(testTargetFileSize / 2)}}).buildWithEncoding(func(stingedKey string) roachpb.Key { return s2kWithColFamily(stingedKey, 1) }),
+					{key: "c", timestamp: 10, value: make([]byte, 20<<20)}}).buildWithEncoding(func(stingedKey string) roachpb.Key { return s2kWithColFamily(stingedKey, 1) }),
 			},
 			flushedSpans: []roachpb.Spans{},
 			unflushedSpans: []roachpb.Spans{
@@ -333,7 +318,7 @@ func TestFileSSTSinkWrite(t *testing.T) {
 			// It's safe to flush at the range boundary.
 			name: "size-flush-at-range-boundary",
 			exportSpans: []ExportedSpan{
-				newRawExportedSpanBuilder(s2k("a"), s2k("d"), s2k("d")).withKVs([]kvAndTS{{key: "a", timestamp: 10, value: randomValue(testTargetFileSize)}, {key: "c", timestamp: 10}}).build(),
+				newRawExportedSpanBuilder(s2k("a"), s2k("d"), s2k("d")).withKVs([]kvAndTS{{key: "a", timestamp: 10, value: make([]byte, 20<<20)}, {key: "c", timestamp: 10}}).build(),
 			},
 			flushedSpans: []roachpb.Spans{
 				{{Key: s2k("a"), EndKey: s2k("d")}},
@@ -347,7 +332,7 @@ func TestFileSSTSinkWrite(t *testing.T) {
 			// row between two column families.
 			name: "trim-resume-key",
 			exportSpans: []ExportedSpan{
-				newRawExportedSpanBuilder(s2k0("a"), s2k0("c"), s2k("c")).withKVs([]kvAndTS{{key: "a", timestamp: 10, value: randomValue(testTargetFileSize)}}).build(),
+				newRawExportedSpanBuilder(s2k0("a"), s2k0("c"), s2k("c")).withKVs([]kvAndTS{{key: "a", timestamp: 10, value: make([]byte, 20<<20)}}).build(),
 			},
 			flushedSpans: []roachpb.Spans{
 				{{Key: s2k0("a"), EndKey: s2k("c")}},
@@ -359,23 +344,24 @@ func TestFileSSTSinkWrite(t *testing.T) {
 			// even if the next span's start key matches the file's end key.
 			name: "file-size-cut",
 			exportSpans: []ExportedSpan{
-				newExportedSpanBuilder("a", "c").withKVs([]kvAndTS{{key: "a", timestamp: 10, value: randomValue(fileSpanByteLimit)}, {key: "b", timestamp: 10}}).build(),
+				newExportedSpanBuilder("a", "c").withKVs([]kvAndTS{{key: "a", timestamp: 10, value: make([]byte, 64<<20)}, {key: "b", timestamp: 10}}).build(),
 				newExportedSpanBuilder("c", "f").withKVs([]kvAndTS{{key: "c", timestamp: 10}, {key: "e", timestamp: 10}}).build(),
 			},
 			flushedSpans: []roachpb.Spans{},
 			unflushedSpans: []roachpb.Spans{
 				{{Key: s2k0("a"), EndKey: s2k0("c")}, {Key: s2k0("c"), EndKey: s2k0("f")}},
 			},
+			noSSTSizeOverride: true,
 		},
 		{
 			// No file cut can occur between the two column families of the same row,
 			// even if the file is sufficiently large to get cut.
 			name: "no-file-cut-mid-col-family",
 			exportSpans: []ExportedSpan{
 				newRawExportedSpanBuilder(s2kWithColFamily("c", 0), s2kWithColFamily("c", 1), s2kWithColFamily("c", 1)).withKVs([]kvAndTS{
-					{key: "c", timestamp: 10, value: randomValue(testTargetFileSize)}}).build(),
+					{key: "c", timestamp: 10, value: make([]byte, 65<<20)}}).build(),
 				newRawExportedSpanBuilder(s2kWithColFamily("c", 1), s2kWithColFamily("c", 2), s2kWithColFamily("c", 2)).withKVs([]kvAndTS{
-					{key: "c", timestamp: 10, value: randomValue(testTargetFileSize / 2)}}).buildWithEncoding(func(stingedKey string) roachpb.Key { return s2kWithColFamily(stingedKey, 1) }),
+					{key: "c", timestamp: 10, value: make([]byte, 20<<20)}}).buildWithEncoding(func(stingedKey string) roachpb.Key { return s2kWithColFamily(stingedKey, 1) }),
 			},
 			flushedSpans: []roachpb.Spans{},
 			unflushedSpans: []roachpb.Spans{
@@ -391,7 +377,9 @@ func TestFileSSTSinkWrite(t *testing.T) {
 				return
 			}
 			st := cluster.MakeTestingClusterSettings()
-			targetFileSize.Override(ctx, &st.SV, testTargetFileSize)
+			if !tt.noSSTSizeOverride {
+				targetFileSize.Override(ctx, &st.SV, 10<<10)
+			}

 			sink, store := fileSSTSinkTestSetup(t, st, elide)
 			defer func() {
@@ -546,7 +534,7 @@ func TestFileSSTSinkStats(t *testing.T) {
 			sinkStats{hlc.Timestamp{WallTime: 10}, 3, 3, 0, 0, 0, 1}},
 		{
 			// Write an exported span that comes after all spans so far. This span has enough data for a size flush.
-			newExportedSpanBuilder("g", "h").withKVs([]kvAndTS{{key: "g", timestamp: 10, value: randomValue(10 << 10)}}).build(),
+			newExportedSpanBuilder("g", "h").withKVs([]kvAndTS{{key: "g", timestamp: 10, value: make([]byte, 20<<20)}}).build(),
 			sinkStats{hlc.Timestamp{WallTime: 0}, 0, 4, 1, 0, 1, 1}},
 		{
 			// Write the first exported span after the flush.
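The deleted randomValue helper existed, per its own comment, so that test values would not compress well, keeping a value's logical size visible to size-based flush decisions; the replacement values from make([]byte, n) are zero-filled and compress almost completely. As an aside, a self-contained sketch of that difference using only the standard library:

package main

import (
	"bytes"
	"compress/gzip"
	"crypto/rand"
	"fmt"
)

// gzipSize returns the gzip-compressed size of b in bytes.
func gzipSize(b []byte) int {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	w.Write(b)
	w.Close()
	return buf.Len()
}

func main() {
	const n = 1 << 20 // 1 MiB

	zeros := make([]byte, n) // zero-filled, like the make([]byte, 20<<20) values above
	random := make([]byte, n)
	rand.Read(random) // incompressible, like the removed randomValue helper

	// Zeros collapse to roughly a kilobyte; random data stays near 1 MiB.
	fmt.Printf("zeros:  %d -> %d bytes\n", n, gzipSize(zeros))
	fmt.Printf("random: %d -> %d bytes\n", n, gzipSize(random))
}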

pkg/backup/restore_job.go

Lines changed: 0 additions & 6 deletions
@@ -1301,11 +1301,6 @@ func createImportingDescriptors(

 		return nil
 	})
-
-	var opts []multiregion.MakeRegionConfigOption
-	if desc.RegionConfig.SecondaryRegion != "" {
-		opts = append(opts, multiregion.WithSecondaryRegion(desc.RegionConfig.SecondaryRegion))
-	}
 	regionConfig := multiregion.MakeRegionConfig(
 		regionNames,
 		desc.RegionConfig.PrimaryRegion,
@@ -1314,7 +1309,6 @@ func createImportingDescriptors(
 		desc.RegionConfig.Placement,
 		regionTypeDesc.TypeDesc().RegionConfig.SuperRegions,
 		regionTypeDesc.TypeDesc().RegionConfig.ZoneConfigExtensions,
-		opts...,
 	)
 	if err := sql.ApplyZoneConfigFromDatabaseRegionConfig(
 		ctx,
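The deleted block fed MakeRegionConfig through Go's functional-options idiom: conditionally build a slice of option values, then splat it into a variadic parameter. A minimal self-contained sketch of the idiom — the names below are hypothetical stand-ins, not the multiregion package's actual API:

package main

import "fmt"

// regionConfig is a hypothetical stand-in for the configured value.
type regionConfig struct {
	primaryRegion   string
	secondaryRegion string
}

// option plays the role of multiregion.MakeRegionConfigOption.
type option func(*regionConfig)

// withSecondaryRegion mirrors multiregion.WithSecondaryRegion.
func withSecondaryRegion(r string) option {
	return func(c *regionConfig) { c.secondaryRegion = r }
}

// makeRegionConfig applies each option to the base config in order.
func makeRegionConfig(primary string, opts ...option) regionConfig {
	c := regionConfig{primaryRegion: primary}
	for _, opt := range opts {
		opt(&c)
	}
	return c
}

func main() {
	// The deleted code's shape: append the option only when the field is set,
	// then expand the slice into the variadic parameter with opts...
	var opts []option
	if secondary := "us-west1"; secondary != "" {
		opts = append(opts, withSecondaryRegion(secondary))
	}
	fmt.Printf("%+v\n", makeRegionConfig("us-east1", opts...))
}

The idiom lets optional settings stay invisible at the call site when unset, which is why removing the secondary-region option here only required deleting the slice and the opts... argument.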

pkg/backup/show.go

Lines changed: 4 additions & 9 deletions
@@ -741,16 +741,11 @@ func backupShowerDefault(
 		case catalog.DatabaseDescriptor:
 			descriptorType = "database"
 			if desc.IsMultiRegion() {
-				if mrEnum := typeIDToTypeDescriptor[desc.GetRegionConfig().RegionEnumID]; mrEnum != nil {
-					// The enum may not be in the backup, for example in a table
-					// level backup. Jury is out for whether databases should be
-					// shown in table level backups.
-					regions, err := showRegions(mrEnum, desc.GetName())
-					if err != nil {
-						return nil, errors.Wrapf(err, "cannot generate regions column")
-					}
-					regionsDatum = nullIfEmpty(regions)
+				regions, err := showRegions(typeIDToTypeDescriptor[desc.GetRegionConfig().RegionEnumID], desc.GetName())
+				if err != nil {
+					return nil, errors.Wrapf(err, "cannot generate regions column")
 				}
+				regionsDatum = nullIfEmpty(regions)
 			}
 		case catalog.SchemaDescriptor:
 			descriptorType = "schema"
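The deleted guard bound the map lookup and tested it for nil in one statement because, per the removed comment, the region enum may be absent from a table-level backup; the new code passes the lookup result to showRegions unconditionally. A small sketch of the guard pattern itself — the descriptor type below is a hypothetical stand-in, not CockroachDB's catalog type:

package main

import "fmt"

// typeDescriptor stands in for the values held in typeIDToTypeDescriptor.
type typeDescriptor struct{ name string }

func main() {
	typeIDToTypeDescriptor := map[int]*typeDescriptor{
		52: {name: "crdb_internal_region"},
	}

	// A lookup for a missing key returns the value type's zero value,
	// which is nil for a pointer. The removed code bound the result and
	// guarded on nil in one if statement, skipping the work entirely when
	// the enum was not in the backup.
	if mrEnum := typeIDToTypeDescriptor[99]; mrEnum != nil {
		fmt.Println("found:", mrEnum.name)
	} else {
		fmt.Println("enum not in backup; skipping regions column")
	}
}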
