Commit 850cbc2
Merge remote-tracking branch 'origin/master' into go-1.22
csuzhangxc committed Sep 24, 2024
2 parents 34a9244 + dca333c
Showing 81 changed files with 1,252 additions and 478 deletions.
4 changes: 2 additions & 2 deletions Makefile
@@ -33,7 +33,7 @@ TEST_COVER_PACKAGES := go list ./cmd/... ./pkg/... $(foreach mod, $(GO_SUBMODULE

# NOTE: coverage report generated for E2E tests (with `-c`) may not stable, see
# https://github.com/golang/go/issues/23883#issuecomment-381766556
-GO_TEST := $(GO) test -cover -covermode=atomic -coverpkg=$$($(TEST_COVER_PACKAGES))
+GO_TEST := CGO_ENABLED=0 $(GO) test -cover -covermode=atomic -coverpkg=$$($(TEST_COVER_PACKAGES))

default: build

@@ -160,7 +160,7 @@ endif
cp -r charts/tidb-operator tests/images/e2e
cp -r charts/tidb-drainer tests/images/e2e
cp -r manifests tests/images/e2e
-docker build -t "${DOCKER_REPO}/tidb-operator-e2e:${IMAGE_TAG}" tests/images/e2e
+docker build -t "${DOCKER_REPO}/tidb-operator-e2e:${IMAGE_TAG}" --build-arg=TARGETARCH=$(GOARCH) tests/images/e2e

e2e-build: ## Build binaries for test
$(GO_BUILD) -ldflags '$(LDFLAGS)' -o tests/images/e2e/bin/ginkgo github.com/onsi/ginkgo/ginkgo
11 changes: 11 additions & 0 deletions cmd/backup-manager/app/backup/manager.go
@@ -35,6 +35,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
errorutils "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
@@ -101,6 +102,13 @@ func (bm *Manager) ProcessBackup() error {
return errorutils.NewAggregate(errs)
}

+crData, err := json.Marshal(backup)
+if err != nil {
+	klog.Errorf("failed to marshal backup %v to json, err: %v", backup, err)
+} else {
+	klog.Infof("start to process backup: %s", string(crData))
+}

// we treat snapshot backup as restarted if its status is not scheduled when backup pod just start to run
// we will clean backup data before run br command
if backup.Spec.Mode == v1alpha1.BackupModeSnapshot && (backup.Status.Phase != v1alpha1.BackupScheduled || v1alpha1.IsBackupRestart(backup)) {
@@ -132,6 +140,9 @@ func (bm *Manager) ProcessBackup() error {
return bm.performBackup(ctx, backup.DeepCopy(), nil)
}

+klog.Infof("start to connect to tidb server (%s:%d) as the .spec.from field is specified",
+	backup.Spec.From.Host, backup.Spec.From.Port)

// validate and create from db
var db *sql.DB
db, err = bm.validateAndCreateFromDB(ctx, backup.DeepCopy())
8 changes: 8 additions & 0 deletions cmd/backup-manager/app/export/manager.go
@@ -32,6 +32,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
errorutils "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
@@ -99,6 +100,13 @@ func (bm *BackupManager) ProcessBackup() error {
return errorutils.NewAggregate(errs)
}

+crData, err := json.Marshal(backup)
+if err != nil {
+	klog.Errorf("failed to marshal backup %v to json, err: %v", backup, err)
+} else {
+	klog.Infof("start to process backup: %s", string(crData))
+}

reason, err := bm.setOptions(backup)
if err != nil {
errs = append(errs, err)
8 changes: 8 additions & 0 deletions cmd/backup-manager/app/import/manager.go
@@ -27,6 +27,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
errorutils "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/klog/v2"
)

@@ -87,6 +88,13 @@ func (rm *RestoreManager) ProcessRestore() error {
return errorutils.NewAggregate(errs)
}

+crData, err := json.Marshal(restore)
+if err != nil {
+	klog.Errorf("failed to marshal restore %v to json, err: %s", restore, err)
+} else {
+	klog.Infof("start to process restore: %s", string(crData))
+}

rm.setOptions(restore)

return rm.performRestore(ctx, restore.DeepCopy())
11 changes: 11 additions & 0 deletions cmd/backup-manager/app/restore/manager.go
@@ -30,6 +30,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
errorutils "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
@@ -96,12 +97,22 @@ func (rm *Manager) ProcessRestore() error {
return fmt.Errorf("no br config in %s", rm)
}

+crData, err := json.Marshal(restore)
+if err != nil {
+	klog.Errorf("failed to marshal restore %v to json, err: %s", restore, err)
+} else {
+	klog.Infof("start to process restore: %s", string(crData))
+}

if restore.Spec.To == nil {
return rm.performRestore(ctx, restore.DeepCopy(), nil)
}

rm.setOptions(restore)

+klog.Infof("start to connect to tidb server (%s:%d) as the .spec.to field is specified",
+	restore.Spec.To.Host, restore.Spec.To.Port)

var db *sql.DB
var dsn string
err = wait.PollImmediate(constants.PollInterval, constants.CheckTimeout, func() (done bool, err error) {
2 changes: 1 addition & 1 deletion cmd/http-service/Dockerfile
@@ -1,4 +1,4 @@
-FROM pingcap/pingcap-base:v1
+FROM ghcr.io/pingcap-qe/bases/pingcap-base:v1.9.1

ARG TARGETARCH
RUN dnf install -y tzdata bind-utils && dnf clean all
4 changes: 2 additions & 2 deletions
@@ -44,7 +44,7 @@ spec:
pingcap.com/aws-local-ssd: "true"
serviceAccountName: local-storage-admin
containers:
-- image: "quay.io/external_storage/local-volume-provisioner:v2.3.2"
+- image: "registry.k8s.io/sig-storage/local-volume-provisioner:v2.6.0"
name: provisioner
securityContext:
privileged: true
@@ -58,7 +58,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: JOB_CONTAINER_IMAGE
-value: "quay.io/external_storage/local-volume-provisioner:v2.3.2"
+value: "registry.k8s.io/sig-storage/local-volume-provisioner:v2.6.0"
resources:
requests:
cpu: 100m
36 changes: 36 additions & 0 deletions docs/api-references/docs.md
@@ -3394,6 +3394,30 @@ azblob service account credentials.</p>
</tr>
<tr>
<td>
+<code>storageAccount</code></br>
+<em>
+string
+</em>
+</td>
+<td>
+<p>StorageAccount is the storage account of the azure blob storage
+If this field is set, then use this to set backup-manager env
+Otherwise retrieve the storage account from secret</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>sasToken</code></br>
+<em>
+string
+</em>
+</td>
+<td>
+<p>SasToken is the sas token of the storage account</p>
+</td>
+</tr>
+<tr>
+<td>
<code>prefix</code></br>
<em>
string
@@ -12321,6 +12345,18 @@
</tr>
<tr>
<td>
+<code>initWaitTime</code></br>
+<em>
+int
+</em>
+</td>
+<td>
+<p>Wait time before pd get started. This wait time is to allow the new DNS record to propagate,
+ensuring that the PD DNS resolves to the same IP address as the pod.</p>
+</td>
+</tr>
+<tr>
+<td>
<code>mode</code></br>
<em>
string
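For orientation, a minimal sketch of how the new initWaitTime field might be set, assuming (as the surrounding rows suggest) that it lives on the PD spec and is given in seconds; the cluster name and value are illustrative:

apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: basic                # illustrative name
spec:
  pd:
    replicas: 3
    # Assumed placement and unit: wait ~30s before starting PD so the new
    # DNS record can propagate and PD resolves to the pod's own IP.
    initWaitTime: 30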
8 changes: 4 additions & 4 deletions examples/advanced/tidb-cluster.yaml
@@ -824,7 +824,7 @@ spec:
# app.kubernetes.io/component: pump
# annotations:
# node.kubernetes.io/instance-type: some-vm-type
-# tolerations: {}
+# tolerations: []
# configUpdateStrategy: RollingUpdate
# statefulSetUpdateStrategy: RollingUpdate
# podSecurityContext: {}
@@ -868,7 +868,7 @@ spec:
# app.kubernetes.io/component: ticdc
# annotations:
# node.kubernetes.io/instance-type: some-vm-type
-# tolerations: {}
+# tolerations: []
# configUpdateStrategy: RollingUpdate
# statefulSetUpdateStrategy: RollingUpdate
# podSecurityContext: {}
@@ -918,7 +918,7 @@ spec:
# app.kubernetes.io/component: tiflash
# annotations:
# node.kubernetes.io/instance-type: some-vm-type
-# tolerations: {}
+# tolerations: []
# configUpdateStrategy: RollingUpdate
# statefulSetUpdateStrategy: RollingUpdate
# podSecurityContext: {}
@@ -970,7 +970,7 @@ spec:
# # configure the configuration file for TiFlash Proxy process
# proxy: |
# [security]
-# cert-allowed-cn = "CNNAME"
+# cert-allowed-cn = ["CNNAME"]
# # TopologySpreadConstraints for pod scheduling, will overwrite the cluster level spread constraints setting
# # Ref: pkg/apis/pingcap/v1alpha1/types.go#TopologySpreadConstraint
# topologySpreadConstraints:
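Both fixes above are type corrections: tolerations is a YAML list, so its empty form is [] rather than {}, and the TiFlash Proxy cert-allowed-cn option takes an array of common names. A sketch of the populated forms, with illustrative values only:

# tolerations is a list of objects; [] is the correct empty value.
tolerations:
  - key: dedicated           # illustrative taint key
    operator: Equal
    value: tidb
    effect: NoSchedule
# In the proxy config (TOML embedded in YAML), the value is an array
# even for a single common name:
proxy: |
  [security]
  cert-allowed-cn = ["CNNAME"]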
6 changes: 0 additions & 6 deletions examples/backup/backup-azblob.yaml
@@ -6,12 +6,6 @@ metadata:
spec:
cleanPolicy: Delete
# backupType: full
-# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
-# from:
-# host: ${tidb-host}
-# port: ${tidb-port}
-# user: ${tidb-user}
-# secretName: backup-basic-tidb-secret
br:
cluster: basic
clusterNamespace: default
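As a companion to this trimmed example, a sketch of an azblob Backup that exercises the new storageAccount/sasToken fields documented earlier in this diff; the container, prefix, and credential values are placeholders, and Secret-based credentials (secretName) remain the alternative:

apiVersion: pingcap.com/v1alpha1
kind: Backup
metadata:
  name: demo1-backup-azblob
  namespace: default
spec:
  cleanPolicy: Delete
  br:
    cluster: basic
    clusterNamespace: default
  azblob:
    container: my-container            # placeholder
    prefix: my-folder                  # placeholder
    # New in this change: set the account and SAS token directly instead
    # of retrieving them from the Secret referenced by secretName.
    storageAccount: mystorageaccount   # placeholder
    sasToken: ${sas-token}             # placeholder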
6 changes: 0 additions & 6 deletions examples/backup/backup-ebs-local.yaml
@@ -7,12 +7,6 @@ spec:
cleanPolicy: Delete
backupType: full
backupMode: volume-snapshot
-# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
-# from:
-# host: ${tidb-host}
-# port: ${tidb-port}
-# user: ${tidb-user}
-# secretName: backup-basic-tidb-secret
toolImage: localhost:5000/pingcap/br:latest
br:
cluster: basic
6 changes: 0 additions & 6 deletions examples/backup/backup-ebs-minio.yaml
@@ -7,12 +7,6 @@ spec:
cleanPolicy: Delete
backupType: full
backupMode: volume-snapshot
-# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
-# from:
-# host: ${tidb-host}
-# port: ${tidb-port}
-# user: ${tidb-user}
-# secretName: backup-basic-tidb-secret
toolImage: localhost:5000/pingcap/br:latest
br:
cluster: basic
6 changes: 0 additions & 6 deletions examples/backup/backup-local.yaml
@@ -6,12 +6,6 @@ metadata:
spec:
cleanPolicy: Delete
# backupType: full
-# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
-# from:
-# host: ${tidb-host}
-# port: ${tidb-port}
-# user: ${tidb-user}
-# secretName: backup-basic-tidb-secret
br:
cluster: basic
clusterNamespace: default
6 changes: 0 additions & 6 deletions examples/backup/backup-nfs.yaml
@@ -6,12 +6,6 @@ metadata:
spec:
cleanPolicy: Delete
# backupType: full
-# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
-# from:
-# host: ${tidb-host}
-# port: ${tidb-port}
-# user: ${tidb-user}
-# secretName: backup-basic-tidb-secret
br:
cluster: basic
clusterNamespace: default
6 changes: 0 additions & 6 deletions examples/backup/backup-schedule-azblob.yaml
@@ -9,12 +9,6 @@ spec:
# maxReservedTime: "2m"
schedule: "*/1 * * * *"
backupTemplate:
-# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
-# from:
-# host: ${tidb_host}
-# port: ${tidb_port}
-# user: ${tidb_user}
-# secretName: backup-demo1-tidb-secret
cleanPolicy: Delete
br:
cluster: basic
6 changes: 0 additions & 6 deletions examples/backup/backup-schedule-nfs.yaml
@@ -9,12 +9,6 @@ spec:
# maxReservedTime: "2m"
schedule: "*/1 * * * *"
backupTemplate:
-# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
-# from:
-# host: ${tidb_host}
-# port: ${tidb_port}
-# user: ${tidb_user}
-# secretName: backup-demo1-tidb-secret
cleanPolicy: Delete
br:
cluster: basic
6 changes: 0 additions & 6 deletions examples/backup/restore-azblob.yaml
@@ -5,12 +5,6 @@ metadata:
namespace: default
spec:
# backupType: full
-# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
-# to:
-# host: ${tidb_host}
-# port: ${tidb_port}
-# user: ${tidb_user}
-# secretName: restore-demo2-tidb-secret
br:
cluster: basic
clusterNamespace: default
6 changes: 0 additions & 6 deletions examples/backup/restore-ebs-minio.yaml
@@ -6,12 +6,6 @@ metadata:
spec:
backupType: full
backupMode: volume-snapshot
-# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
-# from:
-# host: ${tidb-host}
-# port: ${tidb-port}
-# user: ${tidb-user}
-# secretName: backup-basic-tidb-secret
toolImage: localhost:5000/pingcap/br:latest
br:
cluster: basic
6 changes: 0 additions & 6 deletions examples/backup/restore-local.yaml
@@ -5,12 +5,6 @@ metadata:
namespace: default
spec:
# backupType: full
-# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
-# from:
-# host: ${tidb-host}
-# port: ${tidb-port}
-# user: ${tidb-user}
-# secretName: backup-basic-tidb-secret
br:
cluster: basic
clusterNamespace: default
6 changes: 0 additions & 6 deletions examples/backup/restore-nfs.yaml
@@ -5,12 +5,6 @@ metadata:
namespace: default
spec:
# backupType: full
-# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
-# to:
-# host: ${tidb_host}
-# port: ${tidb_port}
-# user: ${tidb_user}
-# secretName: restore-demo2-tidb-secret
br:
cluster: basic
clusterNamespace: default