diff --git a/Makefile b/Makefile index 26ca00eec8..12e05ad474 100644 --- a/Makefile +++ b/Makefile @@ -71,7 +71,7 @@ cover: smoke: $(SUDO) NYDUS_BUILDER=${NYDUS_BUILDER} NYDUS_NYDUSD=${NYDUS_NYDUSD} ${GO_EXECUTABLE_PATH} test -race -v ./tests - $(SUDO) NYDUS_BUILDER=${NYDUS_BUILDER} NYDUS_NYDUSD=${NYDUS_NYDUSD} ${GO_EXECUTABLE_PATH} test -race -v ./tests -args -fs-version=6 + $(SUDO) NYDUS_BUILDER=${NYDUS_BUILDER} NYDUS_NYDUSD=${NYDUS_NYDUSD} ${GO_EXECUTABLE_PATH} test -race -v ./tests .PHONY: integration integration: diff --git a/pkg/converter/convert_unix.go b/pkg/converter/convert_unix.go index a218cfab93..4ace1d321a 100644 --- a/pkg/converter/convert_unix.go +++ b/pkg/converter/convert_unix.go @@ -113,14 +113,14 @@ func unpackOciTar(ctx context.Context, dst string, reader io.Reader) error { } // Unpack a Nydus formatted tar stream into a directory. -func unpackNydusTar(bootDst, blobDst string, ra content.ReaderAt) error { +func unpackNydusBlob(bootDst, blobDst string, ra content.ReaderAt) error { boot, err := os.OpenFile(bootDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { return errors.Wrapf(err, "write to bootstrap %s", bootDst) } defer boot.Close() - if err = unpackBootstrapFromNydusTar(ra, boot); err != nil { + if err = unpackFileFromNydusBlob(ra, bootstrapNameInTar, boot); err != nil { return errors.Wrap(err, "unpack bootstrap from nydus") } @@ -130,134 +130,76 @@ func unpackNydusTar(bootDst, blobDst string, ra content.ReaderAt) error { } defer blob.Close() - if err = unpackBlobFromNydusTar(ra, blob); err != nil { + if err = unpackFileFromNydusBlob(ra, blobNameInTar, blob); err != nil { return errors.Wrap(err, "unpack blob from nydus") } return nil } -// Unpack the bootstrap from nydus formatted tar stream (blob + bootstrap). +// Unpack the file from nydus formatted tar stream. 
// The nydus formatted tar stream is a tar-like structure that arranges the // data as follows: // -// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` -func unpackBootstrapFromNydusTar(ra content.ReaderAt, target io.Writer) error { - cur := ra.Size() - reader := newSeekReader(ra) - +// `data | tar_header | data | tar_header` +func unpackFileFromNydusBlob(ra content.ReaderAt, targetName string, target io.Writer) error { const headerSize = 512 - // Seek from tail to head of nydus formatted tar stream to find nydus - // bootstrap data. - for { - if headerSize > cur { - return fmt.Errorf("invalid tar format at pos %d", cur) - } - - // Try to seek to the part of tar header. - var err error - cur, err = reader.Seek(cur-headerSize, io.SeekCurrent) - if err != nil { - return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize) - } - - tr := tar.NewReader(reader) - // Parse tar header. - hdr, err := tr.Next() - if err != nil { - return errors.Wrap(err, "parse tar header") - } - - if hdr.Name == bootstrapNameInTar { - // Try to seek to the part of tar data (bootstrap_data). - if hdr.Size > cur { - return fmt.Errorf("invalid tar format at pos %d", cur) - } - bootstrapOffset := cur - hdr.Size - _, err = reader.Seek(bootstrapOffset, io.SeekStart) - if err != nil { - return errors.Wrap(err, "seek to bootstrap data offset") - } - - // Copy tar data (bootstrap_data) to provided target writer. - if _, err := io.CopyN(target, reader, hdr.Size); err != nil { - return errors.Wrap(err, "copy bootstrap data to reader") - } - - return nil - } - - if cur == hdr.Size { - break - } + if headerSize > ra.Size() { + return fmt.Errorf("invalid nydus tar size %d", ra.Size()) } - return fmt.Errorf("can't find bootstrap in nydus tar") -} - -// Unpack the blob from nydus formatted tar stream (blob + bootstrap). 
-// The nydus formatted tar stream is a tar-like structure that arranges the
-// data as follows:
-//
-// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header`
-func unpackBlobFromNydusTar(ra content.ReaderAt, target io.Writer) error {
-	cur := ra.Size()
+	cur := ra.Size() - headerSize
 	reader := newSeekReader(ra)
 
-	const headerSize = 512
-
-	// Seek from tail to head of nydus formatted tar stream to find nydus
-	// bootstrap data.
+	// Seek from tail to head of nydus formatted tar stream to find
+	// target data.
 	for {
-		if headerSize > cur {
-			break
-		}
-
-		// Try to seek to the part of tar header.
-		var err error
-		cur, err = reader.Seek(cur-headerSize, io.SeekStart)
+		// Try to seek the part of tar header.
+		_, err := reader.Seek(cur, io.SeekStart)
 		if err != nil {
-			return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize)
+			return errors.Wrapf(err, "seek %d for nydus tar header", cur)
 		}
 
-		tr := tar.NewReader(reader)
 		// Parse tar header.
+		tr := tar.NewReader(reader)
 		hdr, err := tr.Next()
 		if err != nil {
-			return errors.Wrap(err, "parse tar header")
+			return errors.Wrap(err, "parse nydus tar header")
 		}
 
-		if hdr.Name == bootstrapNameInTar {
-			if hdr.Size > cur {
-				return fmt.Errorf("invalid tar format at pos %d", cur)
-			}
-			cur, err = reader.Seek(cur-hdr.Size, io.SeekStart)
-			if err != nil {
-				return errors.Wrap(err, "seek to bootstrap data offset")
-			}
-		} else if hdr.Name == blobNameInTar {
-			if hdr.Size > cur {
-				return fmt.Errorf("invalid tar format at pos %d", cur)
-			}
+		if cur < hdr.Size {
+			return errors.Errorf("invalid nydus tar data, name %s, size %d", hdr.Name, hdr.Size)
+		}
+
+		if hdr.Name == targetName {
+			// Try to seek the part of tar data.
 			_, err = reader.Seek(cur-hdr.Size, io.SeekStart)
 			if err != nil {
-				return errors.Wrap(err, "seek to blob data offset")
+				return errors.Wrap(err, "seek target data offset")
 			}
+
+			// Copy tar data to provided target writer.
if _, err := io.CopyN(target, reader, hdr.Size); err != nil { - return errors.Wrap(err, "copy blob data to reader") + return errors.Wrap(err, "copy target data to reader") } + return nil } + + cur = cur - hdr.Size - headerSize + if cur < 0 { + break + } } - return nil + return fmt.Errorf("can't find target %s in nydus tar", targetName) } // Pack converts an OCI tar stream to nydus formatted stream with a tar-like // structure that arranges the data as follows: // -// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` +// `data | tar_header | data | tar_header` // // The caller should write OCI tar stream into the returned `io.WriteCloser`, // then the Pack method will write the nydus formatted stream to `dest` @@ -351,22 +293,25 @@ func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) } defer os.RemoveAll(workDir) + getBootstrapPath := func(layerIdx int) string { + digestHex := layers[layerIdx].Digest.Hex() + return filepath.Join(workDir, digestHex) + } + eg, _ := errgroup.WithContext(ctx) sourceBootstrapPaths := []string{} for idx := range layers { - sourceBootstrapPaths = append(sourceBootstrapPaths, filepath.Join(workDir, layers[idx].Digest.Hex())) + sourceBootstrapPaths = append(sourceBootstrapPaths, getBootstrapPath(idx)) eg.Go(func(idx int) func() error { return func() error { - layer := layers[idx] - // Use the hex hash string of whole tar blob as the bootstrap name. 
- bootstrap, err := os.Create(filepath.Join(workDir, layer.Digest.Hex())) + bootstrap, err := os.Create(getBootstrapPath(idx)) if err != nil { return errors.Wrap(err, "create source bootstrap") } defer bootstrap.Close() - if err := unpackBootstrapFromNydusTar(layer.ReaderAt, bootstrap); err != nil { + if err := unpackFileFromNydusBlob(layers[idx].ReaderAt, bootstrapNameInTar, bootstrap); err != nil { return errors.Wrap(err, "unpack nydus tar") } @@ -428,7 +373,7 @@ func Unpack(ctx context.Context, ra content.ReaderAt, dest io.Writer, opt Unpack defer os.RemoveAll(workDir) bootPath, blobPath := filepath.Join(workDir, bootstrapNameInTar), filepath.Join(workDir, blobNameInTar) - if err = unpackNydusTar(bootPath, blobPath, ra); err != nil { + if err = unpackNydusBlob(bootPath, blobPath, ra); err != nil { return errors.Wrap(err, "unpack nydus tar") } @@ -726,33 +671,33 @@ func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descript layers := []Layer{} var chainID digest.Digest - for _, blobDesc := range descs { - ra, err := cs.ReaderAt(ctx, blobDesc) + for _, nydusBlobDesc := range descs { + ra, err := cs.ReaderAt(ctx, nydusBlobDesc) if err != nil { - return nil, nil, errors.Wrapf(err, "get reader for blob %q", blobDesc.Digest) + return nil, nil, errors.Wrapf(err, "get reader for blob %q", nydusBlobDesc.Digest) } defer ra.Close() layers = append(layers, Layer{ - Digest: blobDesc.Digest, + Digest: nydusBlobDesc.Digest, ReaderAt: ra, }) if chainID == "" { - chainID = identity.ChainID([]digest.Digest{blobDesc.Digest}) + chainID = identity.ChainID([]digest.Digest{nydusBlobDesc.Digest}) } else { - chainID = identity.ChainID([]digest.Digest{chainID, blobDesc.Digest}) + chainID = identity.ChainID([]digest.Digest{chainID, nydusBlobDesc.Digest}) } } // Merge all nydus bootstraps into a final nydus bootstrap. 
pr, pw := io.Pipe() - blobDigestChan := make(chan []digest.Digest, 1) + originalBlobDigestChan := make(chan []digest.Digest, 1) go func() { defer pw.Close() - blobDigests, err := Merge(ctx, layers, pw, opt) + originalBlobDigests, err := Merge(ctx, layers, pw, opt) if err != nil { pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap")) } - blobDigestChan <- blobDigests + originalBlobDigestChan <- originalBlobDigests }() // Compress final nydus bootstrap to tar.gz and write into content store. @@ -791,8 +736,9 @@ func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descript return nil, nil, errors.Wrap(err, "get info from content store") } - blobDigests := <-blobDigestChan + blobDigests := <-originalBlobDigestChan blobDescs := []ocispec.Descriptor{} + for _, blobDigest := range blobDigests { blobInfo, err := cs.Info(ctx, blobDigest) if err != nil { diff --git a/tests/converter_test.go b/tests/converter_test.go index fbdfeeb1ef..af355fae34 100644 --- a/tests/converter_test.go +++ b/tests/converter_test.go @@ -12,7 +12,6 @@ import ( "context" "crypto/rand" "errors" - "flag" "fmt" "io" "io/fs" @@ -38,8 +37,6 @@ import ( const envNydusdPath = "NYDUS_NYDUSD" -var fsVersion = flag.String("fs-version", "5", "specifie the fs version for test") - func hugeString(mb int) string { var buf strings.Builder size := mb * 1024 * 1024 @@ -126,16 +123,7 @@ func writeToFile(t *testing.T, reader io.Reader, fileName string) { require.NoError(t, err) } -var expectedFileTree = map[string]string{ - "dir-1": "", - "dir-1/file-2": "lower-file-2", - "dir-2": "", - "dir-2/file-1": hugeString(3), - "dir-2/file-2": "upper-file-2", - "dir-2/file-3": "upper-file-3", -} - -func buildChunkDictTar(t *testing.T) io.ReadCloser { +func buildChunkDictTar(t *testing.T, n int) io.ReadCloser { pr, pw := io.Pipe() tw := tar.NewWriter(pw) @@ -143,9 +131,10 @@ func buildChunkDictTar(t *testing.T) io.ReadCloser { defer pw.Close() writeDirToTar(t, tw, "dir-1") - writeFileToTar(t, tw, 
"dir-1/file-1", "lower-file-1") - writeFileToTar(t, tw, "dir-1/file-2", "lower-file-2") - writeFileToTar(t, tw, "dir-1/file-3", "lower-file-3") + + for i := 1; i < n; i++ { + writeFileToTar(t, tw, fmt.Sprintf("dir-1/file-%d", i), fmt.Sprintf("lower-file-%d", i)) + } require.NoError(t, tw.Close()) }() @@ -153,7 +142,9 @@ func buildChunkDictTar(t *testing.T) io.ReadCloser { return pr } -func buildOCILowerTar(t *testing.T) io.ReadCloser { +func buildOCILowerTar(t *testing.T, n int) (io.ReadCloser, map[string]string) { + fileTree := map[string]string{} + pr, pw := io.Pipe() tw := tar.NewWriter(pw) @@ -161,19 +152,31 @@ func buildOCILowerTar(t *testing.T) io.ReadCloser { defer pw.Close() writeDirToTar(t, tw, "dir-1") - writeFileToTar(t, tw, "dir-1/file-1", "lower-file-1") - writeFileToTar(t, tw, "dir-1/file-2", "lower-file-2") + fileTree["dir-1"] = "" + + for i := 1; i < n; i++ { + writeFileToTar(t, tw, fmt.Sprintf("dir-1/file-%d", i), fmt.Sprintf("lower-file-%d", i)) + fileTree[fmt.Sprintf("dir-1/file-%d", i)] = fmt.Sprintf("lower-file-%d", i) + } writeDirToTar(t, tw, "dir-2") + fileTree["dir-2"] = "" + writeFileToTar(t, tw, "dir-2/file-1", "lower-file-1") + fileTree["dir-2/file-1"] = "lower-file-1" require.NoError(t, tw.Close()) }() - return pr + return pr, fileTree } -func buildOCIUpperTar(t *testing.T, teePath string) io.ReadCloser { +func buildOCIUpperTar(t *testing.T, teePath string, lowerFileTree map[string]string) (io.ReadCloser, map[string]string) { + if lowerFileTree == nil { + lowerFileTree = map[string]string{} + } + + hugeStr := hugeString(3) pr, pw := io.Pipe() go func() { @@ -189,21 +192,37 @@ func buildOCIUpperTar(t *testing.T, teePath string) io.ReadCloser { } writeDirToTar(t, tw, "dir-1") + lowerFileTree["dir-1"] = "" + writeFileToTar(t, tw, "dir-1/.wh.file-1", "") + delete(lowerFileTree, "dir-1/file-1") writeDirToTar(t, tw, "dir-2") + lowerFileTree["dir-2"] = "" + writeFileToTar(t, tw, "dir-2/.wh..wh..opq", "") - writeFileToTar(t, tw, "dir-2/file-1", 
expectedFileTree["dir-2/file-1"]) + for k := range lowerFileTree { + if strings.HasPrefix(k, "dir-2/") { + delete(lowerFileTree, k) + } + } + + writeFileToTar(t, tw, "dir-2/file-1", hugeStr) + lowerFileTree["dir-2/file-1"] = hugeStr + writeFileToTar(t, tw, "dir-2/file-2", "upper-file-2") + lowerFileTree["dir-2/file-2"] = "upper-file-2" + writeFileToTar(t, tw, "dir-2/file-3", "upper-file-3") + lowerFileTree["dir-2/file-3"] = "upper-file-3" require.NoError(t, tw.Close()) }() - return pr + return pr, lowerFileTree } -func convertLayer(t *testing.T, source io.ReadCloser, chunkDict, workDir string, fsVersion string) (string, digest.Digest) { +func packLayer(t *testing.T, source io.ReadCloser, chunkDict, workDir string, fsVersion string) (string, digest.Digest) { var data bytes.Buffer writer := io.Writer(&data) @@ -247,20 +266,13 @@ func unpackLayer(t *testing.T, workDir string, ra content.ReaderAt) (string, dig return tarPath, digest } -func verify(t *testing.T, workDir string) { +func verify(t *testing.T, workDir string, expectedFileTree map[string]string) { mountDir := filepath.Join(workDir, "mnt") blobDir := filepath.Join(workDir, "blobs") nydusdPath := os.Getenv(envNydusdPath) if nydusdPath == "" { nydusdPath = "nydusd" } - mode := "cached" - digestValidate := true - // Currently v6 does not support digestValidate, and only direct mode is supported - if *fsVersion == "6" { - mode = "direct" - digestValidate = false - } config := NydusdConfig{ EnablePrefetch: true, NydusdPath: nydusdPath, @@ -271,8 +283,8 @@ func verify(t *testing.T, workDir string) { BlobCacheDir: filepath.Join(workDir, "cache"), APISockPath: filepath.Join(workDir, "nydusd-api.sock"), MountPath: mountDir, - Mode: mode, - DigestValidate: digestValidate, + Mode: "direct", + DigestValidate: false, } nydusd, err := NewNydusd(config) @@ -316,11 +328,11 @@ func verify(t *testing.T, workDir string) { require.Equal(t, expectedFileTree, actualFileTree) } -func buildChunkDict(t *testing.T, workDir string) 
(string, string) { - dictOCITarReader := buildChunkDictTar(t) +func buildChunkDict(t *testing.T, workDir, fsVersion string, n int) (string, string) { + dictOCITarReader := buildChunkDictTar(t, n) blobDir := filepath.Join(workDir, "blobs") - nydusTarPath, lowerNydusBlobDigest := convertLayer(t, dictOCITarReader, "", blobDir, *fsVersion) + nydusTarPath, lowerNydusBlobDigest := packLayer(t, dictOCITarReader, "", blobDir, fsVersion) ra, err := local.OpenReader(nydusTarPath) require.NoError(t, err) defer ra.Close() @@ -355,14 +367,19 @@ func buildChunkDict(t *testing.T, workDir string) (string, string) { return bootstrapPath, filepath.Base(dictBlobPath) } -// sudo go test -v -count=1 -run TestConverter ./tests -func TestConverter(t *testing.T) { +// sudo go test -v -count=1 -run TestPack ./tests +func TestPack(t *testing.T) { + testPack(t, "5") + testPack(t, "6") +} + +func testPack(t *testing.T, fsVersion string) { workDir, err := os.MkdirTemp("", "nydus-converter-test-") require.NoError(t, err) defer os.RemoveAll(workDir) - lowerOCITarReader := buildOCILowerTar(t) - upperOCITarReader := buildOCIUpperTar(t, "") + lowerOCITarReader, expectedLowerFileTree := buildOCILowerTar(t, 100) + upperOCITarReader, expectedOverlayFileTree := buildOCIUpperTar(t, "", expectedLowerFileTree) blobDir := filepath.Join(workDir, "blobs") err = os.MkdirAll(blobDir, 0755) @@ -376,10 +393,10 @@ func TestConverter(t *testing.T) { err = os.MkdirAll(mountDir, 0755) require.NoError(t, err) - chunkDictBootstrapPath, chunkDictBlobHash := buildChunkDict(t, workDir) + chunkDictBootstrapPath, chunkDictBlobHash := buildChunkDict(t, workDir, fsVersion, 100) - lowerNydusTarPath, lowerNydusBlobDigest := convertLayer(t, lowerOCITarReader, chunkDictBootstrapPath, blobDir, *fsVersion) - upperNydusTarPath, upperNydusBlobDigest := convertLayer(t, upperOCITarReader, chunkDictBootstrapPath, blobDir, *fsVersion) + lowerNydusTarPath, lowerNydusBlobDigest := packLayer(t, lowerOCITarReader, chunkDictBootstrapPath, 
blobDir, fsVersion) + upperNydusTarPath, upperNydusBlobDigest := packLayer(t, upperOCITarReader, chunkDictBootstrapPath, blobDir, fsVersion) lowerTarRa, err := local.OpenReader(lowerNydusTarPath) require.NoError(t, err) @@ -409,20 +426,55 @@ func TestConverter(t *testing.T) { ChunkDictPath: chunkDictBootstrapPath, }) require.NoError(t, err) - expectedBlobDigests := []digest.Digest{digest.NewDigestFromHex(string(digest.SHA256), chunkDictBlobHash), upperNydusBlobDigest} + chunkDictBlobDigest := digest.NewDigestFromHex(string(digest.SHA256), chunkDictBlobHash) + expectedBlobDigests := []digest.Digest{chunkDictBlobDigest, upperNydusBlobDigest} require.Equal(t, expectedBlobDigests, blobDigests) - verify(t, workDir) + verify(t, workDir, expectedOverlayFileTree) dropCache(t) - verify(t, workDir) + verify(t, workDir, expectedOverlayFileTree) ensureFile(t, filepath.Join(cacheDir, chunkDictBlobHash)+".chunk_map") ensureNoFile(t, filepath.Join(cacheDir, lowerNydusBlobDigest.Hex())+".chunk_map") ensureFile(t, filepath.Join(cacheDir, upperNydusBlobDigest.Hex())+".chunk_map") } -// sudo go test -v -count=1 -run TestContainerdImageConvert ./tests -func TestContainerdImageConvert(t *testing.T) { +// sudo go test -v -count=1 -run TestUnpack ./tests +func TestUnpack(t *testing.T) { + testUnpack(t, "5") + testUnpack(t, "6") +} + +func testUnpack(t *testing.T, fsVersion string) { + workDir, err := os.MkdirTemp("", "nydus-converter-test-") + require.NoError(t, err) + defer os.RemoveAll(workDir) + + ociTar := filepath.Join(workDir, "oci.tar") + ociTarReader, _ := buildOCIUpperTar(t, ociTar, nil) + nydusTar, _ := packLayer(t, ociTarReader, "", workDir, fsVersion) + + tarTa, err := local.OpenReader(nydusTar) + require.NoError(t, err) + defer tarTa.Close() + + _, newTarDigest := unpackLayer(t, workDir, tarTa) + + ociTarReader, err = os.OpenFile(ociTar, os.O_RDONLY, 0644) + require.NoError(t, err) + ociTarDigest, err := digest.Canonical.FromReader(ociTarReader) + require.NoError(t, err) + + 
require.Equal(t, ociTarDigest, newTarDigest) +} + +// sudo go test -v -count=1 -run TestImageConvert ./tests +func TestImageConvert(t *testing.T) { + testImageConvert(t, "5") + testImageConvert(t, "6") +} + +func testImageConvert(t *testing.T, fsVersion string) { const ( srcImageRef = "docker.io/library/nginx:latest" targetImageRef = "localhost:5000/nydus/nginx:nydus-latest" @@ -441,7 +493,7 @@ func TestContainerdImageConvert(t *testing.T) { defer os.RemoveAll(workDir) nydusOpts := &converter.PackOption{ WorkDir: workDir, - FsVersion: "5", + FsVersion: fsVersion, } convertFunc := converter.LayerConvertFunc(*nydusOpts) convertHooks := containerdconverter.ConvertHooks{ @@ -487,26 +539,3 @@ func TestContainerdImageConvert(t *testing.T) { return } } - -func TestUnpack(t *testing.T) { - workDir, err := os.MkdirTemp("", "nydus-converter-test-") - require.NoError(t, err) - defer os.RemoveAll(workDir) - - ociTar := filepath.Join(workDir, "oci.tar") - ociTarReader := buildOCIUpperTar(t, ociTar) - nydusTar, _ := convertLayer(t, ociTarReader, "", workDir, *fsVersion) - - tarTa, err := local.OpenReader(nydusTar) - require.NoError(t, err) - defer tarTa.Close() - - _, newTarDigest := unpackLayer(t, workDir, tarTa) - - ociTarReader, err = os.OpenFile(ociTar, os.O_RDONLY, 0644) - require.NoError(t, err) - ociTarDigest, err := digest.Canonical.FromReader(ociTarReader) - require.NoError(t, err) - - require.Equal(t, ociTarDigest, newTarDigest) -} diff --git a/tests/nydusd.go b/tests/nydusd.go index 114925ffdb..1f73c5e531 100644 --- a/tests/nydusd.go +++ b/tests/nydusd.go @@ -75,13 +75,6 @@ func makeConfig(conf NydusdConfig) error { tpl := template.Must(template.New("").Parse(configTpl)) var ret bytes.Buffer - if conf.BackendType == "" { - conf.BackendType = "localfs" - conf.BackendConfig = `{"dir": "/fake"}` - conf.EnablePrefetch = false - } else { - conf.EnablePrefetch = true - } if err := tpl.Execute(&ret, conf); err != nil { return errors.New("prepare config template for Nydusd") }