From c7dbe62da40b684f382d5e7b7f2b3f46ee01ed89 Mon Sep 17 00:00:00 2001
From: slothever
Date: Sun, 28 Jan 2024 18:22:40 +0800
Subject: [PATCH] [fix](glue)support access glue iceberg with credential list

support access glue and s3 iceberg with credential list
support iceberg hadoop catalog on s3

(cherry picked from commit ef85d0d5173e5108e64b9e6ec30a6c31e401c174)
---
 be/src/util/s3_util.cpp                            | 42 ++++++++-----
 bin/start_be.sh                                    | 13 +---
 conf/be.conf                                       | 13 ++++
 docs/dev.json                                      |  4 ++
 .../docs/lakehouse/cloud-auth/cloud-auth.md        | 61 +++++++++++++++++++
 .../docs/lakehouse/multi-catalog/iceberg.md        | 15 ++++-
 docs/sidebars.json                                 |  7 +++
 .../docs/lakehouse/cloud-auth/cloud-auth.md        | 61 +++++++++++++++++++
 .../docs/lakehouse/multi-catalog/iceberg.md        | 15 ++++-
 .../org/apache/doris/common/util/S3Util.java       | 20 +++++-
 .../iceberg/IcebergHadoopExternalCatalog.java      |  6 +-
 .../iceberg/IcebergRestExternalCatalog.java        |  7 +--
 .../property/constants/S3Properties.java           |  4 +-
 .../tablefunction/S3TableValuedFunction.java       |  7 +--
 .../property/PropertyConverterTest.java            |  3 +-
 .../cold_heat_separation/policy/create.groovy      |  6 +-
 16 files changed, 234 insertions(+), 50 deletions(-)
 create mode 100644 docs/en/docs/lakehouse/cloud-auth/cloud-auth.md
 create mode 100644 docs/zh-CN/docs/lakehouse/cloud-auth/cloud-auth.md

diff --git a/be/src/util/s3_util.cpp b/be/src/util/s3_util.cpp
index 94756a3a0bad5b..c2bcb302a4fefa 100644
--- a/be/src/util/s3_util.cpp
+++ b/be/src/util/s3_util.cpp
@@ -19,6 +19,7 @@
 #include
 #include
+#include <aws/core/auth/AWSCredentialsProviderChain.h>
 #include
 #include
 #include
@@ -111,8 +112,7 @@ S3ClientFactory& S3ClientFactory::instance() {

 bool S3ClientFactory::is_s3_conf_valid(const std::map<std::string, std::string>& prop) {
     StringCaseMap<std::string> properties(prop.begin(), prop.end());
-    if (properties.find(S3_AK) == properties.end() || properties.find(S3_SK) == properties.end() ||
-        properties.find(S3_ENDPOINT) == properties.end() ||
+    if (properties.find(S3_ENDPOINT) == properties.end() ||
         properties.find(S3_REGION) == properties.end()) {
         DCHECK(false) << "aws properties is incorrect.";
         LOG(ERROR) << "aws properties is incorrect.";
@@ -122,7 +122,7 @@ bool S3ClientFactory::is_s3_conf_valid(const std::map<std::string, std::string>&
 }

 bool S3ClientFactory::is_s3_conf_valid(const S3Conf& s3_conf) {
-    return !s3_conf.ak.empty() && !s3_conf.sk.empty() && !s3_conf.endpoint.empty();
+    return !s3_conf.endpoint.empty();
 }

 std::shared_ptr<Aws::S3::S3Client> S3ClientFactory::create(const S3Conf& s3_conf) {
@@ -139,12 +139,6 @@ std::shared_ptr<Aws::S3::S3Client> S3ClientFactory::create(const S3Conf& s3_conf)
         }
     }

-    Aws::Auth::AWSCredentials aws_cred(s3_conf.ak, s3_conf.sk);
-    DCHECK(!aws_cred.IsExpiredOrEmpty());
-    if (!s3_conf.token.empty()) {
-        aws_cred.SetSessionToken(s3_conf.token);
-    }
-
     Aws::Client::ClientConfiguration aws_config = S3ClientFactory::getClientConfiguration();
     aws_config.endpointOverride = s3_conf.endpoint;
     aws_config.region = s3_conf.region;
@@ -167,11 +161,25 @@ std::shared_ptr<Aws::S3::S3Client> S3ClientFactory::create(const S3Conf& s3_conf)
     if (s3_conf.connect_timeout_ms > 0) {
         aws_config.connectTimeoutMs = s3_conf.connect_timeout_ms;
     }
-
-    std::shared_ptr<Aws::S3::S3Client> new_client = std::make_shared<Aws::S3::S3Client>(
-            std::move(aws_cred), std::move(aws_config),
-            Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
-            s3_conf.use_virtual_addressing);
+    std::shared_ptr<Aws::S3::S3Client> new_client;
+    if (!s3_conf.ak.empty() && !s3_conf.sk.empty()) {
+        Aws::Auth::AWSCredentials aws_cred(s3_conf.ak, s3_conf.sk);
+        DCHECK(!aws_cred.IsExpiredOrEmpty());
+        if (!s3_conf.token.empty()) {
+            aws_cred.SetSessionToken(s3_conf.token);
+        }
+        new_client = std::make_shared<Aws::S3::S3Client>(
+                std::move(aws_cred), std::move(aws_config),
+                Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
+                s3_conf.use_virtual_addressing);
+    } else {
+        std::shared_ptr<Aws::Auth::AWSCredentialsProviderChain> aws_provider_chain =
+                std::make_shared<Aws::Auth::DefaultAWSCredentialsProviderChain>();
+        new_client = std::make_shared<Aws::S3::S3Client>(
+                std::move(aws_provider_chain), std::move(aws_config),
+                Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
+                s3_conf.use_virtual_addressing);
+    }

     {
         std::lock_guard<std::mutex> l(_lock);
@@ -186,8 +194,10 @@ Status S3ClientFactory::convert_properties_to_s3_conf(
         return Status::InvalidArgument("S3 properties are incorrect, please check properties.");
     }
     StringCaseMap<std::string> properties(prop.begin(), prop.end());
-    s3_conf->ak = properties.find(S3_AK)->second;
-    s3_conf->sk = properties.find(S3_SK)->second;
+    if (properties.find(S3_AK) != properties.end() && properties.find(S3_SK) != properties.end()) {
+        s3_conf->ak = properties.find(S3_AK)->second;
+        s3_conf->sk = properties.find(S3_SK)->second;
+    }
     if (properties.find(S3_TOKEN) != properties.end()) {
         s3_conf->token = properties.find(S3_TOKEN)->second;
     }
diff --git a/bin/start_be.sh b/bin/start_be.sh
index 51420d765f56ce..eecafc2a73ab01 100755
--- a/bin/start_be.sh
+++ b/bin/start_be.sh
@@ -36,7 +36,6 @@ OPTS="$(getopt \
 eval set -- "${OPTS}"

 RUN_DAEMON=0
-RUN_IN_AWS=0
 RUN_CONSOLE=0
 while true; do
     case "$1" in
@@ -44,10 +43,6 @@ while true; do
         RUN_DAEMON=1
         shift
         ;;
-    --aws)
-        RUN_IN_AWS=1
-        shift
-        ;;
     --console)
         RUN_CONSOLE=1
         shift
@@ -242,10 +237,7 @@
 else
     LIMIT="/bin/limit3 -c 0 -n 65536"
 fi

-## If you are not running in aws cloud, disable this env since https://github.com/aws/aws-sdk-cpp/issues/1410.
-if [[ "${RUN_IN_AWS}" -eq 0 ]]; then
-    export AWS_EC2_METADATA_DISABLED=true
-fi
+export AWS_MAX_ATTEMPTS=2

 ## set asan and ubsan env to generate core file
 export ASAN_OPTIONS=symbolize=1:abort_on_error=1:disable_coredump=0:unmap_shadow_on_exit=1:detect_container_overflow=0
@@ -349,9 +341,6 @@
 else
     export JEMALLOC_CONF="${JEMALLOC_CONF},prof_prefix:${JEMALLOC_PROF_PRFIX}"
 fi

-export AWS_EC2_METADATA_DISABLED=true
-export AWS_MAX_ATTEMPTS=2
-
 if [[ "${RUN_DAEMON}" -eq 1 ]]; then
     nohup ${LIMIT:+${LIMIT}} "${DORIS_HOME}/lib/doris_be" "$@" >>"${LOG_DIR}/be.out" 2>&1 </dev/null &
diff --git a/docs/en/docs/lakehouse/cloud-auth/cloud-auth.md b/docs/en/docs/lakehouse/cloud-auth/cloud-auth.md
new file mode 100644
--- /dev/null
+++ b/docs/en/docs/lakehouse/cloud-auth/cloud-auth.md
@@ -0,0 +1,61 @@
+---
+{
+    "title": "Cloud Service Authentication",
+    "language": "en"
+}
+---
+
+
+# Overview
+
+When accessing a cloud service, we need to provide the credentials required by that service so that it can pass the authentication of the cloud vendor's IAM.
+
+## AWS
+
+Doris currently supports two types of authentication for accessing AWS services.
+
+### Catalog Credentials
+
+The Catalog supports filling in basic Credentials properties, such as:
+1. For S3: `s3.endpoint`, `s3.access_key`, `s3.secret_key`.
+2. For Glue: `glue.endpoint`, `glue.access_key`, `glue.secret_key`.
+
+When accessing Glue through an Iceberg Catalog, we can access tables hosted on Glue by filling in the following properties:
+
+```sql
+CREATE CATALOG glue PROPERTIES (
+    "type"="iceberg",
+    "iceberg.catalog.type" = "glue",
+    "glue.endpoint" = "https://glue.us-east-1.amazonaws.com",
+    "glue.access_key" = "ak",
+    "glue.secret_key" = "sk"
+);
+```
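+
+Similarly, an S3-backed catalog (for example an Iceberg Hadoop catalog on S3) uses the `s3.*` properties. A minimal sketch, mirroring the Iceberg docs example in this patch, with a placeholder bucket and keys:
+
+```sql
+CREATE CATALOG iceberg_s3 PROPERTIES (
+    "type"="iceberg",
+    "iceberg.catalog.type" = "hadoop",
+    "warehouse" = "s3://bucket/dir/key",
+    "s3.endpoint" = "s3.us-east-1.amazonaws.com",
+    "s3.access_key" = "ak",
+    "s3.secret_key" = "sk"
+);
+```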
+
+### System Credentials
+
+For applications running on AWS resources, such as EC2 instances, this approach enhances security by avoiding hardcoded credentials.
+
+If we create the Catalog without filling in any Credentials properties, the `DefaultAWSCredentialsProviderChain` will be used, which reads credentials from the system environment variables or from the instance profile (see the example at the end of this page).
+
+For details about how to configure environment variables and system properties, see [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html).
+- The configurable environment variables are: `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN`, `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and so on.
+- In addition, you can also use [aws configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) to configure Credentials; the credentials file will be written to the `~/.aws` directory.
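+
+For example, on an EC2 instance whose instance profile (or environment variables) already supplies credentials, a catalog can omit the key properties entirely. A minimal sketch under that assumption:
+
+```sql
+CREATE CATALOG iceberg_s3 PROPERTIES (
+    "type"="iceberg",
+    "iceberg.catalog.type" = "hadoop",
+    "warehouse" = "s3://bucket/dir/key",
+    "s3.endpoint" = "s3.us-east-1.amazonaws.com"
+);
+```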
diff --git a/docs/en/docs/lakehouse/multi-catalog/iceberg.md b/docs/en/docs/lakehouse/multi-catalog/iceberg.md
index 2baa05770ff135..3a46811fd6ce32 100644
--- a/docs/en/docs/lakehouse/multi-catalog/iceberg.md
+++ b/docs/en/docs/lakehouse/multi-catalog/iceberg.md
@@ -78,6 +78,17 @@ CREATE CATALOG iceberg_hadoop_ha PROPERTIES (
 );
 ```

+```sql
+CREATE CATALOG iceberg_s3 PROPERTIES (
+    'type'='iceberg',
+    'iceberg.catalog.type' = 'hadoop',
+    'warehouse' = 's3://bucket/dir/key',
+    's3.endpoint' = 's3.us-east-1.amazonaws.com',
+    's3.access_key' = 'ak',
+    's3.secret_key' = 'sk'
+);
+```
+
 #### Hive Metastore

 ```sql
@@ -106,7 +117,9 @@ CREATE CATALOG glue PROPERTIES (
 );
 ```

-For Iceberg properties, see [Iceberg Glue Catalog](https://iceberg.apache.org/docs/latest/aws/#glue-catalog)
+1. For Iceberg properties, see [Iceberg Glue Catalog](https://iceberg.apache.org/docs/latest/aws/#glue-catalog).
+
+2. If you do not fill in the credentials (`glue.access_key` and `glue.secret_key`) in the Glue catalog, the default `DefaultAWSCredentialsProviderChain` will be used; it reads credentials from the system environment variables or from the instance profile on AWS EC2, as shown in the sketch below.
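+
+A minimal sketch of such a credential-less Glue catalog, assuming the EC2 environment supplies the credentials:
+
+```sql
+CREATE CATALOG glue PROPERTIES (
+    "type"="iceberg",
+    "iceberg.catalog.type" = "glue",
+    "glue.endpoint" = "https://glue.us-east-1.amazonaws.com"
+);
+```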

 #### Alibaba Cloud DLF

diff --git a/docs/sidebars.json b/docs/sidebars.json
index 0f139b14956656..ad201a04e7abc8 100644
--- a/docs/sidebars.json
+++ b/docs/sidebars.json
@@ -219,6 +219,13 @@
                     "lakehouse/multi-catalog/faq-multi-catalog"
                 ]
             },
+            {
+                "type": "category",
+                "label": "Cloud Service Authentication",
+                "items": [
+                    "lakehouse/cloud-auth/cloud-auth"
+                ]
+            },
             "lakehouse/file",
             "lakehouse/filecache",
             "lakehouse/compute-node",
diff --git a/docs/zh-CN/docs/lakehouse/cloud-auth/cloud-auth.md b/docs/zh-CN/docs/lakehouse/cloud-auth/cloud-auth.md
new file mode 100644
index 00000000000000..8d31711eabdba8
--- /dev/null
+++ b/docs/zh-CN/docs/lakehouse/cloud-auth/cloud-auth.md
@@ -0,0 +1,61 @@
+---
+{
+    "title": "云服务认证接入",
+    "language": "zh-CN"
+}
+---
+
+
+# Overview
+
+When accessing a cloud service, we need to provide the credentials required by that service so that it can pass the authentication of the cloud vendor's IAM.
+
+## AWS
+
+Doris currently supports two types of authentication when accessing AWS services.
+
+### Authentication via Catalog Properties
+
+The Catalog supports filling in basic Credentials properties, for example:
+1. When accessing S3: `s3.endpoint`, `s3.access_key`, `s3.secret_key`.
+2. When accessing Glue: `glue.endpoint`, `glue.access_key`, `glue.secret_key`.
+
+Taking an Iceberg Catalog accessing Glue as an example, we can fill in the following properties to access tables hosted on Glue:
+
+```sql
+CREATE CATALOG glue PROPERTIES (
+    "type"="iceberg",
+    "iceberg.catalog.type" = "glue",
+    "glue.endpoint" = "https://glue.us-east-1.amazonaws.com",
+    "glue.access_key" = "ak",
+    "glue.secret_key" = "sk"
+);
+```
+
+### Authentication via System Properties
+
+This is intended for applications running on AWS resources such as EC2 instances. It avoids hardcoding Credentials and enhances data security.
+
+If we do not fill in the Credentials properties when creating the Catalog, the `DefaultAWSCredentialsProviderChain` will be used, which can read the properties configured in system environment variables or in the instance profile.
+
+For how to configure environment variables and system properties, see [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html).
+- The configurable environment variables include: `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN`, `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and so on.
+- In addition, you can also use [aws configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) to configure Credentials directly; a credentials file will be generated in the `~/.aws` directory.
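+
+With the access_key/secret_key check relaxed, an S3 table-valued function query can likewise rely on environment credentials. A hypothetical sketch (the `uri` and `format` properties follow common S3 TVF usage and are assumptions, not taken from this change):
+
+```sql
+SELECT * FROM S3 (
+    "uri" = "s3://bucket/dir/file.csv",
+    "s3.region" = "us-east-1",
+    "format" = "csv"
+);
+```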
diff --git a/docs/zh-CN/docs/lakehouse/multi-catalog/iceberg.md b/docs/zh-CN/docs/lakehouse/multi-catalog/iceberg.md
index 3e6a4826d02049..d1613a65a078df 100644
--- a/docs/zh-CN/docs/lakehouse/multi-catalog/iceberg.md
+++ b/docs/zh-CN/docs/lakehouse/multi-catalog/iceberg.md
@@ -78,6 +78,17 @@ CREATE CATALOG iceberg_hadoop_ha PROPERTIES (
 );
 ```

+```sql
+CREATE CATALOG iceberg_s3 PROPERTIES (
+    'type'='iceberg',
+    'iceberg.catalog.type' = 'hadoop',
+    'warehouse' = 's3://bucket/dir/key',
+    's3.endpoint' = 's3.us-east-1.amazonaws.com',
+    's3.access_key' = 'ak',
+    's3.secret_key' = 'sk'
+);
+```
+
 #### Hive Metastore

 ```sql
@@ -106,7 +117,9 @@ CREATE CATALOG glue PROPERTIES (
 );
 ```

-For details on Iceberg properties, see [Iceberg Glue Catalog](https://iceberg.apache.org/docs/latest/aws/#glue-catalog)
+1. For details on Iceberg properties, see [Iceberg Glue Catalog](https://iceberg.apache.org/docs/latest/aws/#glue-catalog).
+
+2. On an AWS service such as EC2, if you do not fill in the Credentials information (`glue.access_key` and `glue.secret_key`), Doris will use the default `DefaultAWSCredentialsProviderChain`, which reads the properties configured in the system environment variables or in the instance profile, as sketched below.
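+
+A minimal sketch of such a credential-less Glue catalog, assuming the environment supplies the credentials:
+
+```sql
+CREATE CATALOG glue PROPERTIES (
+    "type"="iceberg",
+    "iceberg.catalog.type" = "glue",
+    "glue.endpoint" = "https://glue.us-east-1.amazonaws.com"
+);
+```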

 #### Alibaba Cloud DLF

diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/S3Util.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/S3Util.java
index 2d40af321fa49c..05bb2f6a10a271 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/S3Util.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/S3Util.java
@@ -21,8 +21,15 @@

 import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
 import software.amazon.awssdk.auth.credentials.AwsCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain;
 import software.amazon.awssdk.auth.credentials.AwsSessionCredentials;
+import software.amazon.awssdk.auth.credentials.EnvironmentVariableCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.InstanceProfileCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
 import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.SystemPropertyCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.WebIdentityTokenFileCredentialsProvider;
 import software.amazon.awssdk.auth.signer.AwsS3V4Signer;
 import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
 import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption;
@@ -39,7 +46,7 @@ public class S3Util {

     public static S3Client buildS3Client(URI endpoint, String region, CloudCredential credential) {
-        StaticCredentialsProvider scp;
+        AwsCredentialsProvider scp;
         AwsCredentials awsCredential;
         if (!credential.isTemporary()) {
             awsCredential = AwsBasicCredentials.create(credential.getAccessKey(), credential.getSecretKey());
@@ -47,7 +54,16 @@ public static S3Client buildS3Client(URI endpoint, String region, CloudCredential credential) {
             awsCredential = AwsSessionCredentials.create(credential.getAccessKey(),
                     credential.getSecretKey(), credential.getSessionToken());
         }
-        scp = StaticCredentialsProvider.create(awsCredential);
+        if (!credential.isWhole()) {
+            scp = AwsCredentialsProviderChain.of(
+                    SystemPropertyCredentialsProvider.create(),
+                    EnvironmentVariableCredentialsProvider.create(),
+                    WebIdentityTokenFileCredentialsProvider.create(),
+                    ProfileCredentialsProvider.create(),
+                    InstanceProfileCredentialsProvider.create());
+        } else {
+            scp = StaticCredentialsProvider.create(awsCredential);
+        }
         EqualJitterBackoffStrategy backoffStrategy = EqualJitterBackoffStrategy
                 .builder()
                 .baseDelay(Duration.ofSeconds(1))
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergHadoopExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergHadoopExternalCatalog.java
index 683a5b62b63c4a..97de2bfd55c194 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergHadoopExternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergHadoopExternalCatalog.java
@@ -23,6 +23,7 @@

 import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.iceberg.CatalogProperties;
 import org.apache.iceberg.hadoop.HadoopCatalog;

@@ -53,10 +54,11 @@ public IcebergHadoopExternalCatalog(long catalogId, String name, String resource
     protected void initLocalObjectsImpl() {
         icebergCatalogType = ICEBERG_HADOOP;
         HadoopCatalog hadoopCatalog = new HadoopCatalog();
-        hadoopCatalog.setConf(getConfiguration());
+        Configuration conf = getConfiguration();
         // initialize hive catalog
         Map<String, String> catalogProperties = new HashMap<>();
-        String warehouse = catalogProperty.getProperties().get(CatalogProperties.WAREHOUSE_LOCATION);
+        String warehouse = catalogProperty.getHadoopProperties().get(CatalogProperties.WAREHOUSE_LOCATION);
+        hadoopCatalog.setConf(conf);
         catalogProperties.put(CatalogProperties.WAREHOUSE_LOCATION, warehouse);
         hadoopCatalog.initialize(icebergCatalogType, catalogProperties);
         catalog = hadoopCatalog;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergRestExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergRestExternalCatalog.java
index 25e5488b65f0f2..7eed82471f4111 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergRestExternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergRestExternalCatalog.java
@@ -18,9 +18,9 @@
 package org.apache.doris.datasource.iceberg;

 import org.apache.doris.datasource.CatalogProperty;
-import org.apache.doris.datasource.credentials.DataLakeAWSCredentialsProvider;
 import org.apache.doris.datasource.iceberg.rest.DorisIcebergRestResolvedIO;
 import org.apache.doris.datasource.property.PropertyConverter;
+import org.apache.doris.datasource.property.constants.S3Properties;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.s3a.Constants;
@@ -58,9 +58,8 @@ protected void initLocalObjectsImpl() {

     private Configuration replaceS3Properties(Configuration conf) {
         Map<String, String> catalogProperties = catalogProperty.getHadoopProperties();
-        String credentials = catalogProperties
-                .getOrDefault(Constants.AWS_CREDENTIALS_PROVIDER, DataLakeAWSCredentialsProvider.class.getName());
-        conf.set(Constants.AWS_CREDENTIALS_PROVIDER, credentials);
+        String defaultProviderList = String.join(",", S3Properties.AWS_CREDENTIALS_PROVIDERS);
+        conf.set(Constants.AWS_CREDENTIALS_PROVIDER, defaultProviderList);
         String usePahStyle = catalogProperties.getOrDefault(PropertyConverter.USE_PATH_STYLE, "true");
         // Set path style
         conf.set(PropertyConverter.USE_PATH_STYLE, usePahStyle);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/constants/S3Properties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/constants/S3Properties.java
index ea3a8333b30b66..c4d3cce9c2c29c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/constants/S3Properties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/constants/S3Properties.java
@@ -56,7 +56,7 @@ public class S3Properties extends BaseProperties {
     public static final String ROOT_PATH = "s3.root.path";
     public static final String BUCKET = "s3.bucket";
     public static final String VALIDITY_CHECK = "s3_validity_check";
-    public static final List<String> REQUIRED_FIELDS = Arrays.asList(ENDPOINT, ACCESS_KEY, SECRET_KEY);
+    public static final List<String> REQUIRED_FIELDS = Arrays.asList(ENDPOINT);
     public static final List<String> TVF_REQUIRED_FIELDS = Arrays.asList(ACCESS_KEY, SECRET_KEY);
     public static final List<String> FS_KEYS = Arrays.asList(ENDPOINT, REGION, ACCESS_KEY, SECRET_KEY, SESSION_TOKEN,
             ROOT_PATH, BUCKET, MAX_CONNECTIONS, REQUEST_TIMEOUT_MS, CONNECTION_TIMEOUT_MS);
@@ -100,7 +100,7 @@ public static class Env {
         public static final String DEFAULT_MAX_CONNECTIONS = "50";
         public static final String DEFAULT_REQUEST_TIMEOUT_MS = "3000";
         public static final String DEFAULT_CONNECTION_TIMEOUT_MS = "1000";
-        public static final List<String> REQUIRED_FIELDS = Arrays.asList(ENDPOINT, ACCESS_KEY, SECRET_KEY);
+        public static final List<String> REQUIRED_FIELDS = Arrays.asList(ENDPOINT);
         public static final List<String> FS_KEYS = Arrays.asList(ENDPOINT, REGION, ACCESS_KEY, SECRET_KEY, TOKEN,
                 ROOT_PATH, BUCKET, MAX_CONNECTIONS, REQUEST_TIMEOUT_MS, CONNECTION_TIMEOUT_MS);
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java
index 9ad6232c4e0bde..bf87b0c8ba8f26 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/tablefunction/S3TableValuedFunction.java
@@ -117,12 +117,7 @@ private void checkNecessaryS3Properties(Map<String, String> props) throws AnalysisException {
         if (Strings.isNullOrEmpty(props.get(S3Properties.REGION))) {
             throw new AnalysisException(String.format("Properties '%s' is required.", S3Properties.REGION));
         }
-        if (Strings.isNullOrEmpty(props.get(S3Properties.ACCESS_KEY))) {
-            throw new AnalysisException(String.format("Properties '%s' is required.", S3Properties.ACCESS_KEY));
-        }
-        if (Strings.isNullOrEmpty(props.get(S3Properties.SECRET_KEY))) {
-            throw new AnalysisException(String.format("Properties '%s' is required.", S3Properties.SECRET_KEY));
-        }
+        // Do not check ak and sk here, because they can be read from the system environment.
     }

     private String getEndpointAndSetVirtualBucket(S3URI s3uri, Map<String, String> props)
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/PropertyConverterTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/PropertyConverterTest.java
index 7b2f5d4c0ac68e..cefac64c531996 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/PropertyConverterTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/PropertyConverterTest.java
@@ -55,6 +55,7 @@
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;

+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -77,7 +78,7 @@ protected void runBeforeAll() throws Exception {
         List<String> withoutPrefix = ImmutableList.of("endpoint", "access_key", "secret_key");
         checkSet.addAll(withoutPrefix);
-        checkSet.addAll(S3Properties.Env.REQUIRED_FIELDS);
+        checkSet.addAll(Arrays.asList(S3Properties.ENDPOINT, S3Properties.ACCESS_KEY, S3Properties.SECRET_KEY));
         expectedCredential.put("access_key", "akk");
         expectedCredential.put("secret_key", "skk");
     }
diff --git a/regression-test/suites/cold_heat_separation/policy/create.groovy b/regression-test/suites/cold_heat_separation/policy/create.groovy
index 1232dbe504f13f..81f8fa5001e93c 100644
--- a/regression-test/suites/cold_heat_separation/policy/create.groovy
+++ b/regression-test/suites/cold_heat_separation/policy/create.groovy
@@ -220,8 +220,8 @@ suite("create_policy") {
                 "s3_validity_check" = "false"
             );
         """
-        // errCode = 2, detailMessage = Missing [AWS_ACCESS_KEY] in properties.
-        assertEquals(failed_create_2, null)
+        // can read AWS_ACCESS_KEY from environment variables
+        assertEquals(failed_create_2, [[0]])
     }

     if (has_created_2.size() == 0) {
@@ -240,7 +240,7 @@
                 "s3_validity_check" = "false"
             );
         """
-        // errCode = 2, detailMessage = Missing [AWS_SECRET_KEY] in properties.
+        // can read AWS_SECRET_KEY from environment variables
         assertEquals(failed_create_2, null)
     }