
[#5019] feat(hadoop-catalog): Add a framework to support multi-storage in a pluggable manner for fileset catalog #5020

Open · wants to merge 14 commits into main
4 changes: 4 additions & 0 deletions .gitignore
@@ -53,3 +53,7 @@ include clients/client-python/.gitignore
**/metastore_db
**/spark-warehouse
derby.log

web/node_modules
web/dist
web/.next
FileSystemProvider.java (new file)
@@ -0,0 +1,50 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.gravitino.catalog.hadoop;

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.fs.FileSystem;

public interface FileSystemProvider {

/**
* Get the FileSystem instance according to the configuration.
*
* <p>Compared to the FileSystem.get method, this method allows the provider to create a
* FileSystem instance with a specific configuration and to do further initialization if
* needed. The target path, when present, is passed through the configuration's fs.defaultFS
* entry.
*
* <p>For example, a provider can validate the endpoint configuration for S3AFileSystem, or set
* a default one.
*
* @param config The configuration for the FileSystem instance.
* @return The FileSystem instance.
* @throws IOException If the FileSystem instance cannot be created.
*/
FileSystem getFileSystem(Map<String, String> config) throws IOException;

/**
* Get the scheme of this FileSystem provider: "file" for LocalFileSystem, "hdfs" for HDFS,
* "s3a" for S3AFileSystem, etc.
*
* @return The scheme of this FileSystem provider.
*/
String getScheme();
}
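
For reference, the PR registers a LocalFileSystemProvider counterpart whose source does not appear in this diff view. A minimal sketch of what such an implementation might look like (the body below is an assumption, not the PR's actual code):

package org.apache.gravitino.catalog.hadoop.fs;

import java.io.IOException;
import java.util.Map;
import org.apache.gravitino.catalog.hadoop.FileSystemProvider;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LocalFileSystemProvider implements FileSystemProvider {

  @Override
  public FileSystem getFileSystem(Map<String, String> config) throws IOException {
    // Copy the catalog configuration into a fresh Hadoop Configuration.
    Configuration configuration = new Configuration();
    config.forEach(configuration::set);
    // "file:///" always resolves to the local file system implementation.
    return FileSystem.get(new Path("file:///").toUri(), configuration);
  }

  @Override
  public String getScheme() {
    return "file";
  }
}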
HadoopCatalogOperations.java
@@ -44,6 +44,8 @@
import org.apache.gravitino.audit.CallerContext;
import org.apache.gravitino.audit.FilesetAuditConstants;
import org.apache.gravitino.audit.FilesetDataOperation;
import org.apache.gravitino.catalog.hadoop.fs.HDFSFileSystemProvider;
import org.apache.gravitino.catalog.hadoop.fs.LocalFileSystemProvider;
import org.apache.gravitino.connector.CatalogInfo;
import org.apache.gravitino.connector.CatalogOperations;
import org.apache.gravitino.connector.HasPropertyMetadata;
@@ -75,8 +77,12 @@ public class HadoopCatalogOperations implements CatalogOperations, SupportsSchem
private static final String SCHEMA_DOES_NOT_EXIST_MSG = "Schema %s does not exist";
private static final String FILESET_DOES_NOT_EXIST_MSG = "Fileset %s does not exist";
private static final String SLASH = "/";
public static final String DEFAULT_FS = "fs.defaultFS";
private static final String LOCAL_FILE_SCHEMA = "file";
public static final String LOCAL_FILE_PATH = "file:///";

private static final Logger LOG = LoggerFactory.getLogger(HadoopCatalogOperations.class);
public static final Map<String, FileSystemProvider> FILE_SYSTEM_PROVIDERS = Maps.newHashMap();

private final EntityStore store;

@@ -88,8 +94,18 @@ public class HadoopCatalogOperations implements CatalogOperations, SupportsSchem

private Map<String, String> conf;

Map<String, String> bypassConfigs;

private CatalogInfo catalogInfo;

static {
FileSystemProvider localFileSystemProvider = new LocalFileSystemProvider();
FileSystemProvider hdfsFileSystemProvider = new HDFSFileSystemProvider();

FILE_SYSTEM_PROVIDERS.put(localFileSystemProvider.getScheme(), localFileSystemProvider);
FILE_SYSTEM_PROVIDERS.put(hdfsFileSystemProvider.getScheme(), hdfsFileSystemProvider);
}

HadoopCatalogOperations(EntityStore store) {
this.store = store;
}
@@ -119,32 +135,59 @@ public void initialize(
Map<String, String> config, CatalogInfo info, HasPropertyMetadata propertiesMetadata)
throws RuntimeException {
this.propertiesMetadata = propertiesMetadata;
this.catalogInfo = info;

// Initialize Hadoop Configuration.
this.conf = config;
this.hadoopConf = new Configuration();
this.catalogInfo = info;

hadoopConf = new Configuration();
Map<String, String> bypassConfigs =
config.entrySet().stream()
conf.entrySet().stream()
.filter(e -> e.getKey().startsWith(CATALOG_BYPASS_PREFIX))
.collect(
Collectors.toMap(
e -> e.getKey().substring(CATALOG_BYPASS_PREFIX.length()),
Map.Entry::getValue));
bypassConfigs.forEach(hadoopConf::set);
Contributor:
The initialization of the Hadoop conf here seems to be useless. It would be better to move the code above into the DefaultConfigurationProvider.

Contributor (Author):
Yeah, you are right. I will change it according to your suggestion.
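
A sketch of the suggested refactor. Note that DefaultConfigurationProvider is not part of this diff, so the class name and shape below are assumptions:

import java.util.Map;
import org.apache.hadoop.conf.Configuration;

// Hypothetical sketch: assumes a static import of the same CATALOG_BYPASS_PREFIX
// constant already used in initialize().
public class DefaultConfigurationProvider {
  public static Configuration toHadoopConf(Map<String, String> catalogConf) {
    // Strip the catalog bypass prefix and copy the entries into a Hadoop Configuration,
    // mirroring what HadoopCatalogOperations.initialize currently does inline.
    Configuration configuration = new Configuration();
    catalogConf.entrySet().stream()
        .filter(e -> e.getKey().startsWith(CATALOG_BYPASS_PREFIX))
        .forEach(
            e ->
                configuration.set(
                    e.getKey().substring(CATALOG_BYPASS_PREFIX.length()), e.getValue()));
    return configuration;
  }
}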


this.bypassConfigs = bypassConfigs;

initPluginFileSystem(config);

String catalogLocation =
(String)
propertiesMetadata
.catalogPropertiesMetadata()
.getOrDefault(config, HadoopCatalogPropertiesMetadata.LOCATION);
conf.forEach(hadoopConf::set);

this.catalogStorageLocation =
StringUtils.isNotBlank(catalogLocation)
? Optional.of(catalogLocation).map(Path::new)
: Optional.empty();
}

private void initPluginFileSystem(Map<String, String> config) {
String fileSystemProviders =
(String)
propertiesMetadata
.catalogPropertiesMetadata()
.getOrDefault(config, HadoopCatalogPropertiesMetadata.FILESYSTEM_PROVIDER);

if (StringUtils.isNotBlank(fileSystemProviders)) {
String[] providers = fileSystemProviders.split(",");
for (String provider : providers) {
try {
FileSystemProvider fileSystemProvider =
(FileSystemProvider)
Class.forName(provider.trim()).getDeclaredConstructor().newInstance();
FILE_SYSTEM_PROVIDERS.put(fileSystemProvider.getScheme(), fileSystemProvider);
} catch (Exception e) {
throw new GravitinoRuntimeException(
e, "Failed to initialize file system provider: %s", provider);
}
}
}
}

@Override
public NameIdentifier[] listFilesets(Namespace namespace) throws NoSuchSchemaException {
try {
@@ -235,8 +278,9 @@ public Fileset createFileset(

try {
// formalize the path to avoid path without scheme, uri, authority, etc.
filesetPath = formalizePath(filesetPath, hadoopConf);
FileSystem fs = filesetPath.getFileSystem(hadoopConf);
filesetPath = formalizePath(filesetPath, bypassConfigs);

FileSystem fs = getFileSystem(filesetPath, bypassConfigs);
if (!fs.exists(filesetPath)) {
if (!fs.mkdirs(filesetPath)) {
throw new RuntimeException(
@@ -339,7 +383,7 @@ public boolean dropFileset(NameIdentifier ident) {

// For managed fileset, we should delete the related files.
if (filesetEntity.filesetType() == Fileset.Type.MANAGED) {
FileSystem fs = filesetPath.getFileSystem(hadoopConf);
FileSystem fs = getFileSystem(filesetPath, bypassConfigs);
if (fs.exists(filesetPath)) {
if (!fs.delete(filesetPath, true)) {
LOG.warn("Failed to delete fileset {} location {}", ident, filesetPath);
@@ -459,7 +503,7 @@ public Schema createSchema(NameIdentifier ident, String comment, Map<String, Str
Path schemaPath = getSchemaPath(ident.name(), properties);
if (schemaPath != null) {
try {
FileSystem fs = schemaPath.getFileSystem(hadoopConf);
FileSystem fs = getFileSystem(schemaPath, bypassConfigs);
if (!fs.exists(schemaPath)) {
if (!fs.mkdirs(schemaPath)) {
// Fail the operation when failed to create the schema path.
@@ -577,8 +621,7 @@ public boolean dropSchema(NameIdentifier ident, boolean cascade) throws NonEmpty
if (schemaPath == null) {
return false;
}

FileSystem fs = schemaPath.getFileSystem(hadoopConf);
FileSystem fs = getFileSystem(schemaPath, bypassConfigs);
// Nothing to delete if the schema path does not exist.
if (!fs.exists(schemaPath)) {
return false;
@@ -717,8 +760,8 @@ private Path getSchemaPath(String name, Map<String, String> properties) {
}

@VisibleForTesting
static Path formalizePath(Path path, Configuration configuration) throws IOException {
FileSystem defaultFs = FileSystem.get(configuration);
static Path formalizePath(Path path, Map<String, String> configuration) throws IOException {
FileSystem defaultFs = getFileSystem(path, configuration);
return path.makeQualified(defaultFs.getUri(), defaultFs.getWorkingDirectory());
}

@@ -731,7 +774,7 @@ private boolean hasCallerContext() {
private boolean checkSingleFile(Fileset fileset) {
try {
Path locationPath = new Path(fileset.storageLocation());
return locationPath.getFileSystem(hadoopConf).getFileStatus(locationPath).isFile();
return getFileSystem(locationPath, bypassConfigs).getFileStatus(locationPath).isFile();
} catch (FileNotFoundException e) {
// We should always return false here, same with the logic in `FileSystem.isFile(Path f)`.
return false;
@@ -742,4 +785,44 @@ private boolean checkSingleFile(Fileset fileset) {
fileset.name());
}
}

static FileSystem getFileSystem(Path path, Map<String, String> config) throws IOException {
Map<String, String> newConfig = Maps.newHashMap(config);
String scheme;
Path fsPath;
if (path != null) {
scheme = path.toUri().getScheme();
if (scheme == null) {
scheme = LOCAL_FILE_SCHEMA;
}
fsPath = path;
} else {
String defaultFS = config.get(DEFAULT_FS);
if (defaultFS == null) {
// Should be the local file system.
scheme = LOCAL_FILE_SCHEMA;
fsPath = new Path(LOCAL_FILE_PATH);
} else {
fsPath = new Path(defaultFS);
if (fsPath.toUri().getScheme() == null) {
scheme = LOCAL_FILE_SCHEMA;
} else {
scheme = fsPath.toUri().getScheme();
}
}
}

// For any non-local file system, we need to explicitly set the default FS.
if (!LOCAL_FILE_SCHEMA.equals(scheme)) {
newConfig.put(DEFAULT_FS, fsPath.toString());
}

FileSystemProvider provider = FILE_SYSTEM_PROVIDERS.get(scheme);
if (provider == null) {
throw new IllegalArgumentException("Unsupported scheme: " + scheme);
}

return provider.getFileSystem(newConfig);
}
}
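
To make the scheme-resolution order of getFileSystem concrete, a few illustrative cases (hosts and paths below are examples, not values from the PR):

// path = new Path("hdfs://nn:8020/warehouse") -> scheme "hdfs"; fs.defaultFS is set to the path
// path = new Path("/tmp/fileset")             -> URI has no scheme; falls back to "file"
// path = null, config lacks fs.defaultFS      -> local file system, path "file:///"
// path = new Path("s3a://bucket/data")        -> IllegalArgumentException unless an "s3a"
//                                                provider was registered via the catalog property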
HadoopCatalogPropertiesMetadata.java
@@ -34,6 +34,15 @@ public class HadoopCatalogPropertiesMetadata extends BaseCatalogPropertiesMetada
// If not, users have to specify the storage location in the Schema or Fileset level.
public static final String LOCATION = "location";

/**
* The implementation class names of the {@link FileSystemProvider}s to be used by the catalog.
* Gravitino supports LocalFileSystem and HDFS by default. Users can add their own providers by
* implementing {@link FileSystemProvider} and specifying the class names here.
*
* <p>The value is a comma-separated list, e.g. 'xxxx.yyy.FileSystemProvider1,
* xxxx.yyy.FileSystemProvider2'.
*/
public static final String FILESYSTEM_PROVIDER = "filesystem.providers";

private static final Map<String, PropertyEntry<?>> HADOOP_CATALOG_PROPERTY_ENTRIES =
ImmutableMap.<String, PropertyEntry<?>>builder()
.put(
@@ -44,6 +53,14 @@ public class HadoopCatalogPropertiesMetadata extends BaseCatalogPropertiesMetada
false /* immutable */,
null,
false /* hidden */))
.put(
FILESYSTEM_PROVIDER,
PropertyEntry.stringOptionalPropertyEntry(
FILESYSTEM_PROVIDER,
"The file system provider class name, separated by comma",
false /* immutable */,
null,
false /* hidden */))
// The following two are about authentication.
.putAll(KerberosConfig.KERBEROS_PROPERTY_ENTRIES)
.putAll(AuthenticationConfig.AUTHENTICATION_PROPERTY_ENTRIES)
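As a usage sketch, wiring a custom provider through this property might look like the following (the provider class and bucket name are hypothetical):

Map<String, String> catalogProperties =
    ImmutableMap.of(
        "location", "s3a://my-bucket/warehouse", // hypothetical bucket
        "filesystem.providers", "com.example.S3FileSystemProvider"); // hypothetical class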
HDFSFileSystemProvider.java (new file)
@@ -0,0 +1,76 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.gravitino.catalog.hadoop.fs;

import java.io.IOException;
import java.net.URI;
import java.util.Map;
import org.apache.gravitino.catalog.hadoop.FileSystemProvider;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HDFSFileSystemProvider implements FileSystemProvider {

@Override
public FileSystem getFileSystem(Map<String, String> config) throws IOException {
Configuration configuration = new Configuration();
config.forEach(configuration::set);

String pathString = configuration.get("fs.defaultFS");
if (pathString == null) {
throw new IllegalArgumentException("The configuration 'fs.defaultFS' should be specified.");
}

URI uri = new Path(pathString).toUri();
if (uri.getScheme() != null && !uri.getScheme().equals("hdfs")) {
throw new IllegalArgumentException("The path should be an HDFS path.");
}

// Should we call DistributedFileSystem to create the file system instance explicitly? If we
// explicitly create an HDFS file system here, we can't reuse the file system cache in the
// FileSystem class.
String impl = configuration.get("fs.hdfs.impl");
if (impl == null) {
configuration.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
} else {
if (!impl.equals("org.apache.hadoop.hdfs.DistributedFileSystem")) {
throw new IllegalArgumentException(
"The HDFS file system implementation class should be 'org.apache.hadoop.hdfs.DistributedFileSystem'.");
}
}

try {
// Ensure the implementation class can be loaded before handing the configuration to
// FileSystem.get; loadClass throws ClassNotFoundException if the class is missing.
HDFSFileSystemProvider.class.getClassLoader().loadClass(configuration.get("fs.hdfs.impl"));
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException("The HDFS file system implementation class is not found.");
}

return FileSystem.get(uri, configuration);
}

@Override
public String getScheme() {
return "hdfs";
}
}
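
A minimal usage sketch for this provider (the NameNode address below is an example value):

import java.util.HashMap;
import java.util.Map;
import org.apache.gravitino.catalog.hadoop.fs.HDFSFileSystemProvider;
import org.apache.hadoop.fs.FileSystem;

Map<String, String> config = new HashMap<>();
config.put("fs.defaultFS", "hdfs://namenode:8020");
// getFileSystem validates the scheme and returns a DistributedFileSystem for this URI.
FileSystem fs = new HDFSFileSystemProvider().getFileSystem(config);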