diff --git a/build-project.sh b/build-project.sh
index 6651e80054..037313482d 100755
--- a/build-project.sh
+++ b/build-project.sh
@@ -3,5 +3,5 @@
 set -e
 
 HDFS_VERSION=$1
-mvn install:install-file -pl smart-tests -Dos.arch=x86_64
-mvn clean install -Pdist,web-ui,hadoop-"${HDFS_VERSION}" -DskipTests -Dos.arch=x86_64
\ No newline at end of file
+mvn install:install-file -pl smart-tests
+mvn clean install -Pdist,web-ui,hadoop-"${HDFS_VERSION}" -DskipTests
\ No newline at end of file
diff --git a/docs/ssm-deployment-guide.md b/docs/ssm-deployment-guide.md
index 02425875bb..280a6c2c63 100755
--- a/docs/ssm-deployment-guide.md
+++ b/docs/ssm-deployment-guide.md
@@ -532,7 +532,7 @@ Note: To make the scripts work, you have to set up SSH password-less connections
 ```
 2017-07-15 00:38:28,619 INFO org.smartdata.hdfs.HdfsStatesUpdateService.init 68: Initializing ...
 2017-07-15 00:38:29,350 ERROR org.smartdata.hdfs.HdfsStatesUpdateService.checkAndMarkRunning 138: Unable to lock 'mover', please stop 'mover' first.
-2017-07-15 00:38:29,350 INFO org.smartdata.server.engine.StatesManager.initStatesUpdaterService 180: Failed to create states updater service.
+2017-07-15 00:38:29,350 INFO org.smartdata.server.engine.HdfsStatesManager.initStatesUpdaterService 180: Failed to create states updater service.
 ```
 
 Notes
diff --git a/smart-action/src/main/java/org/smartdata/action/AbstractActionFactory.java b/smart-action/src/main/java/org/smartdata/action/AbstractActionFactory.java
index 4886187b7e..0ed669e47e 100644
--- a/smart-action/src/main/java/org/smartdata/action/AbstractActionFactory.java
+++ b/smart-action/src/main/java/org/smartdata/action/AbstractActionFactory.java
@@ -17,44 +17,66 @@
  */
 package org.smartdata.action;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import lombok.extern.slf4j.Slf4j;
 import org.smartdata.action.annotation.ActionSignature;
 
-import java.util.Collections;
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
 
 /**
  * A common action factory for action providers to use.
  */
+@Slf4j
 public abstract class AbstractActionFactory implements ActionFactory {
-  static final Logger LOG = LoggerFactory.getLogger(AbstractActionFactory.class);
-  private static Map<String, Class<? extends SmartAction>> supportedActions = new HashMap<>();
-
-  static {
-    addAction(EchoAction.class);
-    addAction(SleepAction.class);
-    addAction(SyncAction.class);
-    addAction(ExecAction.class);
-  }
+  private static final List<Class<? extends SmartAction>> COMMON_ACTIONS = Arrays.asList(
+      EchoAction.class,
+      SleepAction.class,
+      ExecAction.class
+  );
+
+  @Override
+  public Map<String, Class<? extends SmartAction>> getSupportedActions() {
+    Map<String, Class<? extends SmartAction>> supportedActions = new HashMap<>();
+    COMMON_ACTIONS.forEach(
+        actionClass -> addActionInfo(supportedActions, actionClass));
+    supportedActionClasses().forEach(
+        actionClass -> addActionInfo(supportedActions, actionClass));
 
-  protected static void addAction(Class<? extends SmartAction> actionClass) {
-    ActionSignature actionSignature = actionClass.getAnnotation(ActionSignature.class);
-    if (actionSignature != null) {
-      String actionId = actionSignature.actionId();
-      if (!supportedActions.containsKey(actionId)) {
-        supportedActions.put(actionId, actionClass);
-      } else {
-        LOG.error("There is already an Action registered with id {}.", actionId);
-      }
-    } else {
-      LOG.error("Action {} does not has an ActionSignature.", actionClass.getName());
-    }
+    return supportedActions;
   }
 
   @Override
-  public Map<String, Class<? extends SmartAction>> getSupportedActions() {
-    return Collections.unmodifiableMap(supportedActions);
+  public Set<ActionMetadata> getActionMetadata() {
+    Set<ActionMetadata> actionMetadata = new HashSet<>();
+    COMMON_ACTIONS.forEach(action ->
+        toActionMetadata(action).ifPresent(actionMetadata::add));
+    supportedActionClasses().forEach(action ->
+        toActionMetadata(action).ifPresent(actionMetadata::add));
+
+    return actionMetadata;
+  }
+
+  protected abstract List<Class<? extends SmartAction>> supportedActionClasses();
+
+  private Optional<ActionMetadata> toActionMetadata(Class<? extends SmartAction> actionClass) {
+    return actionSignature(actionClass)
+        .map(signature -> new ActionMetadata(signature.actionId(), signature.usage()));
+  }
+
+  private void addActionInfo(
+      Map<String, Class<? extends SmartAction>> supportedActions,
+      Class<? extends SmartAction> actionClass) {
+    actionSignature(actionClass)
+        .map(ActionSignature::actionId)
+        .ifPresent(actionId -> supportedActions.put(actionId, actionClass));
+  }
+
+  private Optional<ActionSignature> actionSignature(Class<? extends SmartAction> actionClass) {
+    return Optional.ofNullable(actionClass.getAnnotation(ActionSignature.class));
+  }
 }
diff --git a/smart-action/src/main/java/org/smartdata/action/ActionFactory.java b/smart-action/src/main/java/org/smartdata/action/ActionFactory.java
index a66fafc263..0d5e95a0bf 100644
--- a/smart-action/src/main/java/org/smartdata/action/ActionFactory.java
+++ b/smart-action/src/main/java/org/smartdata/action/ActionFactory.java
@@ -18,6 +18,7 @@
 package org.smartdata.action;
 
 import java.util.Map;
+import java.util.Set;
 
 /**
  * Action factory interface. Either built-in or user defined actions will be
@@ -30,4 +31,6 @@ public interface ActionFactory {
    * @return supported actions
    */
   Map<String, Class<? extends SmartAction>> getSupportedActions();
+
+  Set<ActionMetadata> getActionMetadata();
 }
diff --git a/smart-action/src/main/java/org/smartdata/action/ActionMetadata.java b/smart-action/src/main/java/org/smartdata/action/ActionMetadata.java
new file mode 100644
index 0000000000..3661f6d987
--- /dev/null
+++ b/smart-action/src/main/java/org/smartdata/action/ActionMetadata.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.smartdata.action;
+
+import lombok.Data;
+
+@Data
+public class ActionMetadata {
+  private final String name;
+  private final String usage;
+}
diff --git a/smart-action/src/main/java/org/smartdata/action/ActionRegistry.java b/smart-action/src/main/java/org/smartdata/action/ActionRegistry.java
index e292a32f35..5b1d7a32ce 100644
--- a/smart-action/src/main/java/org/smartdata/action/ActionRegistry.java
+++ b/smart-action/src/main/java/org/smartdata/action/ActionRegistry.java
@@ -17,73 +17,57 @@
  */
 package org.smartdata.action;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.smartdata.action.annotation.ActionSignature;
-import org.smartdata.model.ActionDescriptor;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
-import java.util.ServiceConfigurationError;
-import java.util.ServiceLoader;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
 
 /**
  * Actions registry. Singleton.
  */
+@Slf4j
 public class ActionRegistry {
-  static final Logger LOG = LoggerFactory.getLogger(ActionRegistry.class);
-  private static final Map<String, Class<? extends SmartAction>> ACTIONS = new ConcurrentHashMap<>();
+  @Getter
+  private final Set<ActionMetadata> actionMetadata;
+  private final Map<String, Class<? extends SmartAction>> actions;
 
-  static {
-    try {
-      ServiceLoader<ActionFactory> actionFactories = ServiceLoader.load(ActionFactory.class);
-      for (ActionFactory fact : actionFactories) {
-        ACTIONS.putAll(fact.getSupportedActions());
-      }
-    } catch (ServiceConfigurationError e) {
-      LOG.error("Load actions failed from factory");
-    }
-  }
+  public ActionRegistry(Collection<ActionFactory> factories) {
+    this.actions = new HashMap<>();
+    this.actionMetadata = new HashSet<>();
+
+    factories.stream()
+        .map(ActionFactory::getSupportedActions)
+        .forEach(actions::putAll);
 
-  public static Set<String> registeredActions() {
-    return Collections.unmodifiableSet(ACTIONS.keySet());
+    factories.stream()
+        .map(ActionFactory::getActionMetadata)
+        .forEach(actionMetadata::addAll);
   }
 
-  public static boolean registeredAction(String name) {
-    return ACTIONS.containsKey(name);
+  public Set<String> registeredActions() {
+    return actions.keySet();
   }
 
-  public static List<ActionDescriptor> supportedActions() {
-    List<ActionDescriptor> actionDescriptors = new ArrayList<>();
-    for (Class<? extends SmartAction> clazz : ACTIONS.values()) {
-      ActionSignature signature = clazz.getAnnotation(ActionSignature.class);
-      if (signature != null) {
-        actionDescriptors.add(fromSignature(signature));
-      }
-    }
-    return actionDescriptors;
+  public boolean isRegistered(String name) {
+    return actions.containsKey(name);
   }
 
-  public static SmartAction createAction(String name) throws ActionException {
-    if (!registeredAction(name)) {
+  public SmartAction createAction(String name) throws ActionException {
+    if (!isRegistered(name)) {
       throw new ActionException("Unregistered action " + name);
     }
+
     try {
-      SmartAction smartAction = ACTIONS.get(name).newInstance();
+      SmartAction smartAction = actions.get(name).newInstance();
       smartAction.setName(name);
       return smartAction;
     } catch (Exception e) {
-      LOG.error("Create {} action failed", name, e);
+      log.error("Create {} action failed", name, e);
       throw new ActionException(e);
     }
   }
-
-  private static ActionDescriptor fromSignature(ActionSignature signature) {
-    return new ActionDescriptor(
-        signature.actionId(), signature.displayName(), signature.usage(), signature.description());
-  }
 }
diff --git a/smart-agent/src/main/java/org/smartdata/agent/SmartAgent.java b/smart-agent/src/main/java/org/smartdata/agent/SmartAgent.java
index 1d75271733..30a3ffce24 100644
--- a/smart-agent/src/main/java/org/smartdata/agent/SmartAgent.java
+++ b/smart-agent/src/main/java/org/smartdata/agent/SmartAgent.java
@@ -78,6 +78,7 @@
 import static org.smartdata.conf.SmartConfKeys.SMART_AGENT_MASTER_CONNECT_TIMEOUT_MS_KEY;
 import static org.smartdata.conf.SmartConfKeys.SMART_CMDLET_EXECUTORS_DEFAULT;
 import static org.smartdata.conf.SmartConfKeys.SMART_CMDLET_EXECUTORS_KEY;
+import static org.smartdata.server.utils.ConfigUtil.enrichSmartConf;
 
 public class SmartAgent implements StatusReporter {
   private static final String NAME = "SmartAgent";
@@ -102,7 +103,7 @@ public SmartAgent(SmartConf smartConfig) throws IOException {
     LOG.info("Agent address: {}", agentAddress);
     this.akkaConfig = AgentUtils.overrideRemoteAddress(
         ConfigFactory.load(AgentConstants.AKKA_CONF_FILE), agentAddress);
-    HadoopUtil.setSmartConfByHadoop(smartConfig);
+    enrichSmartConf(smartConfig);
 
     this.smartConfig = smartConfig;
     this.httpServer = new SmartAgentHttpServer(smartConfig,
diff --git a/smart-common/src/main/java/org/smartdata/AbstractService.java b/smart-common/src/main/java/org/smartdata/AbstractService.java
index d9b1b40845..2c6efece53 100644
--- a/smart-common/src/main/java/org/smartdata/AbstractService.java
+++ b/smart-common/src/main/java/org/smartdata/AbstractService.java
@@ -32,8 +32,4 @@ public AbstractService() {
   public AbstractService(SmartContext context) {
     this.context = context;
   }
-
-  public boolean inSafeMode() {
-    return false;
-  }
 }
diff --git a/smart-common/src/main/java/org/smartdata/SmartConstants.java b/smart-common/src/main/java/org/smartdata/SmartConstants.java
index 8dce87bbde..9b9eca6d14 100644
--- a/smart-common/src/main/java/org/smartdata/SmartConstants.java
+++ b/smart-common/src/main/java/org/smartdata/SmartConstants.java
@@ -44,10 +44,23 @@ public class SmartConstants {
   public static final String SMART_FILE_CHECKSUM_XATTR_NAME = "user.checksum";
 
   public static final String FS_HDFS_IMPL = "fs.hdfs.impl";
+  public static final String FS_OFS_IMPL = "fs.ofs.impl";
+  public static final String FS_O3FS_IMPL = "fs.o3fs.impl";
 
   public static final String SMART_FILE_SYSTEM =
       "org.smartdata.hadoop.filesystem.SmartFileSystem";
+  public static final String SMART_OFS =
+      "org.apache.hadoop.fs.ozone.SmartRootedOzoneFileSystem";
+  public static final String SMART_O3FS =
+      "org.apache.hadoop.fs.ozone.SmartOzoneFileSystem";
+
   public static final String DISTRIBUTED_FILE_SYSTEM =
       "org.apache.hadoop.hdfs.DistributedFileSystem";
+  public static final String OFS =
+      "org.apache.hadoop.fs.ozone.RootedOzoneFileSystem";
+  public static final String O3FS =
+      "org.apache.hadoop.fs.ozone.OzoneFileSystem";
+
+  public static final String REPLICATION_CODEC_NAME = "replication";
 }
diff --git a/smart-common/src/main/java/org/smartdata/SmartService.java b/smart-common/src/main/java/org/smartdata/SmartService.java
index 70341b3278..d99d488218 100644
--- a/smart-common/src/main/java/org/smartdata/SmartService.java
+++ b/smart-common/src/main/java/org/smartdata/SmartService.java
@@ -50,4 +50,8 @@ enum State {
    * @throws IOException
    */
   void stop() throws IOException;
+
+  default boolean inSafeMode() {
+    return false;
+  }
 }
diff --git a/smart-common/src/main/java/org/smartdata/conf/ReconfigurableRegistry.java b/smart-common/src/main/java/org/smartdata/conf/ReconfigurableRegistry.java
deleted file mode 100644
index e54c29ee33..0000000000
--- a/smart-common/src/main/java/org/smartdata/conf/ReconfigurableRegistry.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.smartdata.conf;
-
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ListMultimap;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.Set;
-
-public class ReconfigurableRegistry {
-  private static ListMultimap<String, Reconfigurable> reconfMap =
-      ArrayListMultimap.create();
-  public static final Logger LOG =
-      LoggerFactory.getLogger(ReconfigurableRegistry.class);
-
-  public static void registReconfigurableProperty(String property,
-      Reconfigurable reconfigurable) {
-    synchronized (reconfMap) {
-      reconfMap.put(property, reconfigurable);
-    }
-  }
-
-  public static void registReconfigurableProperty(List<String> properties,
-      Reconfigurable reconfigurable) {
-    synchronized (reconfMap) {
-      for (String p : properties) {
-        reconfMap.put(p, reconfigurable);
-      }
-    }
-  }
-
-  public static void applyReconfigurablePropertyValue(String property, String value) {
-    for (Reconfigurable c : getReconfigurables(property)) {
-      try {
-        c.reconfigureProperty(property, value);
-      } catch (Exception e) {
-        LOG.error("", e);
-        // ignore and continue;
-      }
-    }
-  }
-
-  /**
-   * Return modules interest in the property.
-   * @param property
-   * @return
-   */
-  public static List<Reconfigurable> getReconfigurables(String property) {
-    synchronized (reconfMap) {
-      return reconfMap.get(property);
-    }
-  }
-
-  /**
-   * Return configurable properties in system.
-   * @return
-   */
-  public static Set<String> getAllReconfigurableProperties() {
-    synchronized (reconfMap) {
-      return reconfMap.keySet();
-    }
-  }
-}
diff --git a/smart-common/src/main/java/org/smartdata/conf/ReconfigureException.java b/smart-common/src/main/java/org/smartdata/conf/ReconfigureException.java
deleted file mode 100644
index 8ace272eac..0000000000
--- a/smart-common/src/main/java/org/smartdata/conf/ReconfigureException.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.smartdata.conf;
-
-public class ReconfigureException extends Exception {
-  private String reason;
-  private String property;
-  private String newVal;
-  private String oldVal;
-
-  public ReconfigureException(String reason) {
-    super(reason);
-    this.reason = reason;
-    this.property = null;
-    this.newVal = null;
-    this.oldVal = null;
-  }
-
-  public ReconfigureException(String property, String newVal, String oldVal) {
-    super(formatMessage(property, newVal, oldVal));
-    this.property = property;
-    this.newVal = newVal;
-    this.oldVal = oldVal;
-  }
-
-  public ReconfigureException(String property, String newVal, String oldVal,
-      Throwable cause) {
-    super(formatMessage(property, newVal, oldVal), cause);
-    this.property = property;
-    this.newVal = newVal;
-    this.oldVal = oldVal;
-  }
-
-  public String getReason() {
-    return reason;
-  }
-
-  public String getProperty() {
-    return property;
-  }
-
-  public String getNewValue() {
-    return newVal;
-  }
-
-  public String getOldValue() {
-    return oldVal;
-  }
-
-  private static String formatMessage(String property, String newVal, String oldVal) {
-    return String.format("Failed to reconfig '%s' from '%s' to '%s'",
-        property, oldVal, newVal);
-  }
-}
diff --git a/smart-common/src/main/java/org/smartdata/conf/SmartConf.java b/smart-common/src/main/java/org/smartdata/conf/SmartConf.java
index 6da73a3427..1b32f8800a 100644
--- a/smart-common/src/main/java/org/smartdata/conf/SmartConf.java
+++ b/smart-common/src/main/java/org/smartdata/conf/SmartConf.java
@@ -35,6 +35,7 @@
 import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 
+import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
 import static org.smartdata.conf.SmartConfKeys.SMART_FS_TYPE;
 import static org.smartdata.conf.SmartConfKeys.SMART_FS_TYPE_DEFAULT;
 
@@ -48,13 +49,19 @@ public class SmartConf extends Configuration {
   private Set<String> agentHosts;
   private Set<String> serverHosts;
 
-  public SmartConf() {
+  public SmartConf(Configuration conf) {
+    super(conf);
+
     Configuration.addDefaultResource("smart-default.xml");
     Configuration.addDefaultResource("smart-site.xml");
 
     parseHostsFiles();
   }
 
+  public SmartConf() {
+    this(new Configuration());
+  }
+
   public List<String> getCoverDirs() {
     return ConfigUtil.getCoverDirs(this);
   }
@@ -117,10 +124,6 @@ public Collection<String> getStringCollection(String name, Collection<String> de
         : stringCollection;
   }
 
-  public Map<String, String> asMap() {
-    return asMap(key -> true);
-  }
-
   private void parseHostsFiles() {
     SsmHostsFileReader ssmHostsFileReader = new SsmHostsFileReader();
 
@@ -133,6 +136,11 @@ private void parseHostsFiles() {
     }
   }
 
+  public String getDefaultFs() {
+    return Optional.ofNullable(get(FS_DEFAULT_NAME_KEY))
+        .orElseGet(this::getRpcAddress);
+  }
+
   public SmartFsType getFsType() {
     return getEnum(SMART_FS_TYPE, SMART_FS_TYPE_DEFAULT);
   }
@@ -146,4 +154,10 @@ private Set<String> parseHostsFile(
     Path hostsFilePath = Paths.get(configDir, fileName);
     return hostFileReader.parse(hostsFilePath);
   }
+
+  private String getRpcAddress() {
+    return getFsType() == SmartFsType.HDFS
+        ? get(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY)
+        : get(SmartConfKeys.SMART_OZONE_RPC_SERVER_KEY);
+  }
 }
diff --git a/smart-common/src/main/java/org/smartdata/conf/SmartConfKeys.java b/smart-common/src/main/java/org/smartdata/conf/SmartConfKeys.java
index 0489a4a675..af5a7af202 100644
--- a/smart-common/src/main/java/org/smartdata/conf/SmartConfKeys.java
+++ b/smart-common/src/main/java/org/smartdata/conf/SmartConfKeys.java
@@ -35,6 +35,7 @@ public class SmartConfKeys {
   public static final int SMART_NAMESPACE_FETCHER_BATCH_DEFAULT = 500;
 
   public static final String SMART_DFS_NAMENODE_RPCSERVER_KEY = "smart.dfs.namenode.rpcserver";
+  public static final String SMART_OZONE_RPC_SERVER_KEY = "smart.ozone.rpcserver";
 
   public static final String SMART_FS_TYPE = "smart.fs.type";
   public static final SmartFsType SMART_FS_TYPE_DEFAULT = SmartFsType.HDFS;
diff --git a/smart-common/src/main/java/org/smartdata/metrics/GeneralFileInfoSource.java b/smart-common/src/main/java/org/smartdata/metrics/GeneralFileInfoSource.java
index c4bfede746..b7257e17ec 100644
--- a/smart-common/src/main/java/org/smartdata/metrics/GeneralFileInfoSource.java
+++ b/smart-common/src/main/java/org/smartdata/metrics/GeneralFileInfoSource.java
@@ -17,10 +17,17 @@
  */
 package org.smartdata.metrics;
 
+import org.smartdata.model.BaseFileInfo;
+
 import java.sql.SQLException;
 import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 
 public interface GeneralFileInfoSource {
   Map<String, Long> getPathsToIdsMapping(Collection<String> paths) throws SQLException;
+
+  List<String> getFilePathsByPrefix(String path);
+
+  BaseFileInfo getBaseFileInfo(String path);
 }
diff --git a/smart-common/src/main/java/org/smartdata/model/BackUpInfo.java b/smart-common/src/main/java/org/smartdata/model/BackUpInfo.java
index 300b4ddfaf..96254c964c 100644
--- a/smart-common/src/main/java/org/smartdata/model/BackUpInfo.java
+++ b/smart-common/src/main/java/org/smartdata/model/BackUpInfo.java
@@ -17,101 +17,23 @@
  */
 package org.smartdata.model;
 
-import java.util.Objects;
+import lombok.Builder;
+import lombok.Data;
+import lombok.RequiredArgsConstructor;
 
 import static org.smartdata.utils.StringUtil.ssmPatternToRegex;
 
+@Data
+@RequiredArgsConstructor
+@Builder(toBuilder = true)
 public class BackUpInfo {
-  private long rid;
-  private String src;
-  private String dest;
-  private long period; // in milliseconds
-  private String srcPattern;
+  private final long rid;
+  private final String src;
+  private final String dest;
+  private final long period; // in milliseconds
+  private final String srcPattern;
 
   public BackUpInfo(long rid, String src, String dest, long period) {
     this(rid, src, dest, period, ssmPatternToRegex(src + "*"));
   }
-
-  public BackUpInfo(long rid, String src, String dest, long period, String srcPattern) {
-    this.rid = rid;
-    this.src = src;
-    this.dest = dest;
-    this.period = period;
-    this.srcPattern = srcPattern;
-  }
-
-  public BackUpInfo() {
-  }
-
-  public long getRid() {
-    return rid;
-  }
-
-  public void setRid(long rid) {
-    this.rid = rid;
-  }
-
-  public String getSrc() {
-    return src;
-  }
-
-  public void setSrc(String src) {
-    this.src = src;
-  }
-
-  public String getDest() {
-    return dest;
-  }
-
-  public void setDest(String dest) {
-    this.dest = dest;
-  }
-
-  public long getPeriod() {
-    return period;
-  }
-
-  public void setPeriod(long period) {
-    this.period = period;
-  }
-
-  public String getSrcPattern() {
-    return srcPattern;
-  }
-
-  public void setSrcPattern(String srcPattern) {
-    this.srcPattern = srcPattern;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    BackUpInfo that = (BackUpInfo) o;
-    return rid == that.rid
-        && period == that.period
-        && Objects.equals(src, that.src)
-        && Objects.equals(dest, that.dest)
-        && Objects.equals(srcPattern, that.srcPattern);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(rid, src, dest, period, srcPattern);
-  }
-
-  @Override
-  public String toString() {
-    return "BackUpInfo{"
-        + "rid=" + rid
-        + ", src='" + src + '\''
-        + ", dest='" + dest + '\''
-        + ", period=" + period
-        + ", srcPattern='" + srcPattern + '\''
-        + '}';
-  }
 }
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/FileSystemCache.java b/smart-common/src/main/java/org/smartdata/model/BaseFileInfo.java
similarity index 66%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/FileSystemCache.java
rename to smart-common/src/main/java/org/smartdata/model/BaseFileInfo.java
index e77af9a3bf..25607c8dd2 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/FileSystemCache.java
+++ b/smart-common/src/main/java/org/smartdata/model/BaseFileInfo.java
@@ -15,15 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.smartdata.hdfs.client;
+package org.smartdata.model;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
+public interface BaseFileInfo {
+  String getPath();
 
-import java.io.Closeable;
-import java.io.IOException;
-import java.net.InetSocketAddress;
+  long getLength();
 
-public interface FileSystemCache<T extends DistributedFileSystem> extends Closeable {
-  T get(Configuration config, String user, InetSocketAddress ssmMasterAddress) throws IOException;
+  boolean isDir();
 }
diff --git a/smart-common/src/main/java/org/smartdata/model/FileInfo.java b/smart-common/src/main/java/org/smartdata/model/FileInfo.java
index 516b2c4ce3..a7c4bfab7e 100644
--- a/smart-common/src/main/java/org/smartdata/model/FileInfo.java
+++ b/smart-common/src/main/java/org/smartdata/model/FileInfo.java
@@ -24,7 +24,7 @@
 @Data
 @AllArgsConstructor
 @Builder(setterPrefix = "set")
-public class FileInfo {
+public class FileInfo implements BaseFileInfo {
   private String path;
   private long fileId;
   private long length;
diff --git a/smart-common/src/main/java/org/smartdata/utils/ConfigUtil.java b/smart-common/src/main/java/org/smartdata/utils/ConfigUtil.java
index debcfad11c..c3e1df4078 100644
--- a/smart-common/src/main/java/org/smartdata/utils/ConfigUtil.java
+++ b/smart-common/src/main/java/org/smartdata/utils/ConfigUtil.java
@@ -19,6 +19,7 @@
 
 import com.google.common.net.HostAndPort;
 import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.smartdata.conf.SmartConfKeys;
 
@@ -30,15 +31,33 @@
 
 import static org.smartdata.SmartConstants.DISTRIBUTED_FILE_SYSTEM;
 import static org.smartdata.SmartConstants.FS_HDFS_IMPL;
+import static org.smartdata.SmartConstants.FS_O3FS_IMPL;
+import static org.smartdata.SmartConstants.FS_OFS_IMPL;
+import static org.smartdata.SmartConstants.O3FS;
+import static org.smartdata.SmartConstants.OFS;
 import static org.smartdata.SmartConstants.SMART_FILE_SYSTEM;
+import static org.smartdata.SmartConstants.SMART_O3FS;
+import static org.smartdata.SmartConstants.SMART_OFS;
 
 public class ConfigUtil {
 
   public static Configuration toRemoteClusterConfig(Configuration configuration) {
     Configuration remoteConfig = new Configuration(configuration);
-    if (SMART_FILE_SYSTEM.equals(remoteConfig.get(FS_HDFS_IMPL))) {
+
+    String hdfsImpl = remoteConfig.get(FS_HDFS_IMPL);
+    if (StringUtils.isBlank(hdfsImpl) || SMART_FILE_SYSTEM.equals(hdfsImpl)) {
       remoteConfig.set(FS_HDFS_IMPL, DISTRIBUTED_FILE_SYSTEM);
     }
 
+    String ofsImpl = remoteConfig.get(FS_OFS_IMPL);
+    if (StringUtils.isBlank(ofsImpl) || SMART_OFS.equals(ofsImpl)) {
+      remoteConfig.set(FS_OFS_IMPL, OFS);
+    }
+
+    String o3fsImpl = remoteConfig.get(FS_O3FS_IMPL);
+    if (StringUtils.isBlank(o3fsImpl) || SMART_O3FS.equals(o3fsImpl)) {
+      remoteConfig.set(FS_O3FS_IMPL, O3FS);
+    }
+
     return remoteConfig;
   }
diff --git a/smart-common/src/main/java/org/smartdata/utils/PathUtil.java b/smart-common/src/main/java/org/smartdata/utils/PathUtil.java
index 423bf50fea..324bb8b42f 100644
--- a/smart-common/src/main/java/org/smartdata/utils/PathUtil.java
+++ b/smart-common/src/main/java/org/smartdata/utils/PathUtil.java
@@ -74,7 +74,6 @@ public static boolean isAbsoluteRemotePath(String path) {
     return isAbsoluteRemotePath(new Path(path));
   }
 
-  // todo replace 'stringPath.startsWith("hdfs")' calls with this method
   public static boolean isAbsoluteRemotePath(Path path) {
     return Optional.ofNullable(path)
         .map(Path::toUri)
diff --git a/smart-common/src/main/java/org/smartdata/utils/ThrowingBiFunction.java b/smart-common/src/main/java/org/smartdata/utils/ThrowingBiFunction.java
new file mode 100644
index 0000000000..cef0b76347
--- /dev/null
+++ b/smart-common/src/main/java/org/smartdata/utils/ThrowingBiFunction.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.smartdata.utils;
+
+public interface ThrowingBiFunction<T, V, X> {
+  X apply(T l, V r) throws Exception;
+}
diff --git a/smart-common/src/test/java/org/smartdata/conf/TestReconfigurable.java b/smart-common/src/test/java/org/smartdata/conf/TestReconfigurable.java
deleted file mode 100644
index fe2b7acf16..0000000000
--- a/smart-common/src/test/java/org/smartdata/conf/TestReconfigurable.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.smartdata.conf;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.Arrays;
-import java.util.List;
-
-public class TestReconfigurable {
-  public static final String PROPERTY1 = "property1";
-  public static final String PROPERTY2 = "property2";
-
-  private class TestReconf extends ReconfigurableBase {
-    private String value1 = "oldValue1";
-    private String value2 = "oldValue2";
-
-    @Override
-    public void reconfigureProperty(String property, String newVal)
-        throws ReconfigureException {
-      if (property.equals(PROPERTY1) && !newVal.equals(this.value1)) {
-        this.value1 = newVal;
-      }
-    }
-
-    @Override
-    public List<String> getReconfigurableProperties() {
-      return Arrays.asList(PROPERTY1);
-    }
-
-    public String getValue1() {
-      return value1;
-    }
-
-    public String getValue2() {
-      return value2;
-    }
-  }
-
-  @Test
-  public void testReconf() throws Exception {
-    Assert.assertEquals(0,
-        ReconfigurableRegistry.getAllReconfigurableProperties().size());
-
-    TestReconf reconf = new TestReconf();
-
-    Assert.assertEquals(1,
-        ReconfigurableRegistry.getAllReconfigurableProperties().size());
-
-    ReconfigurableRegistry.applyReconfigurablePropertyValue(
-        PROPERTY1, "newValue1");
-    ReconfigurableRegistry.applyReconfigurablePropertyValue(
-        PROPERTY2, "newValue2");
-
-    Assert.assertEquals("newValue1", reconf.getValue1());
-    Assert.assertEquals("oldValue2", reconf.getValue2());
-  }
-}
diff --git a/smart-engine/src/main/java/org/smartdata/server/SmartEngine.java b/smart-engine/src/main/java/org/smartdata/server/SmartEngine.java
index 2a8c6c8911..5dfff1da24 100644
--- a/smart-engine/src/main/java/org/smartdata/server/SmartEngine.java
+++ b/smart-engine/src/main/java/org/smartdata/server/SmartEngine.java
@@ -21,10 +21,10 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.smartdata.AbstractService;
+import org.smartdata.SmartService;
+import org.smartdata.action.ActionRegistry;
 import org.smartdata.conf.SmartConf;
-import org.smartdata.conf.SmartFsType;
 import org.smartdata.hive.HiveMetastoreFetcherService;
-import org.smartdata.ozone.OzoneFetcherService;
 import org.smartdata.security.AnonymousDefaultPrincipalProvider;
 import org.smartdata.security.SmartPrincipalManager;
 import org.smartdata.security.ThreadScopeSmartPrincipalManager;
@@ -32,8 +32,11 @@
 import org.smartdata.server.engine.CmdletManager;
 import org.smartdata.server.engine.RuleManager;
 import org.smartdata.server.engine.ServerContext;
-import org.smartdata.server.engine.StatesManager;
 import org.smartdata.server.engine.audit.AuditService;
+import org.smartdata.server.engine.file.CachedFilesManager;
+import org.smartdata.server.engine.file.DbFileAccessManager;
+import org.smartdata.server.engine.file.FileAccessManager;
+import org.smartdata.server.engine.filesystem.FileSystemContext;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -49,7 +52,7 @@ public class SmartEngine extends AbstractService {
   @Getter
   private final ServerContext serverContext;
   @Getter
-  private StatesManager statesManager;
+  private FileAccessManager fileAccessManager;
   @Getter
   private RuleManager ruleManager;
   @Getter
@@ -60,8 +63,12 @@ public class SmartEngine extends AbstractService {
   private ClusterNodesManager clusterNodesManager;
   @Getter
   private SmartPrincipalManager smartPrincipalManager;
+  @Getter
+  private CachedFilesManager cachedFilesManager;
+  @Getter
+  private ActionRegistry actionRegistry;
 
-  private final List<AbstractService> services;
+  private final List<SmartService> services;
 
   public SmartEngine(ServerContext context) {
     super(context);
@@ -72,41 +79,61 @@ public SmartEngine(ServerContext context) {
 
   @Override
   public void init() throws IOException {
-    statesManager = new StatesManager(serverContext);
     smartPrincipalManager = new ThreadScopeSmartPrincipalManager(
        new AnonymousDefaultPrincipalProvider());
-    services.add(statesManager);
+    fileAccessManager = new DbFileAccessManager(serverContext);
+    services.add(fileAccessManager);
 
     auditService = new AuditService(serverContext.getMetaStore().userActivityDao());
-    cmdletManager = new CmdletManager(serverContext, auditService, smartPrincipalManager);
+
+    FileSystemContext fsContext = FileSystemContext.fromConfig(conf);
+
+    actionRegistry = new ActionRegistry(fsContext.actionFactories());
+
+    cmdletManager = CmdletManager.builder()
+        .context(serverContext)
+        .auditService(auditService)
+        .smartPrincipalManager(smartPrincipalManager)
+        .schedulerServices(fsContext.actionSchedulerServices(serverContext))
+        .actionRegistry(actionRegistry)
+        .build();
     services.add(cmdletManager);
-    clusterNodesManager = new ClusterNodesManager(conf, cmdletManager);
-    ruleManager = new RuleManager(
-        serverContext, statesManager, cmdletManager, auditService, smartPrincipalManager);
+
+    ruleManager = RuleManager.builder()
+        .context(serverContext)
+        .cmdletManager(cmdletManager)
+        .auditService(auditService)
+        .actionRegistry(actionRegistry)
+        .smartPrincipalManager(smartPrincipalManager)
+        .executorPlugins(fsContext.ruleExecutorPlugins(serverContext, cmdletManager))
+        .smartObjectSupplier(fsContext.smartObjectSupplier())
+        .build();
+
     services.add(ruleManager);
-    maybeEnableOzoneFetcher();
+
+    clusterNodesManager = new ClusterNodesManager(conf, cmdletManager);
+
     maybeEnableHiveEventsFetcher();
 
-    for (AbstractService s : services) {
+    cachedFilesManager = fsContext.cachedFilesManager(serverContext);
+
+    services.addAll(fsContext.additionalServices(serverContext));
+    for (SmartService s : services) {
       s.init();
     }
   }
 
   @Override
   public boolean inSafeMode() {
-    if (services.isEmpty()) { //Not initiated
+    if (services.isEmpty()) {
       return true;
     }
-    for (AbstractService service : services) {
-      if (service.inSafeMode()) {
-        return true;
-      }
-    }
-    return false;
+    return services.stream()
+        .anyMatch(SmartService::inSafeMode);
   }
 
   @Override
   public void start() throws IOException {
-    for (AbstractService s : services) {
+    for (SmartService s : services) {
       s.start();
     }
   }
@@ -118,7 +145,7 @@ public void stop() throws IOException {
     }
   }
 
-  private void stopEngineService(AbstractService service) {
+  private void stopEngineService(SmartService service) {
     try {
       if (service != null) {
         service.stop();
@@ -144,18 +171,6 @@ private void maybeEnableHiveEventsFetcher() {
     services.add(hiveMetastoreFetcherService);
   }
 
-  private void maybeEnableOzoneFetcher() {
-    if (serverContext.getConf().getFsType() != SmartFsType.OZONE) {
-      return;
-    }
-
-    OzoneFetcherService ozoneFetcherService = new OzoneFetcherService(
-        serverContext,
-        serverContext.getMetaStore().ozoneFileInfoDao()
-    );
-    services.add(ozoneFetcherService);
-  }
-
   public SmartConf getConf() {
     return serverContext.getConf();
   }
diff --git a/smart-engine/src/main/java/org/smartdata/server/cluster/HazelcastWorker.java b/smart-engine/src/main/java/org/smartdata/server/cluster/HazelcastWorker.java
index e4f519cbff..3b6cda52af 100644
--- a/smart-engine/src/main/java/org/smartdata/server/cluster/HazelcastWorker.java
+++ b/smart-engine/src/main/java/org/smartdata/server/cluster/HazelcastWorker.java
@@ -26,6 +26,7 @@
 import org.slf4j.LoggerFactory;
 import org.smartdata.SmartContext;
 import org.smartdata.action.ActionException;
+import org.smartdata.action.ActionRegistry;
 import org.smartdata.conf.SmartConf;
 import org.smartdata.conf.SmartConfKeys;
 import org.smartdata.hdfs.impersonation.UserImpersonationStrategy;
@@ -40,6 +41,7 @@
 import org.smartdata.server.engine.cmdlet.CmdletFactory;
 import org.smartdata.server.engine.cmdlet.HazelcastExecutorService;
 import org.smartdata.server.engine.cmdlet.StatusReportTask;
+import org.smartdata.server.engine.filesystem.FileSystemContext;
 
 import java.io.Serializable;
 import java.util.concurrent.Executors;
@@ -60,7 +62,9 @@ public HazelcastWorker(SmartContext smartContext) {
     this.smartConf = smartContext.getConf();
     UserImpersonationStrategy userImpersonationStrategy =
         UserImpersonationStrategyFactory.from(smartConf);
-    this.factory = new CmdletFactory(smartContext, userImpersonationStrategy);
+    ActionRegistry actionRegistry =
+        new ActionRegistry(FileSystemContext.fromConfig(smartConf).actionFactories());
+    this.factory = new CmdletFactory(smartContext, userImpersonationStrategy, actionRegistry);
     this.cmdletExecutor = new CmdletExecutor(smartContext.getConf(), userImpersonationStrategy);
     this.executorService = Executors.newSingleThreadScheduledExecutor();
     HazelcastInstance instance = HazelcastInstanceProvider.getInstance(smartConf);
diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/CmdletManager.java b/smart-engine/src/main/java/org/smartdata/server/engine/CmdletManager.java
index 747867a459..ae28c3038e 100644
--- a/smart-engine/src/main/java/org/smartdata/server/engine/CmdletManager.java
+++ b/smart-engine/src/main/java/org/smartdata/server/engine/CmdletManager.java
@@ -20,6 +20,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.ListMultimap;
+import lombok.Getter;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang3.StringUtils;
@@ -28,6 +29,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.smartdata.AbstractService;
 import org.smartdata.action.ActionException;
+import org.smartdata.action.ActionRegistry;
 import org.smartdata.cmdlet.parser.CmdletParser;
 import org.smartdata.cmdlet.parser.ParsedCmdlet;
 import org.smartdata.conf.SmartConfKeys;
@@ -35,14 +37,6 @@
 import org.smartdata.exception.NotFoundException;
 import org.smartdata.exception.QueueFullException;
 import org.smartdata.exception.SsmParseException;
-import org.smartdata.hdfs.scheduler.CacheScheduler;
-import org.smartdata.hdfs.scheduler.CompressionScheduler;
-import org.smartdata.hdfs.scheduler.Copy2S3Scheduler;
-import org.smartdata.hdfs.scheduler.CopyScheduler;
-import org.smartdata.hdfs.scheduler.ErasureCodingScheduler;
-import org.smartdata.hdfs.scheduler.MoverScheduler;
-import org.smartdata.hdfs.scheduler.SmallFileScheduler;
-import org.smartdata.hive.action.HmsSyncScheduler;
 import org.smartdata.metastore.MetaStore;
 import org.smartdata.metastore.MetaStoreException;
 import org.smartdata.model.ActionInfo;
@@ -94,15 +88,12 @@
 import java.util.Objects;
 import java.util.Optional;
 import java.util.Queue;
-import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executors;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
 
 import static org.smartdata.metastore.utils.MetaStoreUtils.logAndBuildMetastoreException;
 import static org.smartdata.model.action.ScheduleResult.RETRY;
@@ -151,12 +142,18 @@ public class CmdletManager extends AbstractService
   private final SmartPrincipalManager smartPrincipalManager;
   private final PathChecker pathChecker;
   private final List<ActionSchedulerService> schedulerServices;
+  @Getter
+  private final ActionRegistry actionRegistry;
+  private CmdletDispatcher dispatcher;
+
+  @lombok.Builder
   public CmdletManager(
       ServerContext context,
       AuditService auditService,
-      SmartPrincipalManager smartPrincipalManager) throws IOException {
+      SmartPrincipalManager smartPrincipalManager,
+      ActionRegistry actionRegistry,
+      List<ActionSchedulerService> schedulerServices) throws IOException {
     super(context);
 
     this.metaStore = context.getMetaStore();
@@ -168,7 +165,7 @@ public CmdletManager(
     this.scheduledCmdlets = new LinkedBlockingQueue<>();
     this.idToLaunchCmdlets = new ConcurrentHashMap<>();
     this.schedulers = ArrayListMultimap.create();
-    this.schedulerServices = createSchedulerServices(context);
+    this.schedulerServices = schedulerServices;
     this.ruleCmdletTracker = new RuleCmdletTracker();
     this.dispatcher = new CmdletDispatcher(context, this, scheduledCmdlets,
         idToLaunchCmdlets, runningCmdlets, schedulers);
@@ -182,8 +179,9 @@ public CmdletManager(
     this.cmdletPurgeTask = new DeleteTerminatedCmdletsTask(getContext().getConf(), metaStore);
     this.inMemoryRegistry = new InMemoryRegistry(context, ruleCmdletTracker, executorService);
 
+    this.actionRegistry = actionRegistry;
     CmdletManagerContext cmdletManagerContext = new CmdletManagerContext(
-        context.getConf(), metaStore, context.getMetricsFactory(), inMemoryRegistry, schedulers);
+        context.getConf(), metaStore, context.getMetricsFactory(), inMemoryRegistry, actionRegistry, schedulers);
     this.detectTimeoutActionsTask = new DetectTimeoutActionsTask(cmdletManagerContext, this, idToLaunchCmdlets.keySet());
     this.actionInfoHandler = new ActionInfoHandler(cmdletManagerContext);
@@ -836,32 +834,6 @@ private void inferCmdletStatus(
     }
   }
 
-  private List<ActionSchedulerService> createSchedulerServices(ServerContext context) {
-    return Stream.of(
-        createSafely(() -> new MoverScheduler(context)),
-        createSafely(() -> new CopyScheduler(context, context.getMetaStore())),
-        createSafely(() -> new Copy2S3Scheduler(context, context.getMetaStore())),
-        createSafely(() -> new SmallFileScheduler(context, context.getMetaStore())),
-        createSafely(() -> new CompressionScheduler(context, context.getMetaStore())),
-        createSafely(() -> new ErasureCodingScheduler(context, context.getMetaStore())),
-        createSafely(() -> new CacheScheduler(context)),
-        createSafely(() -> new HmsSyncScheduler(context,
-            context.getMetaStore().hmsEventDao(),
-            context.getMetaStore().hmsSyncProgressDao()))
-    ).filter(Objects::nonNull)
-        .collect(Collectors.toList());
-  }
-
-  private ActionSchedulerService createSafely(
-      Callable<ActionSchedulerService> schedulerSupplier) {
-    try {
-      return schedulerSupplier.call();
-    } catch (Exception e) {
-      log.error("Create scheduler service failed.", e);
-      return null;
-    }
-  }
-
   private CmdletState inferTerminalCmdletState(ActionStatus actionStatus) {
     if (actionStatus.getThrowable() == null) {
       return CmdletState.DONE;
diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/RuleManager.java b/smart-engine/src/main/java/org/smartdata/server/engine/RuleManager.java
index 4e0e52ea70..58a10f3a96 100644
--- a/smart-engine/src/main/java/org/smartdata/server/engine/RuleManager.java
+++ b/smart-engine/src/main/java/org/smartdata/server/engine/RuleManager.java
@@ -24,7 +24,6 @@
 import org.smartdata.conf.SmartConfKeys;
 import org.smartdata.exception.NotFoundException;
 import org.smartdata.exception.SsmParseException;
-import org.smartdata.hive.rule.HmsSyncRulePlugin;
 import org.smartdata.metastore.MetaStore;
 import org.smartdata.metastore.MetaStoreException;
 import org.smartdata.metastore.dao.RuleDao;
@@ -42,6 +41,7 @@
 import org.smartdata.model.rule.RulePluginManager;
 import org.smartdata.model.rule.RuleTranslationResult;
 import org.smartdata.model.rule.TimeBasedScheduleInfo;
+import org.smartdata.rule.objects.SmartObjectSupplier;
 import org.smartdata.rule.parser.SmartRuleStringParser;
 import org.smartdata.security.SmartPrincipalManager;
 import org.smartdata.server.engine.audit.AuditService;
@@ -49,19 +49,13 @@
 import org.smartdata.server.engine.audit.aspect.Audit;
 import org.smartdata.server.engine.audit.aspect.AuditId;
 import org.smartdata.server.engine.audit.aspect.ReturnsAuditId;
-import org.smartdata.server.engine.rule.ErasureCodingPlugin;
 import org.smartdata.server.engine.rule.ExecutorScheduler;
-import org.smartdata.server.engine.rule.FileCopy2S3Plugin;
 import org.smartdata.server.engine.rule.RuleExecutor;
 import org.smartdata.server.engine.rule.RuleInfoHandler;
 import org.smartdata.server.engine.rule.RuleInfoRepo;
-import org.smartdata.server.engine.rule.SmallFilePlugin;
-import org.smartdata.server.engine.rule.copy.FileCopyDrPlugin;
-import org.smartdata.server.engine.rule.copy.FileCopyScheduleStrategy;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;
@@ -83,16 +77,17 @@ public class RuleManager
   public static final Logger LOG = LoggerFactory.getLogger(RuleManager.class.getName());
 
   private final ServerContext serverContext;
-  private final StatesManager statesManager;
   private final CmdletManager cmdletManager;
   private final MetaStore metaStore;
   private final PathChecker pathChecker;
+  private final ActionRegistry actionRegistry;
   private final AuditService auditService;
   private final SmartPrincipalManager smartPrincipalManager;
   private final RuleDao ruleDao;
   private final RuleInfoHandler ruleInfoHandler;
 
   private final List<RuleExecutorPlugin> executorPlugins;
+  private final SmartObjectSupplier smartObjectSupplier;
 
   private volatile boolean isClosed = false;
@@ -100,12 +95,15 @@ public class RuleManager
 
   public ExecutorScheduler execScheduler;
 
+  @lombok.Builder
   public RuleManager(
       ServerContext context,
-      StatesManager statesManager,
       CmdletManager cmdletManager,
       AuditService auditService,
-      SmartPrincipalManager smartPrincipalManager) {
+      ActionRegistry actionRegistry,
+      SmartPrincipalManager smartPrincipalManager,
+      SmartObjectSupplier smartObjectSupplier,
+      List<RuleExecutorPlugin> executorPlugins) {
     super(context);
 
     int numExecutors =
@@ -116,7 +114,6 @@ public RuleManager(
     execScheduler = new ExecutorScheduler(numExecutors);
 
     this.mapRules = new ConcurrentHashMap<>();
-    this.statesManager = statesManager;
     this.cmdletManager = cmdletManager;
     this.serverContext = context;
     this.auditService = auditService;
@@ -125,14 +122,9 @@ public RuleManager(
     this.ruleDao = metaStore.ruleDao();
     this.ruleInfoHandler = new RuleInfoHandler(ruleDao);
     this.pathChecker = new PathChecker(context.getConf());
-
-    this.executorPlugins = Arrays.asList(
-        new FileCopyDrPlugin(
-            context.getMetaStore(), FileCopyScheduleStrategy.ordered()),
-        new FileCopy2S3Plugin(),
-        new SmallFilePlugin(context, cmdletManager),
-        new HmsSyncRulePlugin(context.getMetaStore().hmsSyncProgressDao()),
-        new ErasureCodingPlugin(context));
+    this.executorPlugins = executorPlugins;
+    this.actionRegistry = actionRegistry;
+    this.smartObjectSupplier = smartObjectSupplier;
   }
 
   public RuleInfo submitRule(String rule) throws IOException {
@@ -178,7 +170,8 @@ public long submitRule(String rule, RuleState initState) throws IOException {
     metaStore.insertNewRule(ruleInfo);
 
-    RuleInfoRepo infoRepo = new RuleInfoRepo(ruleInfo, metaStore, serverContext.getConf(), executorPlugins);
+    RuleInfoRepo infoRepo = new RuleInfoRepo(ruleInfo, metaStore,
+        serverContext.getConf(), smartObjectSupplier, executorPlugins);
     mapRules.put(ruleInfo.getId(), infoRepo);
     submitRuleToScheduler(infoRepo.launchExecutor(this));
 
@@ -190,7 +183,7 @@ public long submitRule(String rule, RuleState initState) throws IOException {
 
   private void doCheckActions(CmdletDescriptor cd) throws IOException {
     StringBuilder error = new StringBuilder();
     for (int i = 0; i < cd.getActionSize(); i++) {
-      if (!ActionRegistry.registeredAction(cd.getActionName(i))) {
+      if (!actionRegistry.isRegistered(cd.getActionName(i))) {
         error.append("Action '").append(cd.getActionName(i)).append("' not supported.\n");
       }
     }
@@ -200,7 +193,8 @@ private void doCheckActions(CmdletDescriptor cd) throws IOException {
   }
 
   private RuleTranslationResult doCheckRule(String rule) throws IOException {
-    SmartRuleStringParser parser = new SmartRuleStringParser(rule, null, serverContext.getConf());
+    SmartRuleStringParser parser = new SmartRuleStringParser(
+        rule, null, smartObjectSupplier, serverContext.getConf());
     return parser.translate();
   }
 
@@ -286,10 +280,6 @@ public boolean isClosed() {
     return isClosed;
   }
 
-  public StatesManager getStatesManager() {
-    return statesManager;
-  }
-
   public CmdletManager getCmdletManager() {
     return cmdletManager;
   }
@@ -310,7 +300,8 @@ public void init() throws IOException {
       return;
     }
     for (RuleInfo rule : rules) {
-      mapRules.put(rule.getId(), new RuleInfoRepo(rule, metaStore, serverContext.getConf(), executorPlugins));
+      mapRules.put(rule.getId(), new RuleInfoRepo(rule, metaStore,
Totally " + rules.size() + " rules loaded from DataBase."); if (LOG.isDebugEnabled()) { diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/action/ActionInfoHandler.java b/smart-engine/src/main/java/org/smartdata/server/engine/action/ActionInfoHandler.java index e0d94ecd36..3742cfa1d5 100644 --- a/smart-engine/src/main/java/org/smartdata/server/engine/action/ActionInfoHandler.java +++ b/smart-engine/src/main/java/org/smartdata/server/engine/action/ActionInfoHandler.java @@ -54,6 +54,7 @@ public class ActionInfoHandler private static final Logger LOG = LoggerFactory.getLogger(ActionInfoHandler.class); private final MetaStore metaStore; + private final ActionRegistry actionRegistry; private AtomicLong maxActionId; private final InMemoryRegistry inMemoryRegistry; @@ -61,6 +62,7 @@ public class ActionInfoHandler public ActionInfoHandler(CmdletManagerContext context) { super(context.getMetaStore().actionDao(), "actions"); this.metaStore = context.getMetaStore(); + this.actionRegistry = context.getActionRegistry(); this.inMemoryRegistry = context.getInMemoryRegistry(); } @@ -181,7 +183,7 @@ private void updateActionStatusInternal(ActionInfo actionInfo, ActionStatus stat private void updateStorageIfNeeded(ActionInfo info) { SmartAction action; try { - action = ActionRegistry.createAction(info.getActionName()); + action = actionRegistry.createAction(info.getActionName()); } catch (ActionException e) { LOG.error("Failed to create action from {}", info, e); return; @@ -222,7 +224,7 @@ private ActionInfo createInitialActionInfo( private void validateActionNames(CmdletDescriptor cmdletDescriptor) throws SsmParseException { List unknownActions = cmdletDescriptor.getActionNames() .stream() - .filter(name -> !ActionRegistry.registeredAction(name)) + .filter(name -> !actionRegistry.isRegistered(name)) .collect(Collectors.toList()); if (!unknownActions.isEmpty()) { diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletFactory.java b/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletFactory.java index 6304fd22b5..a9ad20c7f9 100644 --- a/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletFactory.java +++ b/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletFactory.java @@ -27,6 +27,7 @@ import org.smartdata.hdfs.impersonation.UserImpersonationStrategy; import org.smartdata.hive.action.HmsCmdletFactoryPlugin; import org.smartdata.model.LaunchAction; +import org.smartdata.ozone.action.OzoneCmdletFactoryPlugin; import org.smartdata.protocol.message.LaunchCmdlet; import java.io.Closeable; @@ -40,20 +41,26 @@ public class CmdletFactory implements Closeable { private final SmartContext smartContext; private final UserImpersonationStrategy userImpersonationStrategy; private final List plugins; + private final ActionRegistry actionRegistry; public CmdletFactory(SmartContext smartContext, - UserImpersonationStrategy userImpersonationStrategy) { + UserImpersonationStrategy userImpersonationStrategy, + ActionRegistry actionRegistry) { this(smartContext, userImpersonationStrategy, + actionRegistry, new HdfsCmdletFactoryPlugin(smartContext.getConf(), userImpersonationStrategy), - new HmsCmdletFactoryPlugin(smartContext.getConf(), userImpersonationStrategy) + new HmsCmdletFactoryPlugin(smartContext.getConf(), userImpersonationStrategy), + new OzoneCmdletFactoryPlugin(smartContext.getConf(), userImpersonationStrategy) ); } public CmdletFactory(SmartContext smartContext, UserImpersonationStrategy userImpersonationStrategy, + 
+      ActionRegistry actionRegistry,
       CmdletFactoryPlugin... plugins) {
     this.smartContext = smartContext;
+    this.actionRegistry = actionRegistry;
     this.userImpersonationStrategy = userImpersonationStrategy;
     this.plugins = Arrays.asList(plugins);
   }
@@ -82,7 +89,7 @@ public SmartAction createAction(
       boolean isLastAction,
       LaunchAction launchAction,
       String actionUser) throws ActionException {
-    SmartAction smartAction = ActionRegistry.createAction(launchAction.getActionType());
+    SmartAction smartAction = actionRegistry.createAction(launchAction.getActionType());
     smartAction.setContext(smartContext);
     smartAction.setCmdletId(cmdletId);
     smartAction.setLastAction(isLastAction);
diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletManagerContext.java b/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletManagerContext.java
index 9594d285b9..5ab0c7f4e6 100644
--- a/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletManagerContext.java
+++ b/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/CmdletManagerContext.java
@@ -19,6 +19,7 @@
 
 import com.google.common.collect.ListMultimap;
 import lombok.Getter;
+import org.smartdata.action.ActionRegistry;
 import org.smartdata.conf.SmartConf;
 import org.smartdata.metastore.MetaStore;
 import org.smartdata.metrics.MetricsFactory;
@@ -30,6 +31,8 @@ public class CmdletManagerContext extends ServerContext {
   @Getter
   private final InMemoryRegistry inMemoryRegistry;
+  @Getter
+  private final ActionRegistry actionRegistry;
   private final ListMultimap<String, ActionScheduler> schedulers;
 
   public CmdletManagerContext(
@@ -37,10 +40,12 @@
       MetaStore metaStore,
       MetricsFactory metricsFactory,
       InMemoryRegistry inMemoryRegistry,
+      ActionRegistry actionRegistry,
       ListMultimap<String, ActionScheduler> schedulers) {
     super(conf, metaStore, metricsFactory);
     this.inMemoryRegistry = inMemoryRegistry;
     this.schedulers = schedulers;
+    this.actionRegistry = actionRegistry;
   }
 
   public List<ActionScheduler> getSchedulers(String action) {
diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/LocalCmdletExecutorService.java b/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/LocalCmdletExecutorService.java
index cb451d3b77..a0e04d6bc5 100644
--- a/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/LocalCmdletExecutorService.java
+++ b/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/LocalCmdletExecutorService.java
@@ -68,7 +68,9 @@ public LocalCmdletExecutorService(SmartConf smartConf, CmdletManager cmdletManag
     UserImpersonationStrategy userImpersonationStrategy =
         UserImpersonationStrategyFactory.from(smartConf);
 
-    this.cmdletFactory = new CmdletFactory(cmdletManager.getContext(), userImpersonationStrategy);
+    this.cmdletFactory = new CmdletFactory(cmdletManager.getContext(),
+        userImpersonationStrategy,
+        cmdletManager.getActionRegistry());
     this.cmdletExecutor = new CmdletExecutor(smartConf, userImpersonationStrategy);
     this.executorService = Executors.newSingleThreadScheduledExecutor();
   }
diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentCmdletService.java b/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentCmdletService.java
index 90a7e31c28..eed01bec69 100644
--- a/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentCmdletService.java
+++ b/smart-engine/src/main/java/org/smartdata/server/engine/cmdlet/agent/AgentCmdletService.java
@@ -19,6 +19,7 @@
 
 import org.smartdata.AgentService;
 import org.smartdata.SmartConstants;
org.smartdata.action.ActionRegistry; import org.smartdata.conf.SmartConf; import org.smartdata.hdfs.impersonation.UserImpersonationStrategy; import org.smartdata.hdfs.impersonation.UserImpersonationStrategyFactory; @@ -26,6 +27,7 @@ import org.smartdata.protocol.message.StopCmdlet; import org.smartdata.server.engine.cmdlet.CmdletExecutor; import org.smartdata.server.engine.cmdlet.CmdletFactory; +import org.smartdata.server.engine.filesystem.FileSystemContext; import java.io.IOException; @@ -33,17 +35,17 @@ public class AgentCmdletService extends AgentService { private CmdletExecutor executor; private CmdletFactory factory; - public AgentCmdletService() { - } - @Override public void init() throws IOException { SmartAgentContext context = (SmartAgentContext) getContext(); SmartConf conf = context.getConf(); UserImpersonationStrategy userImpersonationStrategy = UserImpersonationStrategyFactory.from(conf); + + ActionRegistry actionRegistry = + new ActionRegistry(FileSystemContext.fromConfig(conf).actionFactories()); this.executor = new CmdletExecutor(conf, userImpersonationStrategy); - this.factory = new CmdletFactory(context, userImpersonationStrategy); + this.factory = new CmdletFactory(context, userImpersonationStrategy, actionRegistry); } @Override diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/file/CachedFilesManager.java b/smart-engine/src/main/java/org/smartdata/server/engine/file/CachedFilesManager.java new file mode 100644 index 0000000000..0d13083153 --- /dev/null +++ b/smart-engine/src/main/java/org/smartdata/server/engine/file/CachedFilesManager.java @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.smartdata.server.engine.file; + +import org.smartdata.metastore.dao.Searchable; +import org.smartdata.metastore.queries.sort.CachedFilesSortField; +import org.smartdata.model.CachedFileStatus; +import org.smartdata.model.request.CachedFileSearchRequest; + +public interface CachedFilesManager + extends Searchable { +} diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/CachedFilesManager.java b/smart-engine/src/main/java/org/smartdata/server/engine/file/DbCachedFilesManager.java similarity index 85% rename from smart-engine/src/main/java/org/smartdata/server/engine/CachedFilesManager.java rename to smart-engine/src/main/java/org/smartdata/server/engine/file/DbCachedFilesManager.java index aa461371df..56ac8207f3 100644 --- a/smart-engine/src/main/java/org/smartdata/server/engine/CachedFilesManager.java +++ b/smart-engine/src/main/java/org/smartdata/server/engine/file/DbCachedFilesManager.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
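The agent-side service now assembles an `ActionRegistry` from the factories supplied by the per-filesystem `FileSystemContext`, and the registry instance is passed into `CmdletFactory` rather than consulted statically. The constructor and method shapes (`new ActionRegistry(factories)`, `isRegistered`, `createAction`) are only inferred from this patch, so the sketch below mirrors the pattern with simplified stand-in types instead of the real SSM classes:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

// Stand-ins for the real SSM types; names echo the patch (ActionRegistry,
// ActionFactory, SmartAction) but the signatures here are illustrative.
interface Action { void run(); }

class SimpleActionRegistry {
  private final Map<String, Supplier<Action>> constructors = new HashMap<>();

  SimpleActionRegistry(Map<String, Supplier<Action>> factories) {
    constructors.putAll(factories);
  }

  boolean isRegistered(String actionName) {
    return constructors.containsKey(actionName);
  }

  Action createAction(String actionName) {
    Supplier<Action> constructor = constructors.get(actionName);
    if (constructor == null) {
      throw new IllegalArgumentException("Unknown action: " + actionName);
    }
    return constructor.get();
  }
}

public class RegistryDemo {
  public static void main(String[] args) {
    // Each deployment wires only the factories for its file system,
    // analogous to FileSystemContext.fromConfig(conf).actionFactories().
    Map<String, Supplier<Action>> hdfsFactories = new HashMap<>();
    hdfsFactories.put("echo", () -> () -> System.out.println("echo action"));

    SimpleActionRegistry registry = new SimpleActionRegistry(hdfsFactories);
    if (registry.isRegistered("echo")) {
      registry.createAction("echo").run();
    }
  }
}
```

An injected registry lets HDFS and Ozone deployments carry different action sets, and is easier to fake in tests than a class-level static map.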
*/ -package org.smartdata.server.engine; +package org.smartdata.server.engine.file; import org.smartdata.metastore.dao.CacheFileDao; import org.smartdata.metastore.dao.SearchableService; @@ -23,10 +23,11 @@ import org.smartdata.model.CachedFileStatus; import org.smartdata.model.request.CachedFileSearchRequest; -public class CachedFilesManager extends - SearchableService { +public class DbCachedFilesManager extends + SearchableService + implements CachedFilesManager { - public CachedFilesManager(CacheFileDao cacheFileDao) { + public DbCachedFilesManager(CacheFileDao cacheFileDao) { super(cacheFileDao, "cached files"); } } diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/StatesManager.java b/smart-engine/src/main/java/org/smartdata/server/engine/file/DbFileAccessManager.java similarity index 52% rename from smart-engine/src/main/java/org/smartdata/server/engine/StatesManager.java rename to smart-engine/src/main/java/org/smartdata/server/engine/file/DbFileAccessManager.java index ddf570b1bb..95466d8169 100644 --- a/smart-engine/src/main/java/org/smartdata/server/engine/StatesManager.java +++ b/smart-engine/src/main/java/org/smartdata/server/engine/file/DbFileAccessManager.java @@ -15,19 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.smartdata.server.engine; +package org.smartdata.server.engine.file; import lombok.Getter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import lombok.extern.slf4j.Slf4j; import org.smartdata.AbstractService; -import org.smartdata.conf.Reconfigurable; -import org.smartdata.conf.ReconfigurableRegistry; -import org.smartdata.conf.ReconfigureException; -import org.smartdata.conf.SmartConfKeys; -import org.smartdata.hdfs.HdfsStatesUpdateService; import org.smartdata.metastore.accesscount.DbAccessEventAggregator; -import org.smartdata.metastore.accesscount.FileAccessManager; +import org.smartdata.metastore.accesscount.DbFileAccessCountManager; +import org.smartdata.metastore.accesscount.FileAccessCountManager; import org.smartdata.metastore.accesscount.failover.AccessCountFailoverFactory; import org.smartdata.metastore.partition.FileAccessPartitionManagerImpl; import org.smartdata.metastore.partition.FileAccessPartitionService; @@ -37,11 +32,10 @@ import org.smartdata.metrics.FileAccessEventSource; import org.smartdata.metrics.impl.FileAccessMetricsFactory; import org.smartdata.model.PathChecker; +import org.smartdata.server.engine.ServerContext; import org.smartdata.server.engine.data.AccessEventFetcher; import java.io.IOException; -import java.util.Collections; -import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -51,26 +45,21 @@ import static org.springframework.transaction.annotation.Isolation.SERIALIZABLE; /** - * Polls metrics and events from NameNode. + * Polls metrics and events from FS. 
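A pattern that repeats across these hunks: hand-written `Logger LOG = LoggerFactory.getLogger(...)` fields give way to Lombok's `@Slf4j`, which generates an equivalent static `log` field at compile time. A minimal illustration:

```java
import lombok.extern.slf4j.Slf4j;

// @Slf4j generates the logger field, replacing the boilerplate
// deleted throughout this patch. Roughly equivalent to:
// private static final org.slf4j.Logger log =
//     org.slf4j.LoggerFactory.getLogger(LombokLoggingDemo.class);
@Slf4j
public class LombokLoggingDemo {
  public void doWork() {
    log.info("Initializing ...");
  }
}
```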
*/ -public class StatesManager extends AbstractService implements Reconfigurable { +@Slf4j +public class DbFileAccessManager extends AbstractService implements FileAccessManager { private final ServerContext serverContext; private ScheduledExecutorService executorService; @Getter - private FileAccessManager fileAccessManager; + private DbFileAccessCountManager fileAccessCountManager; private AccessEventFetcher accessEventFetcher; private FileAccessEventSource fileAccessEventSource; - @Getter - private CachedFilesManager cachedFilesManager; - private AbstractService statesUpdaterService; private FileAccessPartitionService fileAccessPartitionService; private PathChecker pathChecker; - private volatile boolean working = false; - - public static final Logger LOG = LoggerFactory.getLogger(StatesManager.class); - public StatesManager(ServerContext context) { + public DbFileAccessManager(ServerContext context) { super(context); this.serverContext = context; } @@ -80,24 +69,27 @@ public StatesManager(ServerContext context) { */ @Override public void init() throws IOException { - LOG.info("Initializing ..."); + log.info("Initializing ..."); this.executorService = Executors.newScheduledThreadPool(5); + TransactionRunner transactionRunner = new TransactionRunner(serverContext.getMetaStore().transactionManager()); transactionRunner.setIsolationLevel(SERIALIZABLE); - this.fileAccessManager = new FileAccessManager( + this.fileAccessCountManager = new DbFileAccessCountManager( transactionRunner, serverContext.getMetaStore().accessCountEventDao(), serverContext.getMetaStore().cacheFileDao()); String accessEventSource = serverContext.getConf().get( - ACCESS_EVENT_SOURCE_KEY, ACCESS_EVENT_SOURCE_DEFAULT); + ACCESS_EVENT_SOURCE_KEY, + ACCESS_EVENT_SOURCE_DEFAULT); this.fileAccessEventSource = FileAccessMetricsFactory.createAccessEventSource(accessEventSource); + AccessCountFailoverFactory accessCountFailoverFactory = new AccessCountFailoverFactory(serverContext.getConf()); DbAccessEventAggregator accessEventAggregator = new DbAccessEventAggregator( serverContext.getMetaStore().generalFileInfoSource(), - fileAccessManager, + fileAccessCountManager, accessCountFailoverFactory.create()); this.accessEventFetcher = new AccessEventFetcher( serverContext.getConf(), @@ -106,34 +98,38 @@ public void init() throws IOException { fileAccessEventSource.getCollector(), serverContext.getMetricsFactory()); this.pathChecker = new PathChecker(serverContext.getConf()); - this.cachedFilesManager = - new CachedFilesManager(serverContext.getMetaStore().cacheFileDao()); - FileAccessPartitionRetentionPolicyExecutorFactory - fileAccessPartitionRetentionPolicyExecutorFactory = - new FileAccessPartitionRetentionPolicyExecutorFactory( - serverContext.getMetaStore()); + + FileAccessPartitionRetentionPolicyExecutorFactory retentionPolicyFactory = + new FileAccessPartitionRetentionPolicyExecutorFactory(serverContext.getMetaStore()); this.fileAccessPartitionService = new FileAccessPartitionService( executorService, new FileAccessPartitionManagerImpl(serverContext.getMetaStore()), - fileAccessPartitionRetentionPolicyExecutorFactory.createPolicyExecutor( - serverContext.getConf()) + retentionPolicyFactory.createPolicyExecutor(serverContext.getConf()) ); - initStatesUpdaterService(); - if (statesUpdaterService == null) { - ReconfigurableRegistry.registReconfigurableProperty( - getReconfigurableProperties(), this); + log.info("Initialized."); + } + + @Override + public void reportFileAccessEvent(FileAccessEvent event) { + String path = 
addPathSeparator(event.getPath()); + + if (pathChecker.isIgnored(path)) { + log.debug("Path {} is in the ignore list. Skipping file access event report.", path); + return; } - LOG.info("Initialized."); + if (!pathChecker.isCovered(path)) { + log.debug("Path {} is not in the whitelist. Skipping file access event report.", path); + return; + } + event.setTimestamp(System.currentTimeMillis()); + fileAccessEventSource.insertEventFromSmartClient(event); } @Override - public boolean inSafeMode() { - if (statesUpdaterService == null) { - return true; - } - return statesUpdaterService.inSafeMode(); + public FileAccessCountManager getFileAccessCountManager() { + return fileAccessCountManager; } /** @@ -141,27 +137,23 @@ public boolean inSafeMode() { */ @Override public void start() throws IOException { - LOG.info("Starting ..."); + log.info("Starting ..."); fileAccessPartitionService.start(); accessEventFetcher.start(); - if (statesUpdaterService != null) { - statesUpdaterService.start(); - } - working = true; - LOG.info("Started. "); + + log.info("Started. "); } @Override public void stop() throws IOException { - working = false; - LOG.info("Stopping ..."); + log.info("Stopping ..."); try { if (fileAccessPartitionService != null) { fileAccessPartitionService.stop(); } } catch (Exception e) { - LOG.error("Failed to stop FileAccessPartitionService", e); + log.error("Failed to stop FileAccessPartitionService", e); } try { @@ -169,7 +161,7 @@ public void stop() throws IOException { accessEventFetcher.stop(); } } catch (Exception e) { - LOG.error("Failed to stop AccessEventFetcher", e); + log.error("Failed to stop AccessEventFetcher", e); } try { @@ -177,15 +169,7 @@ public void stop() throws IOException { fileAccessEventSource.close(); } } catch (Exception e) { - LOG.error("Failed to close FileAccessEventSource", e); - } - - try { - if (statesUpdaterService != null) { - statesUpdaterService.stop(); - } - } catch (Exception e) { - LOG.error("Failed to stop StatesUpdaterService", e); + log.error("Failed to close FileAccessEventSource", e); } try { @@ -193,74 +177,9 @@ public void stop() throws IOException { executorService.shutdownNow(); } } catch (Exception e) { - LOG.error("Failed to shutdown ExecutorService", e); + log.error("Failed to shutdown ExecutorService", e); } - LOG.info("Stopped."); - } - - public void reportFileAccessEvent(FileAccessEvent event) { - String path = addPathSeparator(event.getPath()); - - if (pathChecker.isIgnored(path)) { - LOG.debug("Path {} is in the ignore list. Skip report file access event.", path); - return; - } - - if (!pathChecker.isCovered(path)) { - LOG.debug("Path {} is not in the whitelist. 
Report file access event failed.", path); - return; - } - event.setTimestamp(System.currentTimeMillis()); - this.fileAccessEventSource.insertEventFromSmartClient(event); - } - - public void reconfigureProperty(String property, String newVal) - throws ReconfigureException { - LOG.debug("Received reconfig event: property={} newVal={}", - property, newVal); - if (SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY.equals(property)) { - if (statesUpdaterService != null) { - throw new ReconfigureException( - "States update service already been initialized."); - } - - if (working) { - initStatesUpdaterService(); - } - } - } - - public List getReconfigurableProperties() { - return Collections.singletonList( - SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY); - } - - private synchronized void initStatesUpdaterService() { - try { - try { - statesUpdaterService = new HdfsStatesUpdateService(serverContext, - serverContext.getMetaStore()); - statesUpdaterService.init(); - } catch (IOException e) { - statesUpdaterService = null; - LOG.warn("================================================================"); - LOG.warn(" Failed to create states updater service for: " + e.getMessage()); - LOG.warn(" This may leads to rule/action execution error. The reason why SSM " - + "does not exit under this condition is some other feature depends on this."); - LOG.warn("================================================================"); - } - - if (working) { - try { - statesUpdaterService.start(); - } catch (IOException e) { - LOG.error("Failed to start states updater service.", e); - statesUpdaterService = null; - } - } - } catch (Throwable t) { - LOG.info("", t); - } + log.info("Stopped."); } } diff --git a/smart-common/src/main/java/org/smartdata/conf/Reconfigurable.java b/smart-engine/src/main/java/org/smartdata/server/engine/file/FileAccessManager.java similarity index 55% rename from smart-common/src/main/java/org/smartdata/conf/Reconfigurable.java rename to smart-engine/src/main/java/org/smartdata/server/engine/file/FileAccessManager.java index 71eecceffc..01b82c0c25 100644 --- a/smart-common/src/main/java/org/smartdata/conf/Reconfigurable.java +++ b/smart-engine/src/main/java/org/smartdata/server/engine/file/FileAccessManager.java @@ -15,29 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.smartdata.conf; +package org.smartdata.server.engine.file; -import java.util.List; +import org.smartdata.SmartService; +import org.smartdata.metastore.accesscount.FileAccessCountManager; +import org.smartdata.metrics.FileAccessEvent; -/** - * Properties that can be reconfigured at runtime. - * Note: ReconfigurableRegistry should be used to register - * the reconfigurable properties, otherwise won't get chance - * to reconfigure. - */ -public interface Reconfigurable { - /** - * Called when the property's value is reconfigured. - * @param property - * @param newVal - * @throws ReconfigureException - */ - void reconfigureProperty(String property, String newVal) - throws ReconfigureException; +public interface FileAccessManager extends SmartService { + void reportFileAccessEvent(FileAccessEvent event); - /** - * Return the reconfigurable properties that supported. 
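`reportFileAccessEvent` now lives in `DbFileAccessManager` and applies two path filters before emitting: the ignore list short-circuits first, then anything outside the whitelist is dropped, and only surviving events are timestamped and forwarded to the event source. A self-contained sketch of that filter chain, using illustrative prefix lists in place of the real `PathChecker`:

```java
import java.util.List;

// Illustrative stand-ins; the real PathChecker/FileAccessEvent live in SSM
// and their exact APIs are only partially visible in this patch.
class Event {
  final String path;
  long timestamp;
  Event(String path) { this.path = path; }
}

public class AccessEventFilterDemo {
  private final List<String> ignoredPrefixes = List.of("/tmp/");
  private final List<String> coveredPrefixes = List.of("/data/");

  // Mirrors reportFileAccessEvent: drop ignored paths, drop paths outside
  // the whitelist, stamp the event, then hand it to the event source.
  void report(Event event) {
    // Normalize with a trailing separator so prefix checks behave
    // the same for files and directories (cf. addPathSeparator).
    String path = event.path.endsWith("/") ? event.path : event.path + "/";
    if (ignoredPrefixes.stream().anyMatch(path::startsWith)) {
      return; // ignore list wins first
    }
    if (coveredPrefixes.stream().noneMatch(path::startsWith)) {
      return; // not whitelisted
    }
    event.timestamp = System.currentTimeMillis();
    System.out.println("emit " + event.path + " @ " + event.timestamp);
  }

  public static void main(String[] args) {
    AccessEventFilterDemo demo = new AccessEventFilterDemo();
    demo.report(new Event("/data/users/part-0")); // emitted
    demo.report(new Event("/tmp/scratch"));       // skipped
  }
}
```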
- * @return - */ - List getReconfigurableProperties(); + FileAccessCountManager getFileAccessCountManager(); } diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/file/NoOpCachedFilesManager.java b/smart-engine/src/main/java/org/smartdata/server/engine/file/NoOpCachedFilesManager.java new file mode 100644 index 0000000000..104207bc9f --- /dev/null +++ b/smart-engine/src/main/java/org/smartdata/server/engine/file/NoOpCachedFilesManager.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.smartdata.server.engine.file; + +import org.smartdata.metastore.model.SearchResult; +import org.smartdata.metastore.queries.PageRequest; +import org.smartdata.metastore.queries.sort.CachedFilesSortField; +import org.smartdata.model.CachedFileStatus; +import org.smartdata.model.request.CachedFileSearchRequest; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +public class NoOpCachedFilesManager implements CachedFilesManager { + + @Override + public SearchResult search(CachedFileSearchRequest searchRequest, + PageRequest pageRequest) throws IOException { + return new SearchResult<>(search(searchRequest), 0); + } + + @Override + public List search(CachedFileSearchRequest searchRequest) { + return Collections.emptyList(); + } +} diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/filesystem/BaseFileSystemContext.java b/smart-engine/src/main/java/org/smartdata/server/engine/filesystem/BaseFileSystemContext.java new file mode 100644 index 0000000000..afda24f60c --- /dev/null +++ b/smart-engine/src/main/java/org/smartdata/server/engine/filesystem/BaseFileSystemContext.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
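`NoOpCachedFilesManager` is a null object: deployments without cache tracking get an implementation that always returns empty results, so callers depend on the `CachedFilesManager` interface and never branch on whether caching is supported. A generic illustration of the pattern with stand-in types:

```java
import java.util.Collections;
import java.util.List;

// Generic illustration of the null-object pattern used by
// NoOpCachedFilesManager; FileQuery stands in for Searchable.
interface FileQuery<T> {
  List<T> search(String request);
}

class DbQuery implements FileQuery<String> {
  public List<String> search(String request) {
    return List.of("cached:" + request); // stand-in for a real DAO lookup
  }
}

class NoOpQuery<T> implements FileQuery<T> {
  public List<T> search(String request) {
    return Collections.emptyList(); // always empty, never null
  }
}

public class NullObjectDemo {
  public static void main(String[] args) {
    // HDFS-like deployment: real store; Ozone-like deployment: no-op.
    FileQuery<String> hdfs = new DbQuery();
    FileQuery<String> ozone = new NoOpQuery<>();
    System.out.println(hdfs.search("f1"));  // [cached:f1]
    System.out.println(ozone.search("f1")); // []
  }
}
```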
+ */ +package org.smartdata.server.engine.filesystem; + +import lombok.extern.slf4j.Slf4j; +import org.smartdata.metastore.MetaStore; +import org.smartdata.model.action.ActionSchedulerService; +import org.smartdata.server.engine.ServerContext; +import org.smartdata.utils.ThrowingBiFunction; + +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +@Slf4j +public abstract class BaseFileSystemContext implements FileSystemContext { + + @Override + public List actionSchedulerServices(ServerContext context) { + return actionSchedulerSuppliers() + .map(supplier -> createSafely(supplier, context)) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + } + + protected abstract Stream> actionSchedulerSuppliers(); + + protected ActionSchedulerService createSafely( + ThrowingBiFunction schedulerSupplier, + ServerContext smartContext) { + try { + return schedulerSupplier.apply(smartContext, smartContext.getMetaStore()); + } catch (Exception e) { + log.error("Create scheduler service failed.", e); + return null; + } + } + +} diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/filesystem/FileSystemContext.java b/smart-engine/src/main/java/org/smartdata/server/engine/filesystem/FileSystemContext.java new file mode 100644 index 0000000000..8ed3450c07 --- /dev/null +++ b/smart-engine/src/main/java/org/smartdata/server/engine/filesystem/FileSystemContext.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.smartdata.server.engine.filesystem; + +import org.smartdata.SmartService; +import org.smartdata.action.ActionFactory; +import org.smartdata.conf.SmartConf; +import org.smartdata.conf.SmartFsType; +import org.smartdata.model.action.ActionSchedulerService; +import org.smartdata.model.rule.RuleExecutorPlugin; +import org.smartdata.rule.objects.SmartObjectSupplier; +import org.smartdata.server.engine.CmdletManager; +import org.smartdata.server.engine.ServerContext; +import org.smartdata.server.engine.file.CachedFilesManager; + +import java.util.List; + +public interface FileSystemContext { + List actionSchedulerServices(ServerContext context); + + List ruleExecutorPlugins( + ServerContext context, + CmdletManager cmdletManager); + + List actionFactories(); + + List additionalServices(ServerContext context); + + CachedFilesManager cachedFilesManager(ServerContext context); + + SmartObjectSupplier smartObjectSupplier(); + + static FileSystemContext fromConfig(SmartConf conf) { + return fromFsType(conf.getFsType()); + } + + static FileSystemContext fromFsType(SmartFsType fsType) { + return fsType == SmartFsType.HDFS + ? 
new HdfsContext() + : new OzoneFileSystemContext(); + } +} diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/filesystem/HdfsContext.java b/smart-engine/src/main/java/org/smartdata/server/engine/filesystem/HdfsContext.java new file mode 100644 index 0000000000..b17b3efd8c --- /dev/null +++ b/smart-engine/src/main/java/org/smartdata/server/engine/filesystem/HdfsContext.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.smartdata.server.engine.filesystem; + +import org.smartdata.SmartService; +import org.smartdata.action.ActionFactory; +import org.smartdata.hdfs.HdfsStatesUpdateService; +import org.smartdata.hdfs.action.HdfsActionFactory; +import org.smartdata.hdfs.scheduler.CacheScheduler; +import org.smartdata.hdfs.scheduler.CompressionScheduler; +import org.smartdata.hdfs.scheduler.Copy2S3Scheduler; +import org.smartdata.hdfs.scheduler.CopyScheduler; +import org.smartdata.hdfs.scheduler.ErasureCodingScheduler; +import org.smartdata.hdfs.scheduler.MoverScheduler; +import org.smartdata.hdfs.scheduler.SmallFileScheduler; +import org.smartdata.hive.action.HiveActionFactory; +import org.smartdata.hive.action.HmsSyncScheduler; +import org.smartdata.hive.rule.HmsSyncRulePlugin; +import org.smartdata.metastore.MetaStore; +import org.smartdata.model.action.ActionSchedulerService; +import org.smartdata.model.rule.RuleExecutorPlugin; +import org.smartdata.rule.objects.DefaultSmartObjectSupplier; +import org.smartdata.rule.objects.SmartObjectSupplier; +import org.smartdata.server.engine.CmdletManager; +import org.smartdata.server.engine.ServerContext; +import org.smartdata.server.engine.file.CachedFilesManager; +import org.smartdata.server.engine.file.DbCachedFilesManager; +import org.smartdata.server.engine.rule.ErasureCodingPlugin; +import org.smartdata.server.engine.rule.FileCopy2S3Plugin; +import org.smartdata.server.engine.rule.SmallFilePlugin; +import org.smartdata.server.engine.rule.copy.FileCopyDrPlugin; +import org.smartdata.server.engine.rule.copy.FileCopyScheduleStrategy; +import org.smartdata.utils.ThrowingBiFunction; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Stream; + +public class HdfsContext extends BaseFileSystemContext { + + @Override + public List ruleExecutorPlugins(ServerContext context, CmdletManager cmdletManager) { + return Arrays.asList( + new FileCopyDrPlugin( + context.getMetaStore(), FileCopyScheduleStrategy.ordered()), + new FileCopy2S3Plugin(), + new SmallFilePlugin(context, cmdletManager), + new HmsSyncRulePlugin(context.getMetaStore().hmsSyncProgressDao()), + new ErasureCodingPlugin(context)); + } + + @Override + public List actionFactories() { + return Arrays.asList( + new HdfsActionFactory(), + new 
HiveActionFactory() + ); + } + + @Override + public List additionalServices(ServerContext context) { + HdfsStatesUpdateService statesUpdateService = new HdfsStatesUpdateService( + context, + context.getMetaStore()); + return Collections.singletonList(statesUpdateService); + } + + @Override + public CachedFilesManager cachedFilesManager(ServerContext context) { + return new DbCachedFilesManager(context.getMetaStore().cacheFileDao()); + } + + @Override + public SmartObjectSupplier smartObjectSupplier() { + return new DefaultSmartObjectSupplier(); + } + + @Override + protected Stream> actionSchedulerSuppliers() { + return Stream.of( + (ctx, metastore) -> new MoverScheduler(ctx), + CopyScheduler::new, + Copy2S3Scheduler::new, + SmallFileScheduler::new, + CompressionScheduler::new, + ErasureCodingScheduler::new, + (ctx, metastore) -> new CacheScheduler(ctx), + (ctx, metastore) -> new HmsSyncScheduler(ctx, + metastore.hmsEventDao(), metastore.hmsSyncProgressDao())); + } +} diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/filesystem/OzoneFileSystemContext.java b/smart-engine/src/main/java/org/smartdata/server/engine/filesystem/OzoneFileSystemContext.java new file mode 100644 index 0000000000..ccccffa8b0 --- /dev/null +++ b/smart-engine/src/main/java/org/smartdata/server/engine/filesystem/OzoneFileSystemContext.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
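`FileSystemContext` acts as a composition root: one `fromConfig`/`fromFsType` dispatch selects the HDFS or Ozone bundle of action factories, schedulers, rule plugins, and managers. A simplified sketch of the idea; names are illustrative, and the real interface exposes more collaborators than shown:

```java
import java.util.List;

// Enum-driven abstract factory, echoing FileSystemContext.fromFsType.
enum FsType { HDFS, OZONE }

interface FsContext {
  List<String> actionFactories();
  String cachedFilesManager();
}

class HdfsCtx implements FsContext {
  public List<String> actionFactories() { return List.of("hdfs", "hive"); }
  public String cachedFilesManager() { return "db-backed"; }
}

class OzoneCtx implements FsContext {
  public List<String> actionFactories() { return List.of("ozone", "hive"); }
  public String cachedFilesManager() { return "no-op"; }
}

public class FsContextDemo {
  static FsContext fromFsType(FsType type) {
    return type == FsType.HDFS ? new HdfsCtx() : new OzoneCtx();
  }

  public static void main(String[] args) {
    FsContext ctx = fromFsType(FsType.OZONE);
    System.out.println(ctx.actionFactories());    // [ozone, hive]
    System.out.println(ctx.cachedFilesManager()); // no-op
  }
}
```

Concentrating the per-filesystem wiring in one place is what lets the rest of the engine (CmdletManager, rule executors, the agent) stay filesystem-agnostic.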
+ */ +package org.smartdata.server.engine.filesystem; + +import org.smartdata.SmartService; +import org.smartdata.action.ActionFactory; +import org.smartdata.hdfs.scheduler.Copy2S3Scheduler; +import org.smartdata.hdfs.scheduler.CopyScheduler; +import org.smartdata.hive.action.HiveActionFactory; +import org.smartdata.hive.action.HmsSyncScheduler; +import org.smartdata.hive.rule.HmsSyncRulePlugin; +import org.smartdata.metastore.MetaStore; +import org.smartdata.model.action.ActionSchedulerService; +import org.smartdata.model.rule.RuleExecutorPlugin; +import org.smartdata.ozone.OzoneFetcherService; +import org.smartdata.ozone.action.OzoneActionFactory; +import org.smartdata.ozone.rule.OzoneSmartObjectSupplier; +import org.smartdata.rule.objects.SmartObjectSupplier; +import org.smartdata.server.engine.CmdletManager; +import org.smartdata.server.engine.ServerContext; +import org.smartdata.server.engine.file.CachedFilesManager; +import org.smartdata.server.engine.file.NoOpCachedFilesManager; +import org.smartdata.server.engine.rule.FileCopy2S3Plugin; +import org.smartdata.server.engine.rule.copy.FileCopyDrPlugin; +import org.smartdata.server.engine.rule.copy.FileCopyScheduleStrategy; +import org.smartdata.utils.ThrowingBiFunction; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Stream; + +public class OzoneFileSystemContext extends BaseFileSystemContext { + @Override + public List ruleExecutorPlugins(ServerContext context, CmdletManager cmdletManager) { + return Arrays.asList( + new FileCopyDrPlugin( + context.getMetaStore(), FileCopyScheduleStrategy.ordered()), + new FileCopy2S3Plugin(), + new HmsSyncRulePlugin(context.getMetaStore().hmsSyncProgressDao()) + ); + } + + @Override + public List actionFactories() { + return Arrays.asList( + new OzoneActionFactory(), + new HiveActionFactory() + ); + } + + @Override + public List additionalServices(ServerContext context) { + OzoneFetcherService ozoneFetcherService = new OzoneFetcherService( + context, + context.getMetaStore().ozoneFileInfoDao() + ); + + return Collections.singletonList(ozoneFetcherService); + } + + @Override + public CachedFilesManager cachedFilesManager(ServerContext context) { + return new NoOpCachedFilesManager(); + } + + @Override + public SmartObjectSupplier smartObjectSupplier() { + return new OzoneSmartObjectSupplier(); + } + + @Override + protected Stream> actionSchedulerSuppliers() { + return Stream.of( + CopyScheduler::new, + Copy2S3Scheduler::new, + (ctx, metastore) -> new HmsSyncScheduler(ctx, + metastore.hmsEventDao(), metastore.hmsSyncProgressDao())); + } +} diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/rule/RuleInfoRepo.java b/smart-engine/src/main/java/org/smartdata/server/engine/rule/RuleInfoRepo.java index 1e58b6177e..29e8d813fd 100644 --- a/smart-engine/src/main/java/org/smartdata/server/engine/rule/RuleInfoRepo.java +++ b/smart-engine/src/main/java/org/smartdata/server/engine/rule/RuleInfoRepo.java @@ -26,6 +26,7 @@ import org.smartdata.model.RuleState; import org.smartdata.model.rule.RuleExecutorPlugin; import org.smartdata.model.rule.RuleTranslationResult; +import org.smartdata.rule.objects.SmartObjectSupplier; import org.smartdata.rule.parser.SmartRuleStringParser; import org.smartdata.rule.parser.TranslationContext; import org.smartdata.server.engine.RuleManager; @@ -44,6 +45,8 @@ public class RuleInfoRepo { private final RuleDao ruleDao; private final SmartConf conf; private final List executorPlugins; + private final 
SmartObjectSupplier smartObjectSupplier; + private RuleExecutor ruleExecutor; private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); @@ -51,10 +54,12 @@ public class RuleInfoRepo { public RuleInfoRepo(RuleInfo ruleInfo, MetaStore metaStore, SmartConf conf, + SmartObjectSupplier smartObjectSupplier, List executorPlugins) { this.ruleInfo = ruleInfo; this.metaStore = metaStore; this.ruleDao = metaStore.ruleDao(); + this.smartObjectSupplier = smartObjectSupplier; this.executorPlugins = executorPlugins; this.conf = conf; } @@ -149,7 +154,8 @@ private RuleExecutor doLaunchExecutor(RuleManager ruleManager) ruleInfo.getId(), ruleInfo.getSubmitTime()); RuleTranslationResult translationResult = ruleExecutor != null ? ruleExecutor.getOriginalTranslateResult() - : new SmartRuleStringParser(ruleInfo.getRuleText(), translationCtx, conf).translate(); + : new SmartRuleStringParser(ruleInfo.getRuleText(), + translationCtx, smartObjectSupplier, conf).translate(); ruleExecutor = new RuleExecutor( conf, diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/rule/SmallFilePlugin.java b/smart-engine/src/main/java/org/smartdata/server/engine/rule/SmallFilePlugin.java index 207cb3adf9..fbf811c76a 100644 --- a/smart-engine/src/main/java/org/smartdata/server/engine/rule/SmallFilePlugin.java +++ b/smart-engine/src/main/java/org/smartdata/server/engine/rule/SmallFilePlugin.java @@ -23,7 +23,7 @@ import org.slf4j.LoggerFactory; import org.smartdata.SmartFilePermission; import org.smartdata.conf.SmartConfKeys; -import org.smartdata.hdfs.action.HdfsAction; +import org.smartdata.hdfs.action.HadoopAction; import org.smartdata.hdfs.action.SmallFileCompactAction; import org.smartdata.metastore.MetaStore; import org.smartdata.metastore.MetaStoreException; @@ -232,7 +232,7 @@ public CmdletDescriptor preSubmitCmdletDescriptor( final RuleInfo ruleInfo, RuleTranslationResult tResult, CmdletDescriptor descriptor) { for (int i = 0; i < descriptor.getActionSize(); i++) { if (COMPACT_ACTION_NAME.equals(descriptor.getActionName(i))) { - String smallFiles = descriptor.getActionArgs(i).get(HdfsAction.FILE_PATH); + String smallFiles = descriptor.getActionArgs(i).get(HadoopAction.FILE_PATH); if (smallFiles != null && !smallFiles.isEmpty()) { // Check if small file list is empty ArrayList smallFileList = new Gson().fromJson( @@ -274,7 +274,7 @@ public CmdletDescriptor preSubmitCmdletDescriptor( i, SmallFileCompactAction.CONTAINER_FILE_PERMISSION, new Gson().toJson(args.containerFilePermission)); descriptor.addActionArg( - i, HdfsAction.FILE_PATH, new Gson().toJson(args.smartFiles)); + i, HadoopAction.FILE_PATH, new Gson().toJson(args.smartFiles)); } } } diff --git a/smart-engine/src/main/java/org/smartdata/server/engine/rule/copy/FileCopyDrPlugin.java b/smart-engine/src/main/java/org/smartdata/server/engine/rule/copy/FileCopyDrPlugin.java index 938cca78a7..1950568172 100644 --- a/smart-engine/src/main/java/org/smartdata/server/engine/rule/copy/FileCopyDrPlugin.java +++ b/smart-engine/src/main/java/org/smartdata/server/engine/rule/copy/FileCopyDrPlugin.java @@ -152,14 +152,13 @@ private BackUpInfo buildBackupInfo( PATTERN_BASE_DIRS_DELIMITER, getPathPatternBaseDirs(pathPatterns)); - BackUpInfo backUpInfo = new BackUpInfo(); - backUpInfo.setRid(ruleId); - backUpInfo.setSrc(patternBaseDirs); - backUpInfo.setSrcPattern(ssmPatternsToRegex(pathPatterns)); - backUpInfo.setDest(dest); - backUpInfo.setPeriod(tResult.getScheduleInfo().getMinimalEvery()); - - return backUpInfo; + return BackUpInfo.builder() + 
.rid(ruleId) + .src(patternBaseDirs) + .srcPattern(ssmPatternsToRegex(pathPatterns)) + .dest(dest) + .period(tResult.getScheduleInfo().getMinimalEvery()) + .build(); } private void validatePreserveArg(String rawPreserveArg) { @@ -167,7 +166,7 @@ private void validatePreserveArg(String rawPreserveArg) { return; } - for (String attribute: rawPreserveArg.split(",")) { + for (String attribute : rawPreserveArg.split(",")) { CopyFileAction.validatePreserveArg(attribute); } } diff --git a/smart-engine/src/main/java/org/smartdata/server/utils/ConfigUtil.java b/smart-engine/src/main/java/org/smartdata/server/utils/ConfigUtil.java new file mode 100644 index 0000000000..7d391a3557 --- /dev/null +++ b/smart-engine/src/main/java/org/smartdata/server/utils/ConfigUtil.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.smartdata.server.utils; + +import org.smartdata.conf.SmartConf; +import org.smartdata.conf.SmartConfKeys; +import org.smartdata.conf.SmartFsType; +import org.smartdata.hdfs.HadoopUtil; + +import java.io.IOException; +import java.net.URL; +import java.util.Optional; + +import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; +import static org.smartdata.conf.SmartConfKeys.SMART_OZONE_RPC_SERVER_KEY; +import static org.smartdata.hdfs.HadoopUtil.getHadoopConfDir; +import static org.smartdata.hdfs.HadoopUtil.loadResourceSafely; +import static org.smartdata.ozone.OzoneSmartConf.getOzoneDefaultFsUri; + +public class ConfigUtil { + public static void enrichSmartConf(SmartConf conf) throws IOException { + if (conf.getFsType() == SmartFsType.HDFS) { + HadoopUtil.setSmartConfByHadoop(conf); + } else { + enrichWithOzoneConfigs(conf); + } + conf.set(FS_DEFAULT_NAME_KEY, conf.getDefaultFs()); + } + + public static void enrichWithOzoneConfigs(SmartConf conf) throws IOException { + String hadoopConfPath = conf.get(SmartConfKeys.SMART_HADOOP_CONF_DIR_KEY); + Optional hadoopConfDir = getHadoopConfDir(hadoopConfPath); + if (!hadoopConfDir.isPresent()) { + return; + } + + loadResourceSafely(conf, hadoopConfDir.get(), "core-site.xml"); + loadResourceSafely(conf, hadoopConfDir.get(), "ozone-default.xml"); + loadResourceSafely(conf, hadoopConfDir.get(), "ozone-site.xml"); + String ozoneRpcAddress = getOzoneDefaultFsUri(conf).toString(); + conf.set(SMART_OZONE_RPC_SERVER_KEY, ozoneRpcAddress); + } +} diff --git a/smart-hadoop-support/pom.xml b/smart-hadoop-support/pom.xml index a48d93eb9c..753f8b81a9 100644 --- a/smart-hadoop-support/pom.xml +++ b/smart-hadoop-support/pom.xml @@ -37,6 +37,7 @@ <module>smart-hadoop</module> <module>smart-hadoop-common</module> <module>smart-inputstream</module> + <module>smart-hadoop-action-common</module> diff --git a/smart-hadoop-support/smart-hadoop-3/pom.xml b/smart-hadoop-support/smart-hadoop-3/pom.xml index 7a9c3b5bdc..658846ba4e 100644 --- 
a/smart-hadoop-support/smart-hadoop-3/pom.xml +++ b/smart-hadoop-support/smart-hadoop-3/pom.xml @@ -77,6 +77,11 @@ <artifactId>smart-hadoop-common</artifactId> <version>2.2.0-SNAPSHOT</version> + <dependency> + <groupId>org.smartdata</groupId> + <artifactId>smart-hadoop-action-common</artifactId> + <version>2.2.0-SNAPSHOT</version> + </dependency> <groupId>org.projectlombok</groupId> <artifactId>lombok</artifactId> diff --git a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/AddErasureCodingPolicy.java b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/AddErasureCodingPolicy.java index 2cedf8dcaa..8b880ad5be 100644 --- a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/AddErasureCodingPolicy.java +++ b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/AddErasureCodingPolicy.java @@ -86,7 +86,7 @@ public void execute() throws Exception { ECSchema ecSchema = new ECSchema(codecName, numDataUnits, numParityUnits); ErasureCodingPolicy ecPolicy = new ErasureCodingPolicy(ecSchema, cellSize); AddErasureCodingPolicyResponse addEcResponse = - localFileSystem.addErasureCodingPolicies(new ErasureCodingPolicy[]{ecPolicy})[0]; + localDfs.addErasureCodingPolicies(new ErasureCodingPolicy[]{ecPolicy})[0]; if (!addEcResponse.isSucceed()) { appendLog("Failed to add the given EC policy!"); diff --git a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/CheckErasureCodingPolicy.java b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/CheckErasureCodingPolicy.java index 3e69b50a5b..28515cfb3f 100644 --- a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/CheckErasureCodingPolicy.java +++ b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/CheckErasureCodingPolicy.java @@ -49,7 +49,7 @@ public void execute() throws Exception { validateNonEmptyArg(FILE_PATH); String result = Optional.ofNullable( - localFileSystem.getErasureCodingPolicy(srcPath)) + localDfs.getErasureCodingPolicy(srcPath)) .map(ErasureCodingPolicy::toString) .orElse(RESULT_OF_NULL_EC_POLICY); diff --git a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/DisableErasureCodingPolicy.java b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/DisableErasureCodingPolicy.java index 49ff53d5b2..e062bdc579 100644 --- a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/DisableErasureCodingPolicy.java +++ b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/DisableErasureCodingPolicy.java @@ -42,7 +42,7 @@ public void init(Map args) { @Override public void execute() throws Exception { - localFileSystem.disableErasureCodingPolicy(policyName); + localDfs.disableErasureCodingPolicy(policyName); appendLog(String.format("The EC policy named %s is disabled!", policyName)); } diff --git a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/EnableErasureCodingPolicy.java b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/EnableErasureCodingPolicy.java index ac60aaa8e2..ecf5284eb2 100644 --- a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/EnableErasureCodingPolicy.java +++ b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/EnableErasureCodingPolicy.java @@ -42,7 +42,7 @@ public void init(Map args) { @Override public void execute() throws Exception { - localFileSystem.enableErasureCodingPolicy(policyName); + localDfs.enableErasureCodingPolicy(policyName); appendLog(String.format("The EC policy named %s is enabled!", policyName)); 
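These hunks rename the injected `DistributedFileSystem` field from `localFileSystem` to `localDfs`; the underlying calls are the stock Hadoop 3 erasure-coding API. A minimal sketch of that API sequence, assuming a reachable HDFS at `fs.defaultFS` and eliding error handling:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class EcPolicyDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf)) {
      // Define RS(6,3) with a 1 MiB cell size, then register it.
      ECSchema schema = new ECSchema("rs", 6, 3);
      ErasureCodingPolicy policy = new ErasureCodingPolicy(schema, 1024 * 1024);
      AddErasureCodingPolicyResponse response =
          dfs.addErasureCodingPolicies(new ErasureCodingPolicy[]{policy})[0];
      if (response.isSucceed()) {
        // Newly added policies start disabled; enable before assigning.
        dfs.enableErasureCodingPolicy(policy.getName());
        dfs.setErasureCodingPolicy(new Path("/cold-data"), policy.getName());
      }
    }
  }
}
```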
} diff --git a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/ErasureCodingAction.java b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/ErasureCodingAction.java index 2f4b8cfcb4..a7cc647dcb 100644 --- a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/ErasureCodingAction.java +++ b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/ErasureCodingAction.java @@ -65,7 +65,7 @@ protected void execute() throws Exception { validateNonEmptyArgs(FILE_PATH); // keep attribute consistent - HdfsFileStatus fileStatus = (HdfsFileStatus) localFileSystem.getFileStatus(srcPath); + HdfsFileStatus fileStatus = (HdfsFileStatus) localDfs.getFileStatus(srcPath); validateEcPolicy(ecPolicyName); ErasureCodingPolicy srcEcPolicy = fileStatus.getErasureCodingPolicy(); @@ -79,7 +79,7 @@ protected void execute() throws Exception { } if (fileStatus.isDir()) { - localFileSystem.setErasureCodingPolicy(srcPath, ecPolicyName); + localDfs.setErasureCodingPolicy(srcPath, ecPolicyName); this.progress = 1.0F; appendLog(DIR_RESULT); return; @@ -90,13 +90,13 @@ protected void execute() throws Exception { // a file only with replication policy can be appended. if (srcEcPolicy == null) { // append the file to acquire the lock to avoid modifying, real appending wouldn't occur. - outputStream = localFileSystem.append(srcPath, bufferSize); + outputStream = localDfs.append(srcPath, bufferSize); } convert(fileStatus); // The append operation will change the modification time accordingly, // so we use the FileStatus obtained before append to set ecTmp file's most attributes setAttributes(fileStatus); - localFileSystem.rename(ecTmpPath, srcPath, Options.Rename.OVERWRITE); + localDfs.rename(ecTmpPath, srcPath, Options.Rename.OVERWRITE); appendLog(CONVERT_RESULT); if (srcEcPolicy == null) { appendLog("The previous EC policy is replication."); @@ -106,8 +106,8 @@ protected void execute() throws Exception { appendLog("The current EC policy is " + ecPolicyName); } catch (ActionException ex) { try { - if (localFileSystem.exists(ecTmpPath)) { - localFileSystem.delete(ecTmpPath, false); + if (localDfs.exists(ecTmpPath)) { + localDfs.delete(ecTmpPath, false); } } catch (IOException e) { appendLog("Failed to delete tmp file created during the conversion!" 
+ ex.getMessage()); @@ -125,7 +125,7 @@ protected void execute() throws Exception { } public void validateEcPolicy(String ecPolicyName) throws Exception { - ErasureCodingPolicyState ecPolicyState = localFileSystem.getAllErasureCodingPolicies() + ErasureCodingPolicyState ecPolicyState = localDfs.getAllErasureCodingPolicies() .stream() .filter(policyInfo -> policyInfo.getPolicy().getName().equals(ecPolicyName)) .map(ErasureCodingPolicyInfo::getState) diff --git a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/ListErasureCodingPolicy.java b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/ListErasureCodingPolicy.java index 90c8859ac4..2d25f4bf48 100644 --- a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/ListErasureCodingPolicy.java +++ b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/ListErasureCodingPolicy.java @@ -39,7 +39,7 @@ public void init(Map args) { @Override public void execute() throws Exception { - for (ErasureCodingPolicyInfo policyInfo : localFileSystem.getAllErasureCodingPolicies()) { + for (ErasureCodingPolicyInfo policyInfo : localDfs.getAllErasureCodingPolicies()) { appendResult("{" + policyInfo.toString() + "}"); } } diff --git a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/RemoveErasureCodingPolicy.java b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/RemoveErasureCodingPolicy.java index d53004a311..511e718df9 100644 --- a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/RemoveErasureCodingPolicy.java +++ b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/RemoveErasureCodingPolicy.java @@ -41,7 +41,7 @@ public void init(Map args) { @Override public void execute() throws Exception { - localFileSystem.removeErasureCodingPolicy(policyName); + localDfs.removeErasureCodingPolicy(policyName); appendLog("The EC policy named is removed: " + policyName); } diff --git a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/UnErasureCodingAction.java b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/UnErasureCodingAction.java index 71969d4c8b..bb98a1e80d 100644 --- a/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/UnErasureCodingAction.java +++ b/smart-hadoop-support/smart-hadoop-3/src/main/java/org/smartdata/hdfs/action/UnErasureCodingAction.java @@ -57,7 +57,7 @@ public void init(Map args) { protected void execute() throws Exception { validateNonEmptyArgs(FILE_PATH); - HdfsFileStatus fileStatus = (HdfsFileStatus) localFileSystem.getFileStatus(srcPath); + HdfsFileStatus fileStatus = (HdfsFileStatus) localDfs.getFileStatus(srcPath); ErasureCodingPolicy srcEcPolicy = fileStatus.getErasureCodingPolicy(); // if ecPolicy is null, it means replication. 
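`ErasureCodingAction` converts a replicated file by appending to it first, purely to hold the HDFS lease so concurrent writers fail, then renaming the converted temp file over the source with `Options.Rename.OVERWRITE`; on failure the temp file is cleaned up. A compilable sketch of that convert-then-swap pattern (the conversion itself is elided, and paths/buffer size are caller-supplied):

```java
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ConvertAndSwapDemo {
  public static void convert(DistributedFileSystem dfs,
      Path src, Path tmp, int bufferSize) throws Exception {
    FSDataOutputStream lease = null;
    try {
      // Appending acquires the HDFS lease on src; nothing is written,
      // the open stream just blocks concurrent modification.
      lease = dfs.append(src, bufferSize);
      // ... write the EC-encoded copy of src to tmp here ...
      // Atomically replace the original with the converted copy.
      dfs.rename(tmp, src, Options.Rename.OVERWRITE);
    } catch (Exception e) {
      // Best-effort cleanup mirrors the action's catch block.
      if (dfs.exists(tmp)) {
        dfs.delete(tmp, false);
      }
      throw e;
    } finally {
      if (lease != null) {
        lease.close();
      }
    }
  }
}
```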
@@ -67,7 +67,7 @@ protected void execute() throws Exception { return; } if (fileStatus.isDir()) { - localFileSystem.setErasureCodingPolicy(srcPath, ecPolicyName); + localDfs.setErasureCodingPolicy(srcPath, ecPolicyName); progress = 1.0F; appendLog(DIR_RESULT); return; @@ -76,14 +76,14 @@ protected void execute() throws Exception { try { convert(fileStatus); setAttributes(fileStatus); - localFileSystem.rename(ecTmpPath, srcPath, Options.Rename.OVERWRITE); + localDfs.rename(ecTmpPath, srcPath, Options.Rename.OVERWRITE); appendLog(CONVERT_RESULT); appendLog(String.format("The previous EC policy is %s.", srcEcPolicy.getName())); appendLog(String.format("The current EC policy is %s.", REPLICATION_POLICY_NAME)); } catch (ActionException ex) { try { - if (localFileSystem.exists(ecTmpPath)) { - localFileSystem.delete(ecTmpPath, false); + if (localDfs.exists(ecTmpPath)) { + localDfs.delete(ecTmpPath, false); } } catch (IOException e) { LOG.error("Failed to delete tmp file created during the conversion!"); diff --git a/smart-hadoop-support/smart-hadoop-action-common/pom.xml b/smart-hadoop-support/smart-hadoop-action-common/pom.xml new file mode 100644 index 0000000000..0bbf060892 --- /dev/null +++ b/smart-hadoop-support/smart-hadoop-action-common/pom.xml @@ -0,0 +1,106 @@ + <?xml version="1.0" encoding="UTF-8"?> + <!-- + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + --> + <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + + <parent> + <groupId>org.smartdata</groupId> + <artifactId>smart-hadoop-support</artifactId> + <version>2.2.0-SNAPSHOT</version> + <relativePath>..</relativePath> + </parent> + + <artifactId>smart-hadoop-action-common</artifactId> + <version>2.2.0-SNAPSHOT</version> + <packaging>jar</packaging> + + <dependencies> + <dependency> + <groupId>org.smartdata</groupId> + <artifactId>smart-hadoop-common</artifactId> + <version>2.2.0-SNAPSHOT</version> + </dependency> + <dependency> + <groupId>org.smartdata</groupId> + <artifactId>smart-common</artifactId> + <version>2.2.0-SNAPSHOT</version> + </dependency> + <dependency> + <groupId>com.github.ben-manes.caffeine</groupId> + <artifactId>caffeine</artifactId> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-distcp</artifactId> + <version>${hadoop.version}</version> + <exclusions> + <exclusion> + <groupId>ch.qos.reload4j</groupId> + <artifactId>reload4j</artifactId> + </exclusion> + <exclusion> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-reload4j</artifactId> + </exclusion> + </exclusions> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-mapreduce-client-core</artifactId> + <version>${hadoop.version}</version> + <exclusions> + <exclusion> + <groupId>org.codehaus.jackson</groupId> + <artifactId>jackson-core-asl</artifactId> + </exclusion> + <exclusion> + <groupId>org.codehaus.jackson</groupId> + <artifactId>jackson-mapper-asl</artifactId> + </exclusion> + <exclusion> + <groupId>ch.qos.reload4j</groupId> + <artifactId>reload4j</artifactId> + </exclusion> + <exclusion> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-reload4j</artifactId> + </exclusion> + </exclusions> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-mapreduce-client-jobclient</artifactId> + <version>${hadoop.version}</version> + <exclusions> + <exclusion> + <groupId>ch.qos.reload4j</groupId> + <artifactId>reload4j</artifactId> + </exclusion> + <exclusion> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-reload4j</artifactId> + </exclusion> + </exclusions> + </dependency> + </dependencies> + </project> \ No newline at end of file diff --git a/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/HadoopCmdletFactoryPlugin.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/HadoopCmdletFactoryPlugin.java new file mode 100644 index 0000000000..48114f8c38 --- /dev/null +++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/HadoopCmdletFactoryPlugin.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.smartdata.hdfs; + +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.apache.hadoop.fs.FileSystem; +import org.smartdata.action.ActionException; +import org.smartdata.action.CmdletFactoryPlugin; +import org.smartdata.action.SmartAction; +import org.smartdata.conf.SmartConf; +import org.smartdata.conf.SmartFsType; +import org.smartdata.hdfs.action.HadoopAction; +import org.smartdata.hdfs.client.LocalFileSystemProvider; + +import java.io.IOException; + +@Slf4j +@RequiredArgsConstructor +public abstract class HadoopCmdletFactoryPlugin implements CmdletFactoryPlugin { + protected final SmartConf conf; + protected final LocalFileSystemProvider localFileSystemProvider; + + @Override + public boolean canEnrich(SmartAction action) { + return action instanceof HadoopAction + && conf.getFsType() == supportedFsType(); + } + + @Override + public void enrichAction(SmartAction action, String actionUser) throws ActionException { + if (!canEnrich(action)) { + return; + } + + HadoopAction hadoopAction = (HadoopAction) action; + setLocalFileSystem(hadoopAction, actionUser); + } + + protected abstract SmartFsType supportedFsType(); + + private void setLocalFileSystem(HadoopAction action, String actionUser) throws ActionException { + try { + T localFileSystem = localFileSystemProvider.provide( + conf, actionUser, action.localFsType()); + action.setLocalFileSystem(localFileSystem); + } catch (IOException exception) { + log.error("Failed to set local file system for action with id={}", action.getActionId(), exception); + throw new ActionException(exception); + } + } + + @Override + public void close() throws IOException { + localFileSystemProvider.close(); + } +} diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AppendFileAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/AppendFileAction.java similarity index 95% rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AppendFileAction.java rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/AppendFileAction.java index a5e71b5130..762a48064d 100644 --- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AppendFileAction.java +++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/AppendFileAction.java @@ -29,11 +29,11 @@ @ActionSignature( actionId = "append", displayName = "append", - usage = HdfsAction.FILE_PATH + " $src" + + usage = HadoopAction.FILE_PATH + " $src" + AppendFileAction.LENGTH + " $length" + AppendFileAction.BUF_SIZE + " $size" ) -public class AppendFileAction extends HdfsActionWithRemoteClusterSupport { +public class AppendFileAction extends HadoopActionWithRemoteClusterSupport { static final String BUF_SIZE = "-bufSize"; static final String LENGTH = "-length"; diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckStorageAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CheckStorageAction.java similarity index 95% rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckStorageAction.java rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CheckStorageAction.java index 830293fafe..680f144490 100644 --- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckStorageAction.java +++ 
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AppendFileAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/AppendFileAction.java
similarity index 95%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AppendFileAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/AppendFileAction.java
index a5e71b5130..762a48064d 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AppendFileAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/AppendFileAction.java
@@ -29,11 +29,11 @@
 @ActionSignature(
     actionId = "append",
     displayName = "append",
-    usage = HdfsAction.FILE_PATH + " $src" +
+    usage = HadoopAction.FILE_PATH + " $src" +
         AppendFileAction.LENGTH + " $length" +
         AppendFileAction.BUF_SIZE + " $size"
 )
-public class AppendFileAction extends HdfsActionWithRemoteClusterSupport {
+public class AppendFileAction extends HadoopActionWithRemoteClusterSupport {
   static final String BUF_SIZE = "-bufSize";
   static final String LENGTH = "-length";
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckStorageAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CheckStorageAction.java
similarity index 95%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckStorageAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CheckStorageAction.java
index 830293fafe..680f144490 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckStorageAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CheckStorageAction.java
@@ -33,9 +33,9 @@
 @ActionSignature(
     actionId = "checkstorage",
     displayName = "checkstorage",
-    usage = HdfsAction.FILE_PATH + " $file "
+    usage = HadoopAction.FILE_PATH + " $file "
 )
-public class CheckStorageAction extends HdfsActionWithRemoteClusterSupport {
+public class CheckStorageAction extends HadoopActionWithRemoteClusterSupport {
   private Path filePath;
 
   @Override
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckSumAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CheckSumAction.java
similarity index 97%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckSumAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CheckSumAction.java
index 1a4644fc6a..9839cba088 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckSumAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CheckSumAction.java
@@ -36,9 +36,9 @@
 @ActionSignature(
     actionId = "checksum",
     displayName = "checksum",
-    usage = HdfsAction.FILE_PATH + " $src "
+    usage = HadoopAction.FILE_PATH + " $src "
 )
-public class CheckSumAction extends HdfsActionWithRemoteClusterSupport {
+public class CheckSumAction extends HadoopActionWithRemoteClusterSupport {
   private String fileRawPath;
 
   @Override
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/Copy2S3Action.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/Copy2S3Action.java
similarity index 94%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/Copy2S3Action.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/Copy2S3Action.java
index 1256d6411f..6f235fcbea 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/Copy2S3Action.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/Copy2S3Action.java
@@ -17,7 +17,6 @@
  */
 package org.smartdata.hdfs.action;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.smartdata.action.ActionException;
@@ -42,12 +41,12 @@
 @ActionSignature(
     actionId = "copy2s3",
     displayName = "copy2s3",
-    usage = HdfsAction.FILE_PATH + " $src " + Copy2S3Action.DEST +
+    usage = HadoopAction.FILE_PATH + " $src " + Copy2S3Action.DEST +
         " $dest " + Copy2S3Action.BUF_SIZE + " $size"
 )
-public class Copy2S3Action extends HdfsActionWithRemoteClusterSupport {
+public class Copy2S3Action extends HadoopActionWithRemoteClusterSupport {
   public static final String BUF_SIZE = "-bufSize";
-  public static final String SRC = HdfsAction.FILE_PATH;
+  public static final String SRC = FILE_PATH;
   public static final String DEST = "-dest";
   public static final String S3_SCHEME_PREFIX = "s3";
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CopyDirectoryAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CopyDirectoryAction.java
similarity index 98%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CopyDirectoryAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CopyDirectoryAction.java
index a2af48b559..4c7def37f0 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CopyDirectoryAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CopyDirectoryAction.java
@@ -39,7 +39,7 @@
 @ActionSignature(
     actionId = "dircopy",
     displayName = "dircopy",
-    usage = HdfsAction.FILE_PATH + " $file"
+    usage = HadoopAction.FILE_PATH + " $file"
 )
 public class CopyDirectoryAction extends CopyPreservedAttributesAction {
   public static final String DEST_PATH = "-dest";
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CopyFileAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CopyFileAction.java
similarity index 99%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CopyFileAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CopyFileAction.java
index b22d2cdf85..90f9cf1132 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CopyFileAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CopyFileAction.java
@@ -46,7 +46,7 @@
 @ActionSignature(
     actionId = "copy",
     displayName = "copy",
-    usage = HdfsAction.FILE_PATH + " $src "
+    usage = HadoopAction.FILE_PATH + " $src "
         + CopyFileAction.DEST_PATH + " $dest "
         + CopyFileAction.OFFSET_INDEX + " $offset "
         + CopyFileAction.LENGTH + " $length "
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CopyPreservedAttributesAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CopyPreservedAttributesAction.java
similarity index 98%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CopyPreservedAttributesAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CopyPreservedAttributesAction.java
index fbfd6a4bb8..711a0b952a 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CopyPreservedAttributesAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/CopyPreservedAttributesAction.java
@@ -38,7 +38,7 @@
 /**
  * Base class for all actions with file attributes transfer support.
  */
-public abstract class CopyPreservedAttributesAction extends HdfsAction {
+public abstract class CopyPreservedAttributesAction extends HadoopAction {
   public static final String PRESERVE = "-preserve";
 
   private final Set<String> supportedAttributes;
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DeleteFileAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/DeleteFileAction.java
similarity index 94%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DeleteFileAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/DeleteFileAction.java
index e2315c488c..6a9b7c8fa0 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DeleteFileAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/DeleteFileAction.java
@@ -33,10 +33,10 @@
 @ActionSignature(
     actionId = "delete",
     displayName = "delete",
-    usage = HdfsAction.FILE_PATH + " $file"
+    usage = HadoopAction.FILE_PATH + " $file"
 )
-public class DeleteFileAction extends HdfsActionWithRemoteClusterSupport {
+public class DeleteFileAction extends HadoopActionWithRemoteClusterSupport {
   private Path filePath;
 
   @Override
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DistCpAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/DistCpAction.java
similarity index 99%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DistCpAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/DistCpAction.java
index ca06bc8ceb..a9a23f50c5 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DistCpAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/DistCpAction.java
@@ -44,7 +44,7 @@
         + " [additional options from "
         + "https://hadoop.apache.org/docs/stable/hadoop-distcp/DistCp.html#Command_Line_Options]"
 )
-public class DistCpAction extends HdfsAction {
+public class DistCpAction extends HadoopAction {
   public static final String TARGET_ARG = "-target";
   public static final String SOURCE_PATH_LIST_FILE = "-f";
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/HdfsActionWithRemoteClusterSupport.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/HadoopActionWithRemoteClusterSupport.java
similarity index 95%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/HdfsActionWithRemoteClusterSupport.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/HadoopActionWithRemoteClusterSupport.java
index 27b533dc5e..38546da365 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/HdfsActionWithRemoteClusterSupport.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/HadoopActionWithRemoteClusterSupport.java
@@ -23,7 +23,7 @@
 import static org.smartdata.utils.PathUtil.getRemoteFileSystem;
 import static org.smartdata.utils.PathUtil.isAbsoluteRemotePath;
 
-public abstract class HdfsActionWithRemoteClusterSupport extends HdfsAction {
+public abstract class HadoopActionWithRemoteClusterSupport extends HadoopAction {
   @Override
   protected void execute()
       throws Exception {
diff --git a/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/HdfsAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/HdfsAction.java
new file mode 100644
index 0000000000..6e8449d35a
--- /dev/null
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/HdfsAction.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.smartdata.hdfs.action;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+
+import java.io.IOException;
+import java.util.Optional;
+
+public abstract class HdfsAction extends HadoopAction {
+
+  protected DistributedFileSystem localDfs;
+
+  @Override
+  public void setLocalFileSystem(FileSystem localFileSystem) {
+    super.setLocalFileSystem(localFileSystem);
+    if (localFileSystem instanceof DistributedFileSystem) {
+      this.localDfs = (DistributedFileSystem) localFileSystem;
+    }
+  }
+
+  protected DFSClient getLocalDfsClient() {
+    return Optional.ofNullable(localDfs)
+        .map(DistributedFileSystem::getClient)
+        .orElse(null);
+  }
+
+  protected Optional<HdfsFileStatus> getHdfsFileStatus(FileSystem fileSystem, Path path) throws IOException {
+    return getFileStatus(fileSystem, path)
+        .map(HdfsFileStatus.class::cast);
+  }
+}
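To make the split concrete: generic actions see only a `FileSystem`, while HDFS-only calls go through the `localDfs` field populated above. A made-up subclass for illustration — `ExampleIsClosedAction` and its hardcoded path are hypothetical and not part of this PR:

```java
package org.smartdata.hdfs.action;

import org.apache.hadoop.fs.Path;

// Illustrative only: shows the intended use of the new localDfs field.
public class ExampleIsClosedAction extends HdfsAction {
  private final Path filePath = new Path("/tmp/example");

  @Override
  protected void execute() throws Exception {
    // localDfs is populated by setLocalFileSystem() only when the injected
    // FileSystem is a DistributedFileSystem, so HDFS-only APIs are safe here.
    appendLog("File closed: " + localDfs.isFileClosed(filePath));
  }
}
```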
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ListFileAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/ListFileAction.java
similarity index 96%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ListFileAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/ListFileAction.java
index 636e8cfac7..f4539f9954 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ListFileAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/ListFileAction.java
@@ -30,7 +30,6 @@
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.util.Optional;
 import java.util.Queue;
 
 /**
@@ -39,11 +38,11 @@
 @ActionSignature(
     actionId = "list",
     displayName = "list",
-    usage = HdfsAction.FILE_PATH + " $src1"
+    usage = HadoopAction.FILE_PATH + " $src1"
         + ListFileAction.RECURSIVELY
         + ListFileAction.PRETTY_SIZES
 )
-public class ListFileAction extends HdfsActionWithRemoteClusterSupport {
+public class ListFileAction extends HadoopActionWithRemoteClusterSupport {
   // Options
   public static final String RECURSIVELY = "-r";
   public static final String PRETTY_SIZES = "-h";
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/MergeFileAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/MergeFileAction.java
similarity index 94%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/MergeFileAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/MergeFileAction.java
index a51754d379..a04643135e 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/MergeFileAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/MergeFileAction.java
@@ -37,10 +37,10 @@
 @ActionSignature(
     actionId = "merge",
     displayName = "merge",
-    usage = HdfsAction.FILE_PATH + " $src " + MergeFileAction.DEST_PATH + " $dest " +
+    usage = HadoopAction.FILE_PATH + " $src " + MergeFileAction.DEST_PATH + " $dest " +
         MergeFileAction.BUF_SIZE + " $size"
 )
-public class MergeFileAction extends HdfsActionWithRemoteClusterSupport {
+public class MergeFileAction extends HadoopActionWithRemoteClusterSupport {
   public static final String DEST_PATH = "-dest";
   public static final String BUF_SIZE = "-bufSize";
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/MetaDataAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/MetaDataAction.java
similarity index 94%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/MetaDataAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/MetaDataAction.java
index 97a97c8695..635322e248 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/MetaDataAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/MetaDataAction.java
@@ -30,12 +30,12 @@
 @ActionSignature(
     actionId = "metadata",
     displayName = "metadata",
-    usage = HdfsAction.FILE_PATH + " $src " + MetaDataAction.OWNER_NAME + " $owner " +
+    usage = HadoopAction.FILE_PATH + " $src " + MetaDataAction.OWNER_NAME + " $owner " +
         MetaDataAction.GROUP_NAME + " $group " + MetaDataAction.BLOCK_REPLICATION +
         " $replication " + MetaDataAction.PERMISSION + " $permission " +
         MetaDataAction.MTIME + " $mtime " + MetaDataAction.ATIME + " $atime"
 )
-public class MetaDataAction extends HdfsActionWithRemoteClusterSupport {
+public class MetaDataAction extends HadoopActionWithRemoteClusterSupport {
   public static final String OWNER_NAME = "-owner";
   public static final String GROUP_NAME = "-group";
   public static final String BLOCK_REPLICATION = "-replication";
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ReadFileAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/ReadFileAction.java
similarity index 95%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ReadFileAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/ReadFileAction.java
index b1fa2a6263..c4d200da4b 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ReadFileAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/ReadFileAction.java
@@ -34,10 +34,10 @@
 @ActionSignature(
     actionId = "read",
     displayName = "read",
-    usage = HdfsAction.FILE_PATH + " $file "
+    usage = HadoopAction.FILE_PATH + " $file "
         + ReadFileAction.BUF_SIZE + " $size"
 )
-public class ReadFileAction extends HdfsActionWithRemoteClusterSupport {
+public class ReadFileAction extends HadoopActionWithRemoteClusterSupport {
   public static final String BUF_SIZE = "-bufSize";
   public static final int DEFAULT_BUFFER_SIZE = 64 * 1024;
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/RenameFileAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/RenameFileAction.java
similarity index 95%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/RenameFileAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/RenameFileAction.java
index 8da4f74140..9d989ba81f 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/RenameFileAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/RenameFileAction.java
@@ -37,10 +37,10 @@
 @ActionSignature(
     actionId = "rename",
     displayName = "rename",
-    usage = HdfsAction.FILE_PATH + " $src " + RenameFileAction.DEST_PATH +
+    usage = HadoopAction.FILE_PATH + " $src " + RenameFileAction.DEST_PATH +
         " $dest"
 )
-public class RenameFileAction extends HdfsActionWithRemoteClusterSupport {
+public class RenameFileAction extends HadoopActionWithRemoteClusterSupport {
   public static final String DEST_PATH = "-dest";
   private Path srcPath;
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SetStoragePolicyAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/SetStoragePolicyAction.java
similarity index 92%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SetStoragePolicyAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/SetStoragePolicyAction.java
index 8c8e0be9a4..b830d5a7b8 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SetStoragePolicyAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/SetStoragePolicyAction.java
@@ -27,10 +27,10 @@
 @ActionSignature(
     actionId = "setstoragepolicy",
     displayName = "setstoragepolicy",
-    usage = HdfsAction.FILE_PATH + " $file "
+    usage = HadoopAction.FILE_PATH + " $file "
         + SetStoragePolicyAction.STORAGE_POLICY + " $policy"
 )
-public class SetStoragePolicyAction extends HdfsActionWithRemoteClusterSupport {
+public class SetStoragePolicyAction extends HadoopActionWithRemoteClusterSupport {
   public static final String STORAGE_POLICY = "-storagePolicy";
   private Path filePath;
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SetXAttrAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/SetXAttrAction.java
similarity index 94%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SetXAttrAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/SetXAttrAction.java
index 206c267d3c..18218accff 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SetXAttrAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/SetXAttrAction.java
@@ -34,10 +34,10 @@
 @ActionSignature(
     actionId = "setxattr",
     displayName = "setxattr",
-    usage = HdfsAction.FILE_PATH + " $src " + SetXAttrAction.ATT_NAME +
+    usage = HadoopAction.FILE_PATH + " $src " + SetXAttrAction.ATT_NAME +
         " $name " + SetXAttrAction.ATT_VALUE + " $value"
 )
-public class SetXAttrAction extends HdfsActionWithRemoteClusterSupport {
+public class SetXAttrAction extends HadoopActionWithRemoteClusterSupport {
   private static final Logger LOG = LoggerFactory.getLogger(SetXAttrAction.class);
 
   public static final String ATT_NAME = "-name";
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/TruncateAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/TruncateAction.java
similarity index 94%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/TruncateAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/TruncateAction.java
index 1dfc73853b..73e2bbd3ea 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/TruncateAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/TruncateAction.java
@@ -30,10 +30,10 @@
 @ActionSignature(
     actionId = "truncate",
     displayName = "truncate",
-    usage = HdfsAction.FILE_PATH + " $src "
+    usage = HadoopAction.FILE_PATH + " $src "
         + TruncateAction.LENGTH + " $length"
 )
-public class TruncateAction extends HdfsActionWithRemoteClusterSupport {
+public class TruncateAction extends HadoopActionWithRemoteClusterSupport {
   public static final String LENGTH = "-length";
   private Path srcPath;
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/UpdateFileMetadataSupport.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/UpdateFileMetadataSupport.java
similarity index 100%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/UpdateFileMetadataSupport.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/UpdateFileMetadataSupport.java
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/WriteFileAction.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/WriteFileAction.java
similarity index 96%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/WriteFileAction.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/WriteFileAction.java
index 18bfd8f63a..1e093f9fe3 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/WriteFileAction.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/action/WriteFileAction.java
@@ -36,14 +36,14 @@
     actionId = "write",
     displayName = "write",
     usage =
-        HdfsAction.FILE_PATH
+        HadoopAction.FILE_PATH
             + " $file "
             + WriteFileAction.LENGTH
             + " $length "
             + WriteFileAction.BUF_SIZE
             + " $size"
 )
-public class WriteFileAction extends HdfsActionWithRemoteClusterSupport {
+public class WriteFileAction extends HadoopActionWithRemoteClusterSupport {
   public static final String LENGTH = "-length";
   public static final String BUF_SIZE = "-bufSize";
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/BaseFileSystemCache.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/client/BaseFileSystemCache.java
similarity index 91%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/BaseFileSystemCache.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/client/BaseFileSystemCache.java
index c8502000ef..96cb3e9966 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/BaseFileSystemCache.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/client/BaseFileSystemCache.java
@@ -24,8 +24,7 @@
 import lombok.Data;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.smartdata.hdfs.HadoopUtil;
+import org.apache.hadoop.fs.FileSystem;
 import org.smartdata.hdfs.impersonation.UserImpersonationStrategy;
 
 import java.io.IOException;
@@ -38,7 +37,7 @@
 @Slf4j
-public abstract class BaseFileSystemCache<T extends DistributedFileSystem> implements FileSystemCache<T> {
+public abstract class BaseFileSystemCache<T extends FileSystem> implements FileSystemCache<T> {
   private final Cache<CacheKey, T> fileSystemCache;
   private final ScheduledExecutorService evictionHandlerExecutor;
 
@@ -57,7 +56,7 @@ public BaseFileSystemCache(UserImpersonationStrategy userImpersonationStrategy,
   @Override
   public T get(Configuration config, String user, InetSocketAddress ssmMasterAddress) throws IOException {
-    CacheKey cacheKey = new CacheKey(ssmMasterAddress, user, HadoopUtil.getNameNodeUri(config));
+    CacheKey cacheKey = new CacheKey(ssmMasterAddress, user, getServiceUri(config));
     return fileSystemCache.get(cacheKey, key -> createImpersonatedFileSystem(config, key));
   }
 
@@ -68,6 +67,8 @@ public void close() throws IOException {
     evictionHandlerExecutor.shutdown();
   }
 
+  protected abstract URI getServiceUri(Configuration config) throws IOException;
+
   protected abstract T createFileSystem(Configuration config, CacheKey cacheKey);
 
   private T createImpersonatedFileSystem(Configuration config, CacheKey cacheKey) {
@@ -95,6 +96,6 @@ private void closeFileSystem(T fileSystem) {
   protected static class CacheKey {
     private final InetSocketAddress ssmMasterAddress;
     private final String user;
-    private final URI nameNodeUri;
+    private final URI serviceUri;
   }
 }
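The cache above builds on Caffeine (note the `caffeine` dependency moving out of `smart-hadoop/pom.xml` later in this diff). For reviewers unfamiliar with the pattern, a self-contained sketch of the expire-plus-removal-listener idiom the class relies on; the one-minute TTL and string types are arbitrary stand-ins, not values from this PR:

```java
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.RemovalCause;

import java.util.concurrent.TimeUnit;

public class CaffeineTtlSketch {
  public static void main(String[] args) {
    // Expire-on-idle cache with an eviction callback, mirroring the pattern
    // BaseFileSystemCache wraps around per-user FileSystem handles.
    Cache<String, String> cache = Caffeine.newBuilder()
        .expireAfterAccess(1, TimeUnit.MINUTES)
        .removalListener((String key, String value, RemovalCause cause) ->
            System.out.println("evicted " + key + " (" + cause + ")"))
        .build();

    // Counterpart of fileSystemCache.get(cacheKey, key -> ...): compute-if-absent
    // keyed by a (user, service URI)-style key.
    String handle = cache.get("proxyUser@hdfs://namenode:8020", key -> "fs-handle");
    System.out.println(handle);
  }
}
```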
diff --git a/smart-server/src/test/java/org/smartdata/server/TestSmartServer.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/client/FileSystemCache.java
similarity index 50%
rename from smart-server/src/test/java/org/smartdata/server/TestSmartServer.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/client/FileSystemCache.java
index a0edebe421..95a477d58d 100644
--- a/smart-server/src/test/java/org/smartdata/server/TestSmartServer.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/client/FileSystemCache.java
@@ -15,47 +15,26 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.smartdata.server;
+package org.smartdata.hdfs.client;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.smartdata.conf.SmartConf;
+import org.apache.hadoop.fs.FileSystem;
+import org.smartdata.utils.StringUtil;
 
-public class TestSmartServer {
-  protected SmartConf conf;
-  protected SmartServer ssm;
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.time.Duration;
 
-  private static final int DEFAULT_BLOCK_SIZE = 100;
+import static org.smartdata.conf.SmartConfKeys.SMART_ACTION_CLIENT_CACHE_TTL_DEFAULT;
+import static org.smartdata.conf.SmartConfKeys.SMART_ACTION_CLIENT_CACHE_TTL_KEY;
 
-  static {
-    TestBalancer.initTestSetup();
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new SmartConf();
-    initConf(conf);
-
-    // rpcServer start in SmartServer
-    ssm = SmartServer.launchWith(conf);
-  }
-
-  private void initConf(Configuration conf) {
-
-  }
-
-  @Test
-  public void test() throws InterruptedException {
-    //Thread.sleep(1000000);
-  }
+public interface FileSystemCache<T extends FileSystem> extends Closeable {
+  T get(Configuration config, String user, InetSocketAddress ssmMasterAddress) throws IOException;
 
-  @After
-  public void cleanUp() {
-    if (ssm != null) {
-      ssm.shutdown();
-    }
+  static Duration getCacheTtl(Configuration configuration) {
+    String cacheKeyTtl = configuration.get(
+        SMART_ACTION_CLIENT_CACHE_TTL_KEY, SMART_ACTION_CLIENT_CACHE_TTL_DEFAULT);
+    return Duration.ofMillis(StringUtil.parseTimeString(cacheKeyTtl));
   }
 }
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/LocalFileSystemProvider.java b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/client/LocalFileSystemProvider.java
similarity index 77%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/LocalFileSystemProvider.java
rename to smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/client/LocalFileSystemProvider.java
index 84da7b46a6..3f068bb2cd 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/LocalFileSystemProvider.java
+++ b/smart-hadoop-support/smart-hadoop-action-common/src/main/java/org/smartdata/hdfs/client/LocalFileSystemProvider.java
@@ -18,13 +18,13 @@
 package org.smartdata.hdfs.client;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.smartdata.hdfs.action.HdfsAction;
+import org.apache.hadoop.fs.FileSystem;
+import org.smartdata.hdfs.action.HadoopAction;
 
 import java.io.Closeable;
 import java.io.IOException;
 
-public interface LocalFileSystemProvider extends Closeable {
-  DistributedFileSystem provide(
-      Configuration config, String user, HdfsAction.FsType fsType) throws IOException;
+public interface LocalFileSystemProvider<T extends FileSystem> extends Closeable {
+  T provide(
+      Configuration config, String user, HadoopAction.FsType fsType) throws IOException;
 }
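The static `getCacheTtl` helper keeps TTL parsing in one place. A usage sketch — the literal key string and the "5m" format are assumptions, since the real key name, default, and `StringUtil.parseTimeString` semantics live outside this diff:

```java
import org.apache.hadoop.conf.Configuration;
import org.smartdata.hdfs.client.FileSystemCache;

import java.time.Duration;

public class CacheTtlSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical key/value; the real constant is defined in SmartConfKeys.
    conf.set("smart.action.client.cache.ttl", "5m");
    Duration ttl = FileSystemCache.getCacheTtl(conf);
    System.out.println(ttl); // expected PT5M if parseTimeString accepts "5m"
  }
}
```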
diff --git a/smart-hadoop-support/smart-hadoop-common/src/main/java/org/smartdata/hdfs/HadoopUtil.java b/smart-hadoop-support/smart-hadoop-common/src/main/java/org/smartdata/hdfs/HadoopUtil.java
index fc351b8f20..0503479864 100644
--- a/smart-hadoop-support/smart-hadoop-common/src/main/java/org/smartdata/hdfs/HadoopUtil.java
+++ b/smart-hadoop-support/smart-hadoop-common/src/main/java/org/smartdata/hdfs/HadoopUtil.java
@@ -18,10 +18,11 @@
 package org.smartdata.hdfs;
 
 import org.apache.commons.lang3.SerializationUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.ipc.RemoteException;
@@ -49,6 +50,7 @@
 import java.util.Map;
 import java.util.Optional;
 
+import static org.smartdata.utils.PathUtil.addPathSeparator;
 import static org.smartdata.utils.PathUtil.getRawPath;
 
 /**
@@ -155,6 +157,61 @@ public static void setSmartConfByHadoop(SmartConf conf) {
     }
   }
 
+  public static Optional<URL> getHadoopConfDir(String hadoopConfPath)
+      throws IOException {
+    if (StringUtils.isBlank(hadoopConfPath)) {
+      LOG.warn("Hadoop configuration path is not set");
+      return Optional.empty();
+    }
+
+    URL hadoopConfDir;
+    hadoopConfPath = addPathSeparator(hadoopConfPath);
+    try {
+      hadoopConfDir = new URL(hadoopConfPath);
+    } catch (MalformedURLException e) {
+      hadoopConfDir = new URL("file://" + hadoopConfPath);
+    }
+    Path hadoopConfDirPath;
+    try {
+      hadoopConfDirPath = Paths.get(hadoopConfDir.toURI());
+    } catch (URISyntaxException e) {
+      throw new IOException(e);
+    }
+
+    if (Files.exists(hadoopConfDirPath) &&
+        Files.isDirectory(hadoopConfDirPath)) {
+      LOG.debug("Hadoop configuration path = {}", hadoopConfPath);
+    } else {
+      throw new IOException("Hadoop configuration path doesn't exist or is not a directory: "
+          + hadoopConfPath);
+    }
+
+    return Optional.of(hadoopConfDir);
+  }
+
+  public static void loadResource(Configuration config, URL configDir, String resource) throws IOException {
+    try {
+      URL coreConfFile = new URL(configDir, resource);
+      Path filePath = Paths.get(coreConfFile.toURI());
+      if (Files.exists(filePath)) {
+        config.addResource(coreConfFile);
+        LOG.debug("Hadoop configuration file [{}] is loaded", coreConfFile.toExternalForm());
+      } else {
+        throw new IOException("Hadoop configuration file doesn't exist: " + coreConfFile.toExternalForm());
+      }
+    } catch (Exception exception) {
+      throw new IOException("Error loading configuration file " + resource, exception);
+    }
+  }
+
+  public static void loadResourceSafely(Configuration config, URL configDir, String resource) {
+    try {
+      loadResource(config, configDir, resource);
+    } catch (Exception exception) {
+      LOG.warn("Error loading resource {}: {}", resource, exception.getMessage());
+    }
+  }
+
   /**
    * Get hadoop configuration from the configure files in the given directory.
    *
@@ -162,65 +219,15 @@ public static void setSmartConfByHadoop(SmartConf conf) {
    */
   public static HdfsConfiguration getHadoopConf(String hadoopConfPath)
       throws IOException {
-    if (hadoopConfPath == null || hadoopConfPath.isEmpty()) {
-      LOG.warn("Hadoop configuration path is not set");
+    Optional<URL> hadoopConfDir = getHadoopConfDir(hadoopConfPath);
+    if (!hadoopConfDir.isPresent()) {
       return null;
-    } else {
-      URL hadoopConfDir;
-      HdfsConfiguration hadoopConf = new HdfsConfiguration();
-      try {
-        if (!hadoopConfPath.endsWith("/")) {
-          hadoopConfPath += "/";
-        }
-        try {
-          hadoopConfDir = new URL(hadoopConfPath);
-        } catch (MalformedURLException e) {
-          hadoopConfDir = new URL("file://" + hadoopConfPath);
-        }
-        Path hadoopConfDirPath = Paths.get(hadoopConfDir.toURI());
-        if (Files.exists(hadoopConfDirPath) &&
-            Files.isDirectory(hadoopConfDirPath)) {
-          LOG.debug("Hadoop configuration path = " + hadoopConfPath);
-        } else {
-          throw new IOException("Hadoop configuration path [" + hadoopConfPath
-              + "] doesn't exist or is not a directory");
-        }
-
-        try {
-          URL coreConfFile = new URL(hadoopConfDir, "core-site.xml");
-          Path coreFilePath = Paths.get(coreConfFile.toURI());
-          if (Files.exists(coreFilePath)) {
-            hadoopConf.addResource(coreConfFile);
-            LOG.debug("Hadoop configuration file [" +
-                coreConfFile.toExternalForm() + "] is loaded");
-          } else {
-            throw new IOException("Hadoop configuration file [" +
-                coreConfFile.toExternalForm() + "] doesn't exist");
-          }
-        } catch (MalformedURLException e1) {
-          throw new IOException("Access hadoop configuration file core-site.xml failed", e1);
-        }
-
-        try {
-          URL hdfsConfFile = new URL(hadoopConfDir, "hdfs-site.xml");
-          Path hdfsFilePath = Paths.get(hdfsConfFile.toURI());
-          if (Files.exists(hdfsFilePath)) {
-            hadoopConf.addResource(hdfsConfFile);
-            LOG.debug("Hadoop configuration file [" +
-                hdfsConfFile.toExternalForm() + "] is loaded");
-          } else {
-            throw new IOException("Hadoop configuration file [" +
-                hdfsConfFile.toExternalForm() + "] doesn't exist");
-          }
-        } catch (MalformedURLException e1) {
-          throw new IOException("Access hadoop configuration file hdfs-site.xml failed", e1);
-        }
-      } catch (URISyntaxException e) {
-        throw new IOException("Access hadoop configuration path [" + hadoopConfPath
-            + "] failed" + e);
-      }
-      return hadoopConf;
     }
+
+    HdfsConfiguration conf = new HdfsConfiguration();
+    loadResource(conf, hadoopConfDir.get(), "core-site.xml");
+    loadResource(conf, hadoopConfDir.get(), "hdfs-site.xml");
+    return conf;
   }
 
   public static URI getNameNodeUri(Configuration conf)
@@ -329,7 +336,7 @@ public static FileState getFileState(DFSClient dfsClient, String filePath)
   }
 
   public static FileState getFileState(
-      DistributedFileSystem fileSystem,
+      FileSystem fileSystem,
       org.apache.hadoop.fs.Path filePath) throws IOException {
     try {
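The refactor preserves the old contract: a blank path logs a warning and yields `null`, while an existing directory must supply both `core-site.xml` and `hdfs-site.xml` or an `IOException` surfaces. A quick usage sketch; the `/etc/hadoop/conf` path is just an illustrative example:

```java
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.smartdata.hdfs.HadoopUtil;

public class HadoopConfLoadSketch {
  public static void main(String[] args) throws Exception {
    // Returns null for a blank path; throws IOException if the directory
    // or either site file is missing.
    HdfsConfiguration conf = HadoopUtil.getHadoopConf("/etc/hadoop/conf");
    if (conf != null) {
      System.out.println(conf.get("fs.defaultFS"));
    }
  }
}
```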
diff --git a/smart-hadoop-support/smart-hadoop-common/src/main/java/org/smartdata/hdfs/action/HdfsAction.java b/smart-hadoop-support/smart-hadoop-common/src/main/java/org/smartdata/hdfs/action/HadoopAction.java
similarity index 75%
rename from smart-hadoop-support/smart-hadoop-common/src/main/java/org/smartdata/hdfs/action/HdfsAction.java
rename to smart-hadoop-support/smart-hadoop-common/src/main/java/org/smartdata/hdfs/action/HadoopAction.java
index 5d55b44697..b145ab5e8e 100644
--- a/smart-hadoop-support/smart-hadoop-common/src/main/java/org/smartdata/hdfs/action/HdfsAction.java
+++ b/smart-hadoop-support/smart-hadoop-common/src/main/java/org/smartdata/hdfs/action/HadoopAction.java
@@ -19,17 +19,12 @@
 import lombok.Setter;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.smartdata.action.SmartAction;
 import org.smartdata.conf.SmartConf;
-import org.smartdata.conf.SmartConfKeys;
 import org.smartdata.model.CmdletDescriptor;
 
 import java.io.IOException;
@@ -40,13 +35,13 @@
 /**
- * Base class for all HDFS actions.
+ * Base class for all Hadoop actions.
  */
 @Setter
-public abstract class HdfsAction extends SmartAction {
+public abstract class HadoopAction extends SmartAction {
   public static final String FILE_PATH = CmdletDescriptor.HDFS_FILE_PATH;
 
-  protected DistributedFileSystem localFileSystem;
+  protected FileSystem localFileSystem;
 
   public enum FsType {
     SMART,
@@ -63,16 +58,9 @@ protected void preRun() throws Exception {
     withDefaultFs();
   }
 
-  protected DFSClient getLocalDfsClient() {
-    return Optional.ofNullable(localFileSystem)
-        .map(DistributedFileSystem::getClient)
-        .orElse(null);
-  }
-
   protected void withDefaultFs() {
-    Configuration conf = getContext().getConf();
-    String nameNodeURL = conf.get(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY);
-    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, nameNodeURL);
+    SmartConf conf = getContext().getConf();
+    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, conf.getDefaultFs());
   }
 
   protected void validateNonEmptyArgs(String... keys) {
@@ -115,8 +103,4 @@ protected Optional<FileStatus> getFileStatus(FileSystem fileSystem, Path path) t
       return Optional.empty();
     }
   }
-
-  protected Optional<HdfsFileStatus> getHdfsFileStatus(FileSystem fileSystem, Path path) throws IOException {
-    return getFileStatus(fileSystem, path).map(HdfsFileStatus.class::cast);
-  }
 }
diff --git a/smart-hadoop-support/smart-hadoop/pom.xml b/smart-hadoop-support/smart-hadoop/pom.xml
index 4dc1b07148..6e4eb947c3 100644
--- a/smart-hadoop-support/smart-hadoop/pom.xml
+++ b/smart-hadoop-support/smart-hadoop/pom.xml
@@ -36,7 +36,7 @@
     <dependency>
       <groupId>org.smartdata</groupId>
-      <artifactId>smart-hadoop-common</artifactId>
+      <artifactId>smart-hadoop-action-common</artifactId>
       <version>2.2.0-SNAPSHOT</version>
     </dependency>
@@ -68,10 +68,6 @@
       <artifactId>${smart.hadoop.client.artifact}</artifactId>
       <version>${project.version}</version>
     </dependency>
-    <dependency>
-      <groupId>com.github.ben-manes.caffeine</groupId>
-      <artifactId>caffeine</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.projectlombok</groupId>
      <artifactId>lombok</artifactId>
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/HdfsStatesUpdateService.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/HdfsStatesUpdateService.java
index f7f888e535..25319c0264 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/HdfsStatesUpdateService.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/HdfsStatesUpdateService.java
@@ -28,7 +28,6 @@
 import org.slf4j.LoggerFactory;
 import org.smartdata.SmartConstants;
 import org.smartdata.SmartContext;
-import org.smartdata.conf.SmartConfKeys;
 import org.smartdata.hdfs.metric.fetcher.CachedListFetcher;
 import org.smartdata.hdfs.metric.fetcher.DataNodeInfoFetcher;
 import org.smartdata.hdfs.metric.fetcher.InotifyEventFetcher;
@@ -71,21 +70,13 @@ public HdfsStatesUpdateService(SmartContext context, MetaStore metaStore) {
    *
    * @return true if initialized successfully
    */
-  //@TODO: remove loadHadoopConf because it is done in Smart Server
   @Override
   public void init() throws IOException {
     LOG.info("Initializing ...");
     SmartContext context = getContext();
-    final Configuration conf = context.getConf();
-    String hadoopConfPath = getContext().getConf()
-        .get(SmartConfKeys.SMART_HADOOP_CONF_DIR_KEY);
-    try {
-      HadoopUtil.loadHadoopConf(hadoopConfPath, conf);
-    } catch (IOException e) {
-      throw new IOException("Fail to load Hadoop configuration for : " + e.getMessage());
-    }
+    Configuration conf = context.getConf();
     final URI nnUri = HadoopUtil.getNameNodeUri(context.getConf());
-    LOG.debug("Final Namenode URL:" + nnUri.toString());
+    LOG.debug("Final Namenode URL: {}", nnUri);
     client = HadoopUtil.getDFSClient(nnUri, conf);
     checkAndCreateIdFiles(nnUri, context.getConf());
     this.executorService = Executors.newScheduledThreadPool(4);
@@ -206,7 +197,7 @@ private void checkAndCreateIdFiles(URI namenodeURI, Configuration conf) throws I
   }
 
   private FSDataOutputStream checkAndMarkRunning(URI namenodeURI, Configuration conf,
-                                                 String filePath)
+      String filePath)
       throws IOException {
     Path path = new Path(filePath);
     DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(namenodeURI, conf);
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllDiskFileAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllDiskFileAction.java
index aad1f5ee95..fbce6aa6f5 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllDiskFileAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllDiskFileAction.java
@@ -25,7 +25,7 @@
 @ActionSignature(
     actionId = "alldisk",
     displayName = "alldisk",
-    usage = HdfsAction.FILE_PATH + " $file "
+    usage = HadoopAction.FILE_PATH + " $file "
 )
 public class AllDiskFileAction extends MoveFileAction {
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllSsdFileAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllSsdFileAction.java
index 06d3017c84..5d123c54e4 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllSsdFileAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/AllSsdFileAction.java
@@ -25,7 +25,7 @@
 @ActionSignature(
     actionId = "allssd",
     displayName = "allssd",
-    usage = HdfsAction.FILE_PATH + " $file "
+    usage = HadoopAction.FILE_PATH + " $file "
 )
 public class AllSsdFileAction extends MoveFileAction {
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ArchiveFileAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ArchiveFileAction.java
index cab684edbd..5c35c5e65e 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ArchiveFileAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ArchiveFileAction.java
@@ -25,7 +25,7 @@
 @ActionSignature(
     actionId = "archive",
     displayName = "archive",
-    usage = HdfsAction.FILE_PATH + " $file "
+    usage = HadoopAction.FILE_PATH + " $file "
 )
 public class ArchiveFileAction extends MoveFileAction {
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CacheFileAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CacheFileAction.java
index b4c6dc2870..58473366db 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CacheFileAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CacheFileAction.java
@@ -32,7 +32,7 @@
 @ActionSignature(
     actionId = "cache",
     displayName = "cache",
-    usage = HdfsAction.FILE_PATH + " $file "
+    usage = HadoopAction.FILE_PATH + " $file "
         + CacheFileAction.REPLICA + " $replica "
 )
 public class CacheFileAction extends HdfsAction {
@@ -57,7 +57,7 @@ protected void execute() throws Exception {
     // set cache replication as the replication number of the file if not set
     if (replication == 0) {
-      FileStatus fileStatus = localFileSystem.getFileStatus(filePath);
+      FileStatus fileStatus = localDfs.getFileStatus(filePath);
       replication = fileStatus.isDirectory() ? 1 : fileStatus.getReplication();
     }
     executeCacheAction();
@@ -67,7 +67,7 @@ boolean isFileCached() throws Exception {
     CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
         .setPath(filePath)
         .build();
-    return localFileSystem.listCacheDirectives(filter).hasNext();
+    return localDfs.listCacheDirectives(filter).hasNext();
   }
 
   private void executeCacheAction() throws Exception {
@@ -87,6 +87,6 @@ private void addDirective() throws Exception {
         .setReplication(replication)
         .build();
 
-    localFileSystem.addCacheDirective(filter);
+    localDfs.addCacheDirective(filter);
   }
 }
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckCompressAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckCompressAction.java
index 85395ad4b5..0de0c7a069 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckCompressAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CheckCompressAction.java
@@ -31,7 +31,7 @@
 @ActionSignature(
     actionId = "checkcompress",
     displayName = "checkcompress",
-    usage = HdfsAction.FILE_PATH
+    usage = HadoopAction.FILE_PATH
         + " $file "
 )
 public class CheckCompressAction extends HdfsAction {
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CompressionAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CompressionAction.java
index 99618c1fd5..6dc2d4d611 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CompressionAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/CompressionAction.java
@@ -51,7 +51,7 @@
     actionId = "compress",
     displayName = "compress",
     usage =
-        HdfsAction.FILE_PATH
+        HadoopAction.FILE_PATH
             + " $file "
             + CompressionAction.BUF_SIZE
             + " $bufSize "
@@ -116,12 +116,12 @@ protected void execute() throws Exception {
           "Compression Action failed due to unsupported codec: " + compressCodec);
     }
 
-    if (!localFileSystem.exists(filePath)) {
+    if (!localDfs.exists(filePath)) {
       throw new ActionException(
           "Failed to execute Compression Action: the given file doesn't exist!");
     }
 
-    FileStatus srcFileStatus = localFileSystem.getFileStatus(filePath);
+    FileStatus srcFileStatus = localDfs.getFileStatus(filePath);
     // Consider directory case.
     if (srcFileStatus.isDirectory()) {
       appendLog("Compression is not applicable to a directory.");
@@ -139,10 +139,10 @@ protected void execute() throws Exception {
     // SmartDFSClient will fail to open compressing file with PROCESSING FileStage
     // set by Compression scheduler. But considering DfsClient may be used, we use
     // append operation to lock the file to avoid any modification.
-    OutputStream lockStream = localFileSystem.append(filePath, bufferSize);
+    OutputStream lockStream = localDfs.append(filePath, bufferSize);
 
-    FSDataInputStream in = localFileSystem.open(filePath);
-    OutputStream out = localFileSystem.create(compressTmpPath,
+    FSDataInputStream in = localDfs.open(filePath);
+    OutputStream out = localDfs.create(compressTmpPath,
         true,
         getLocalDfsClient().getConf().getIoBufferSize(),
         srcFileStatus.getReplication(),
@@ -152,15 +152,15 @@ protected void execute() throws Exception {
     appendLog("File length: " + srcFileStatus.getLen());
     bufferSize = getActualBuffSize(srcFileStatus.getLen());
 
-    String storagePolicyName = localFileSystem.getStoragePolicy(filePath).getName();
+    String storagePolicyName = localDfs.getStoragePolicy(filePath).getName();
     if (!storagePolicyName.equals("UNDEF")) {
-      localFileSystem.setStoragePolicy(compressTmpPath, storagePolicyName);
+      localDfs.setStoragePolicy(compressTmpPath, storagePolicyName);
     }
 
     compress(in, out);
-    FileStatus destFileStatus = localFileSystem.getFileStatus(compressTmpPath);
-    localFileSystem.setOwner(compressTmpPath, srcFileStatus.getOwner(), srcFileStatus.getGroup());
-    localFileSystem.setPermission(compressTmpPath, srcFileStatus.getPermission());
+    FileStatus destFileStatus = localDfs.getFileStatus(compressTmpPath);
+    localDfs.setOwner(compressTmpPath, srcFileStatus.getOwner(), srcFileStatus.getGroup());
+    localDfs.setPermission(compressTmpPath, srcFileStatus.getPermission());
     compressionFileState.setCompressedLength(destFileStatus.getLen());
     appendLog("Compressed file length: " + destFileStatus.getLen());
     compressionFileInfo =
@@ -179,7 +179,7 @@ protected void execute() throws Exception {
       setXAttr(compressTmpPath, compressionFileState);
       // Rename operation is moved from CompressionScheduler.
       // Thus, modification for original file will be avoided.
-      localFileSystem.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);
+      localDfs.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);
     } else {
       // Add to raw path
       setXAttr(filePath, compressionFileState);
@@ -213,7 +213,7 @@ private int getActualBuffSize(long fileSize) {
   }
 
   private void setXAttr(Path path, CompressionFileState compressionFileState) throws IOException {
-    localFileSystem.setXAttr(path, SMART_FILE_STATE_XATTR_NAME,
+    localDfs.setXAttr(path, SMART_FILE_STATE_XATTR_NAME,
         SerializationUtils.serialize(compressionFileState),
         EnumSet.of(XAttrSetFlag.CREATE));
   }
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ConcatFileAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ConcatFileAction.java
index 6199045038..d9e195d9e9 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ConcatFileAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/ConcatFileAction.java
@@ -37,10 +37,10 @@
 @ActionSignature(
     actionId = "concat",
     displayName = "concat",
-    usage = HdfsAction.FILE_PATH + " $src " + ConcatFileAction.DEST_PATH + " $dest"
+    usage = HadoopAction.FILE_PATH + " $src " + ConcatFileAction.DEST_PATH + " $dest"
 )
 public class ConcatFileAction extends HdfsAction {
   public static final String DEST_PATH = "-dest";
@@ -68,14 +68,14 @@ protected void execute() throws Exception {
     }
 
     for (Path sourcePath : srcPaths) {
-      if (localFileSystem.getFileStatus(sourcePath).isDirectory()) {
+      if (localDfs.getFileStatus(sourcePath).isDirectory()) {
         throw new IllegalArgumentException("File parameter is not file");
       }
     }
 
     Path firstPath = srcPaths.removeFirst();
     Path[] restPaths = srcPaths.toArray(new Path[0]);
-    localFileSystem.concat(firstPath, restPaths);
-    localFileSystem.rename(firstPath, targetPath, Options.Rename.OVERWRITE);
+    localDfs.concat(firstPath, restPaths);
+    localDfs.rename(firstPath, targetPath, Options.Rename.OVERWRITE);
   }
 }
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DecompressionAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DecompressionAction.java
index 6d83f1d5a0..4c704e1bda 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DecompressionAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/DecompressionAction.java
@@ -39,7 +39,7 @@
 @ActionSignature(
     actionId = "decompress",
     displayName = "decompress",
-    usage = HdfsAction.FILE_PATH
+    usage = HadoopAction.FILE_PATH
         + " $file "
         + BUF_SIZE
         + " $bufSize "
@@ -73,7 +73,7 @@ private void validate() throws Exception {
     }
 
     // Consider directory case.
-    if (localFileSystem.getFileStatus(filePath).isDirectory()) {
+    if (localDfs.getFileStatus(filePath).isDirectory()) {
       throw new ActionException("Decompression is not applicable to a directory.");
     }
   }
@@ -86,17 +86,17 @@ protected void execute() throws Exception {
       throw new ActionException("File is not compressed: " + filePath);
     }
 
-    FileStatus compressedFileStatus = localFileSystem.getFileStatus(filePath);
+    FileStatus compressedFileStatus = localDfs.getFileStatus(filePath);
 
-    try (InputStream in = localFileSystem.open(filePath);
+    try (InputStream in = localDfs.open(filePath);
         // No need to lock the file by append operation,
         // since compressed file cannot be modified.
-        OutputStream out = localFileSystem.create(compressTmpPath, true)) {
+        OutputStream out = localDfs.create(compressTmpPath, true)) {
 
       // Keep storage policy consistent.
-      String storagePolicyName = localFileSystem.getStoragePolicy(filePath).getName();
+      String storagePolicyName = localDfs.getStoragePolicy(filePath).getName();
       if (!storagePolicyName.equals("UNDEF")) {
-        localFileSystem.setStoragePolicy(compressTmpPath, storagePolicyName);
+        localDfs.setStoragePolicy(compressTmpPath, storagePolicyName);
       }
 
       StreamCopyHandler.of(in, out)
@@ -108,11 +108,11 @@ protected void execute() throws Exception {
           .runCopy();
 
       // Overwrite the original file with decompressed data
-      localFileSystem.setOwner(compressTmpPath,
+      localDfs.setOwner(compressTmpPath,
          compressedFileStatus.getOwner(),
          compressedFileStatus.getGroup());
-      localFileSystem.setPermission(compressTmpPath, compressedFileStatus.getPermission());
-      localFileSystem.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);
+      localDfs.setPermission(compressTmpPath, compressedFileStatus.getPermission());
+      localDfs.rename(compressTmpPath, filePath, Options.Rename.OVERWRITE);
       appendLog("The given file is successfully decompressed by codec: "
           + ((CompressionFileState) fileState).getCompressionImpl());
     }
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/HdfsActionFactory.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/HdfsActionFactory.java
index bbf11ee4e1..75b60bc414 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/HdfsActionFactory.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/HdfsActionFactory.java
@@ -18,49 +18,59 @@
 package org.smartdata.hdfs.action;
 
 import org.smartdata.action.AbstractActionFactory;
+import org.smartdata.action.SmartAction;
+import org.smartdata.action.SyncAction;
+
+import java.util.Arrays;
+import java.util.List;
 
 /**
  * Built-in smart actions for HDFS system.
  */
 public class HdfsActionFactory extends AbstractActionFactory {
-  static {
-    addAction(AllSsdFileAction.class);
-    addAction(AllDiskFileAction.class);
-    addAction(OneSsdFileAction.class);
-    addAction(OneDiskFileAction.class);
-    addAction(RamDiskFileAction.class);
-    addAction(ArchiveFileAction.class);
-    addAction(CacheFileAction.class);
-    addAction(UncacheFileAction.class);
-    addAction(ReadFileAction.class);
-    addAction(WriteFileAction.class);
-    addAction(CheckStorageAction.class);
-    addAction(SetXAttrAction.class);
-    addAction(CopyFileAction.class);
-    addAction(CopyDirectoryAction.class);
-    addAction(DeleteFileAction.class);
-    addAction(RenameFileAction.class);
-    addAction(ListFileAction.class);
-    addAction(ConcatFileAction.class);
-    addAction(AppendFileAction.class);
-    addAction(MergeFileAction.class);
-    addAction(MetaDataAction.class);
-    addAction(Copy2S3Action.class);
-    addAction(CompressionAction.class);
-    addAction(DecompressionAction.class);
-    addAction(CheckCompressAction.class);
-    addAction(TruncateAction.class);
-    addAction(SmallFileCompactAction.class);
-    addAction(SmallFileUncompactAction.class);
-    addAction(CheckSumAction.class);
-    addAction(DistCpAction.class);
-    addAction(ListErasureCodingPolicy.class);
-    addAction(CheckErasureCodingPolicy.class);
-    addAction(ErasureCodingAction.class);
-    addAction(UnErasureCodingAction.class);
-    addAction(AddErasureCodingPolicy.class);
-    addAction(RemoveErasureCodingPolicy.class);
-    addAction(EnableErasureCodingPolicy.class);
-    addAction(DisableErasureCodingPolicy.class);
+
+  @Override
+  protected List<Class<? extends SmartAction>> supportedActionClasses() {
+    return Arrays.asList(
+        AllSsdFileAction.class,
+        AllDiskFileAction.class,
+        OneSsdFileAction.class,
+        OneDiskFileAction.class,
+        RamDiskFileAction.class,
+        ArchiveFileAction.class,
+        CacheFileAction.class,
+        UncacheFileAction.class,
+        ReadFileAction.class,
+        WriteFileAction.class,
+        CheckStorageAction.class,
+        SetXAttrAction.class,
+        CopyFileAction.class,
+        CopyDirectoryAction.class,
+        DeleteFileAction.class,
+        RenameFileAction.class,
+        ListFileAction.class,
+        ConcatFileAction.class,
+        AppendFileAction.class,
+        MergeFileAction.class,
+        MetaDataAction.class,
+        Copy2S3Action.class,
+        CompressionAction.class,
+        DecompressionAction.class,
+        CheckCompressAction.class,
+        TruncateAction.class,
+        SmallFileCompactAction.class,
+        SmallFileUncompactAction.class,
+        CheckSumAction.class,
+        DistCpAction.class,
+        ListErasureCodingPolicy.class,
+        CheckErasureCodingPolicy.class,
+        ErasureCodingAction.class,
+        UnErasureCodingAction.class,
+        AddErasureCodingPolicy.class,
+        RemoveErasureCodingPolicy.class,
+        EnableErasureCodingPolicy.class,
+        DisableErasureCodingPolicy.class,
+        SyncAction.class
+    );
   }
 }
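With the factory now contributing classes via `supportedActionClasses()`, the merged registry can be inspected directly. A usage sketch — the `compress` id comes from the signatures in this diff, and `echo` is assumed to be among the common actions merged in by the base factory:

```java
import org.smartdata.action.SmartAction;
import org.smartdata.hdfs.action.HdfsActionFactory;

import java.util.Map;

public class ActionRegistrySketch {
  public static void main(String[] args) {
    HdfsActionFactory factory = new HdfsActionFactory();
    Map<String, Class<? extends SmartAction>> actions = factory.getSupportedActions();
    System.out.println(actions.containsKey("echo"));     // contributed by the common list
    System.out.println(actions.containsKey("compress")); // contributed by this factory
    System.out.println(factory.getActionMetadata().size());
  }
}
```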
org.smartdata.hdfs.client.CachingLocalFileSystemProvider; -import org.smartdata.hdfs.client.LocalFileSystemProvider; +import org.smartdata.conf.SmartFsType; +import org.smartdata.hdfs.HadoopCmdletFactoryPlugin; +import org.smartdata.hdfs.client.CachingDfsProvider; import org.smartdata.hdfs.impersonation.UserImpersonationStrategy; -import java.io.IOException; - @Slf4j -@RequiredArgsConstructor -public class HdfsCmdletFactoryPlugin implements CmdletFactoryPlugin { - private final SmartConf conf; - private final LocalFileSystemProvider localFileSystemProvider; - - public HdfsCmdletFactoryPlugin(SmartConf conf, UserImpersonationStrategy userImpersonationStrategy) { - this.conf = conf; - this.localFileSystemProvider = new CachingLocalFileSystemProvider(conf, userImpersonationStrategy); - } - - @Override - public boolean canEnrich(SmartAction action) { - return action instanceof HdfsAction; - } - - @Override - public void enrichAction(SmartAction action, String actionUser) throws ActionException { - if (!canEnrich(action)) { - return; - } - - HdfsAction hdfsAction = (HdfsAction) action; - setLocalFileSystem(hdfsAction, actionUser); - } +public class HdfsCmdletFactoryPlugin extends HadoopCmdletFactoryPlugin { - private void setLocalFileSystem(HdfsAction action, String actionUser) throws ActionException { - try { - DistributedFileSystem localFileSystem = localFileSystemProvider.provide( - conf, actionUser, action.localFsType()); - action.setLocalFileSystem(localFileSystem); - } catch (IOException exception) { - log.error("smartAction aid={} setDfsClient error", action.getActionId(), exception); - throw new ActionException(exception); - } + public HdfsCmdletFactoryPlugin(SmartConf conf, + UserImpersonationStrategy userImpersonationStrategy) { + super(conf, new CachingDfsProvider(conf, userImpersonationStrategy)); } @Override - public void close() throws IOException { - localFileSystemProvider.close(); + protected SmartFsType supportedFsType() { + return SmartFsType.HDFS; } } diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/MoveFileAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/MoveFileAction.java index b6d66a47e8..b57a241309 100644 --- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/MoveFileAction.java +++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/MoveFileAction.java @@ -65,7 +65,7 @@ protected void execute() throws Exception { validateNonEmptyArgs(FILE_PATH, MOVE_PLAN); if (movePlan.isDir()) { - localFileSystem.setStoragePolicy(filePath, storagePolicy); + localDfs.setStoragePolicy(filePath, storagePolicy); appendLog("Directory moved successfully."); return; } @@ -97,12 +97,12 @@ private int move() throws Exception { private boolean recheckModification() { try { - Optional fileStatus = getHdfsFileStatus(localFileSystem, filePath); + Optional fileStatus = getHdfsFileStatus(localDfs, filePath); if (!fileStatus.isPresent()) { return true; } - return !localFileSystem.isFileClosed(filePath) + return !localDfs.isFileClosed(filePath) || (movePlan.getFileId() != 0 && fileStatus.get().getFileId() != movePlan.getFileId()) || fileStatus.get().getLen() != movePlan.getFileLength() || fileStatus.get().getModificationTime() != movePlan.getModificationTime(); diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneDiskFileAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneDiskFileAction.java index 
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneDiskFileAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneDiskFileAction.java
index f79e16bcac..8480f65c84 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneDiskFileAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneDiskFileAction.java
@@ -25,7 +25,7 @@
 @ActionSignature(
     actionId = "onedisk",
     displayName = "onedisk",
-    usage = HdfsAction.FILE_PATH + " $file "
+    usage = HadoopAction.FILE_PATH + " $file "
 )
 public class OneDiskFileAction extends MoveFileAction {
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneSsdFileAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneSsdFileAction.java
index 5e4d3f184d..5d8124f55d 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneSsdFileAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/OneSsdFileAction.java
@@ -25,7 +25,7 @@
 @ActionSignature(
     actionId = "onessd",
     displayName = "onessd",
-    usage = HdfsAction.FILE_PATH + " $file "
+    usage = HadoopAction.FILE_PATH + " $file "
 )
 public class OneSsdFileAction extends MoveFileAction {
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/RamDiskFileAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/RamDiskFileAction.java
index 44bf6aed66..24ac5a7cd4 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/RamDiskFileAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/RamDiskFileAction.java
@@ -25,7 +25,7 @@
 @ActionSignature(
     actionId = "ramdisk",
     displayName = "ramdisk",
-    usage = HdfsAction.FILE_PATH + " $file "
+    usage = HadoopAction.FILE_PATH + " $file "
 )
 public class RamDiskFileAction extends MoveFileAction {
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SchemeHandlerRegistry.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SchemeHandlerRegistry.java
deleted file mode 100644
index 58284add00..0000000000
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SchemeHandlerRegistry.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.smartdata.hdfs.action;
-
-import org.smartdata.utils.PathUtil;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.function.Predicate;
-
-import static org.smartdata.utils.PathUtil.getScheme;
-import static org.smartdata.utils.PathUtil.isAbsoluteRemotePath;
-
-public class SchemeHandlerRegistry {
-  private final Map<Predicate<String>, ThrowingRunnable> schemeHandlers;
-  private ThrowingRunnable defaultHandler;
-
-  public SchemeHandlerRegistry() {
-    this.schemeHandlers = new LinkedHashMap<>();
-  }
-
-  public SchemeHandlerRegistry onLocalPath(ThrowingRunnable handler) {
-    schemeHandlers.put(path -> !isAbsoluteRemotePath(path), handler);
-    return this;
-  }
-
-  public SchemeHandlerRegistry onRemotePath(ThrowingRunnable handler) {
-    schemeHandlers.put(PathUtil::isAbsoluteRemotePath, handler);
-    return this;
-  }
-
-  public SchemeHandlerRegistry onSchemes(ThrowingRunnable handler, String... schemes) {
-    schemeHandlers.put(SchemePredicate.forSchemes(schemes), handler);
-    return this;
-  }
-
-  public SchemeHandlerRegistry onSchemesExcluding(ThrowingRunnable handler, String... excludedSchemes) {
-    schemeHandlers.put(SchemePredicate.forSchemesExcluding(excludedSchemes), handler);
-    return this;
-  }
-
-  public SchemeHandlerRegistry defaultHandler(ThrowingRunnable handler) {
-    this.defaultHandler = handler;
-    return this;
-  }
-
-  public void executeForPath(String path) throws Exception {
-    for (Map.Entry<Predicate<String>, ThrowingRunnable> entry : schemeHandlers.entrySet()) {
-      if (entry.getKey().test(path)) {
-        entry.getValue().run();
-        break;
-      }
-    }
-
-    if (defaultHandler == null) {
-      throw new IllegalArgumentException("Path is not supported: " + path);
-    }
-    defaultHandler.run();
-  }
-
-  private static class SchemePredicate implements Predicate<String> {
-
-    private final Set<String> schemes;
-    private final boolean shouldBeOneOfSchemes;
-
-    private SchemePredicate(boolean shouldBeOneOfSchemes, String... schemes) {
-      this.shouldBeOneOfSchemes = shouldBeOneOfSchemes;
-      this.schemes = new HashSet<>(Arrays.asList(schemes));
-    }
-
-    @Override
-    public boolean test(String path) {
-      return getScheme(path)
-          .filter(scheme -> schemes.contains(scheme) == shouldBeOneOfSchemes)
-          .isPresent();
-    }
-
-    private static SchemePredicate forSchemes(String... schemes) {
-      return new SchemePredicate(true, schemes);
-    }
-
-    private static SchemePredicate forSchemesExcluding(String... schemes) {
-      return new SchemePredicate(false, schemes);
-    }
-  }
-
-  public interface ThrowingRunnable {
-    void run() throws Exception;
-  }
-}
\ No newline at end of file
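Reviewer note (not part of the patch): the removed `executeForPath` had a control-flow bug worth recording, since nothing in the deletion calls it out. After a matching handler ran, the loop only did `break`, so the default handler still executed, and if no default was set the method threw even though a handler had already matched. Any replacement should return after the first match. A corrected sketch of the removed method body, purely illustrative:

```java
// Corrected body of the deleted method; schemeHandlers and defaultHandler
// are the fields from the removed SchemeHandlerRegistry above.
public void executeForPath(String path) throws Exception {
  for (Map.Entry<Predicate<String>, ThrowingRunnable> entry : schemeHandlers.entrySet()) {
    if (entry.getKey().test(path)) {
      entry.getValue().run();
      return; // stop after the first matching handler
    }
  }
  if (defaultHandler == null) {
    throw new IllegalArgumentException("Path is not supported: " + path);
  }
  defaultHandler.run(); // fall back only when no scheme predicate matched
}
```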
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SmallFileCompactAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SmallFileCompactAction.java
index 05ed851121..1975fc1b78 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SmallFileCompactAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/SmallFileCompactAction.java
@@ -52,7 +52,7 @@
 @ActionSignature(
     actionId = "compact",
     displayName = "compact",
-    usage = HdfsAction.FILE_PATH + " $files "
+    usage = HadoopAction.FILE_PATH + " $files "
         + SmallFileCompactAction.CONTAINER_FILE + " $container_file "
 )
 public class SmallFileCompactAction extends HdfsAction {
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/UncacheFileAction.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/UncacheFileAction.java
index 38dc2e5228..3cd06d667d 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/UncacheFileAction.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/action/UncacheFileAction.java
@@ -31,7 +31,7 @@
 @ActionSignature(
     actionId = "uncache",
     displayName = "uncache",
-    usage = HdfsAction.FILE_PATH + " $file "
+    usage = HadoopAction.FILE_PATH + " $file "
 )
 public class UncacheFileAction extends HdfsAction {
   private Path filePath;
@@ -52,7 +52,7 @@ protected void execute() throws Exception {
           "So there is no need to execute this action.", filePath));
       return;
     }
-    localFileSystem.removeCacheDirective(id);
+    localDfs.removeCacheDirective(id);
   }
 
   private Long getCacheId() throws Exception {
@@ -61,7 +61,7 @@ private Long getCacheId() throws Exception {
         .build();
 
     RemoteIterator<CacheDirectiveEntry> directiveEntries =
-        localFileSystem.listCacheDirectives(filter);
+        localDfs.listCacheDirectives(filter);
     if (!directiveEntries.hasNext()) {
       return null;
     }
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/CachingLocalFileSystemProvider.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/CachingDfsProvider.java
similarity index 78%
rename from smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/CachingLocalFileSystemProvider.java
rename to smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/CachingDfsProvider.java
index 4c791a7703..9e791e102d 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/CachingLocalFileSystemProvider.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/client/CachingDfsProvider.java
@@ -22,46 +22,38 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.smartdata.hadoop.filesystem.SmartFileSystem;
-import org.smartdata.hdfs.action.HdfsAction;
+import org.smartdata.hdfs.HadoopUtil;
+import org.smartdata.hdfs.action.HadoopAction;
 import org.smartdata.hdfs.impersonation.UserImpersonationStrategy;
-import org.smartdata.utils.StringUtil;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.time.Duration;
 
-import static org.smartdata.conf.SmartConfKeys.SMART_ACTION_CLIENT_CACHE_TTL_DEFAULT;
-import static org.smartdata.conf.SmartConfKeys.SMART_ACTION_CLIENT_CACHE_TTL_KEY;
 import static org.smartdata.conf.SmartConfKeys.SMART_CLIENT_CONCURRENT_REPORT_ENABLED;
-import static org.smartdata.hdfs.HadoopUtil.doAsCurrentUser;
 import static org.smartdata.utils.ConfigUtil.getSsmMasterRpcAddress;
 
 @Slf4j
 @RequiredArgsConstructor
-public class CachingLocalFileSystemProvider implements LocalFileSystemProvider {
+public class CachingDfsProvider implements LocalFileSystemProvider {
   private final FileSystemCache smartFsCache;
   private final FileSystemCache defaultFsCache;
 
-  public CachingLocalFileSystemProvider(
+  public CachingDfsProvider(
       Configuration config, UserImpersonationStrategy userImpersonationStrategy) {
-    String cacheKeyTtl = config.get(
-        SMART_ACTION_CLIENT_CACHE_TTL_KEY, SMART_ACTION_CLIENT_CACHE_TTL_DEFAULT);
-    Duration cacheKeyTtlDuration = Duration.ofMillis(
-        StringUtil.parseTimeString(cacheKeyTtl));
-
+    Duration cacheKeyTtlDuration = FileSystemCache.getCacheTtl(config);
     this.smartFsCache = new SmartFileSystemCache(userImpersonationStrategy, cacheKeyTtlDuration);
     this.defaultFsCache = new DefaultFileSystemCache(userImpersonationStrategy, cacheKeyTtlDuration);
   }
 
   @Override
-  public DistributedFileSystem provide(Configuration config, String user, HdfsAction.FsType fsType)
+  public DistributedFileSystem provide(Configuration config, String user, HadoopAction.FsType fsType)
       throws IOException {
-    InetSocketAddress ssmMasterAddress = getSsmMasterRpcAddress(config);
-
-    return fsType == HdfsAction.FsType.SMART
-        ? smartFsCache.get(config, user, ssmMasterAddress)
+    return fsType == HadoopAction.FsType.SMART
+        ? smartFsCache.get(config, user, getSsmMasterRpcAddress(config))
        // we don't rely on SSM in case of pure HDFS client
        : defaultFsCache.get(config, user, null);
   }
@@ -80,6 +72,11 @@ private SmartFileSystemCache(
       super(userImpersonationStrategy, keyTtl);
     }
 
+    @Override
+    protected URI getServiceUri(Configuration config) throws IOException {
+      return HadoopUtil.getNameNodeUri(config);
+    }
+
     @Override
     protected SmartFileSystem createFileSystem(Configuration config, CacheKey cacheKey) {
       try {
@@ -96,9 +93,9 @@ protected SmartFileSystem createFileSystem(Configuration config, CacheKey cacheKey) {
     private SmartFileSystem createSmartFileSystem(
         Configuration config, CacheKey cacheKey) throws IOException {
       SmartDFSClient smartDfsClient = new SmartDFSClient(
-          cacheKey.getNameNodeUri(), config, cacheKey.getSsmMasterAddress());
+          cacheKey.getServiceUri(), config, cacheKey.getSsmMasterAddress());
       SmartFileSystem fileSystem = new SmartFileSystem(smartDfsClient);
-      fileSystem.initialize(cacheKey.getNameNodeUri(), config);
+      fileSystem.initialize(cacheKey.getServiceUri(), config);
       return fileSystem;
     }
   }
@@ -110,11 +107,16 @@ private DefaultFileSystemCache(
       super(userImpersonationStrategy, keyTtl);
     }
 
+    @Override
+    protected URI getServiceUri(Configuration config) throws IOException {
+      return HadoopUtil.getNameNodeUri(config);
+    }
+
     @Override
     protected DistributedFileSystem createFileSystem(Configuration config, CacheKey cacheKey) {
       try {
         DistributedFileSystem fileSystem = new DistributedFileSystem();
-        fileSystem.initialize(cacheKey.getNameNodeUri(), config);
+        fileSystem.initialize(cacheKey.getServiceUri(), config);
         return fileSystem;
       } catch (IOException exception) {
        throw new RuntimeException("Error creating default file system", exception);
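Reviewer note (not part of the patch): the rename centralizes the TTL plumbing (now read via `FileSystemCache.getCacheTtl(config)`) and moves URI resolution behind a per-cache `getServiceUri` hook, so an Ozone-flavored cache can later resolve a different service URI. A minimal, self-contained sketch of the underlying idea, a TTL-keyed cache of expensive clients, assuming nothing about the real `FileSystemCache` internals:

```java
import java.time.Duration;
import java.time.Instant;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Illustrative only, not SSM's FileSystemCache: entries are rebuilt after
// `ttl`, so stale clients (e.g. ones bound to a dead namenode) age out.
class TtlCache<K, V> {
  private final Duration ttl;
  private final Function<K, V> factory;
  private final Map<K, Entry<V>> entries = new ConcurrentHashMap<>();

  TtlCache(Duration ttl, Function<K, V> factory) {
    this.ttl = ttl;
    this.factory = factory;
  }

  V get(K key) {
    return entries.compute(key, (k, old) ->
        old != null && old.expiresAt.isAfter(Instant.now())
            ? old
            : new Entry<>(factory.apply(k), Instant.now().plus(ttl))
    ).value;
  }

  private static final class Entry<V> {
    final V value;
    final Instant expiresAt;

    Entry(V value, Instant expiresAt) {
      this.value = value;
      this.expiresAt = expiresAt;
    }
  }

  public static void main(String[] args) {
    TtlCache<String, Long> cache = new TtlCache<>(Duration.ofSeconds(30), k -> System.nanoTime());
    System.out.println(cache.get("hdfs://nn1").equals(cache.get("hdfs://nn1"))); // true within TTL
  }
}
```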
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/file/equality/ChecksumFileEqualityStrategy.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/file/equality/ChecksumFileEqualityStrategy.java
index 141ae654e8..befd23ae93 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/file/equality/ChecksumFileEqualityStrategy.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/file/equality/ChecksumFileEqualityStrategy.java
@@ -25,6 +25,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.smartdata.hdfs.HadoopUtil;
+import org.smartdata.model.BaseFileInfo;
 import org.smartdata.model.FileInfo;
 
 import java.io.IOException;
@@ -52,7 +53,7 @@ private FileSystem getFileSystem(Path path, Configuration conf) throws IOException {
   }
 
   @Override
-  public boolean areEqual(FileInfo srcFileInfo, FileStatus destFileStatus) {
+  public boolean areEqual(BaseFileInfo srcFileInfo, FileStatus destFileStatus) {
     if (!filesLengthComparator.areEqual(srcFileInfo, destFileStatus)) {
       // we don't need to fetch and compare checksums
       // if the files are obviously not equal.
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/file/equality/FileEqualityStrategy.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/file/equality/FileEqualityStrategy.java
index b594ad788d..7755288db2 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/file/equality/FileEqualityStrategy.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/file/equality/FileEqualityStrategy.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.smartdata.conf.SmartConfKeys;
+import org.smartdata.model.BaseFileInfo;
 import org.smartdata.model.FileInfo;
 
 public interface FileEqualityStrategy {
@@ -29,7 +30,7 @@ enum Strategy {
     CHECKSUM
   }
 
-  boolean areEqual(FileInfo srcFileInfo, FileStatus destFileStatus);
+  boolean areEqual(BaseFileInfo srcFileInfo, FileStatus destFileStatus);
 
   static FileEqualityStrategy from(Configuration conf) {
     String rawStrategy = conf.get(
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/file/equality/LengthFileEqualityStrategy.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/file/equality/LengthFileEqualityStrategy.java
index 9942847bde..2469f09a50 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/file/equality/LengthFileEqualityStrategy.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/file/equality/LengthFileEqualityStrategy.java
@@ -17,14 +17,15 @@
  */
 package org.smartdata.hdfs.file.equality;
 
-import java.util.Optional;
 import org.apache.hadoop.fs.FileStatus;
-import org.smartdata.model.FileInfo;
+import org.smartdata.model.BaseFileInfo;
+
+import java.util.Optional;
 
 public class LengthFileEqualityStrategy implements FileEqualityStrategy {
 
   @Override
-  public boolean areEqual(FileInfo srcFileInfo, FileStatus destFileStatus) {
+  public boolean areEqual(BaseFileInfo srcFileInfo, FileStatus destFileStatus) {
     return Optional.ofNullable(destFileStatus)
         .map(FileStatus::getLen)
         .filter(length -> length == srcFileInfo.getLength())
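Reviewer note (not part of the patch): switching `areEqual` from `FileInfo` to `BaseFileInfo` is a parameter-type widening. The strategies only read path and length, so accepting the narrower read-only view lets callers pass a full `FileInfo`, a lightweight DB projection, or a test stub. A minimal model of the idea; only `BaseFileInfo` mirrors a real name from this diff, everything else here is made up:

```java
import java.util.Objects;

// Mirrors only what the equality strategies in this diff actually read;
// the real BaseFileInfo interface may declare more.
interface BaseFileInfoSketch {
  String getPath();
  long getLength();
}

final class LengthOnlyEquality {
  // Accepting the minimal interface means any source of (path, length) works.
  static boolean areEqual(BaseFileInfoSketch src, long destLength) {
    return Objects.nonNull(src) && src.getLength() == destLength;
  }

  public static void main(String[] args) {
    BaseFileInfoSketch stub = new BaseFileInfoSketch() {
      @Override public String getPath() { return "/file1"; }
      @Override public long getLength() { return 42L; }
    };
    System.out.println(areEqual(stub, 42L)); // true
  }
}
```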
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CacheScheduler.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CacheScheduler.java
index 6766a64591..7f9fe9e938 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CacheScheduler.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CacheScheduler.java
@@ -26,7 +26,7 @@
 import org.slf4j.LoggerFactory;
 import org.smartdata.SmartContext;
 import org.smartdata.hdfs.HadoopUtil;
-import org.smartdata.hdfs.action.HdfsAction;
+import org.smartdata.hdfs.action.HadoopAction;
 import org.smartdata.model.ActionInfo;
 import org.smartdata.model.CmdletInfo;
 import org.smartdata.model.LaunchAction;
@@ -65,7 +65,7 @@ public List<String> getSupportedActions() {
   }
 
   public boolean isLocked(ActionInfo actionInfo) {
-    String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String srcPath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     return fileLock.contains(srcPath);
   }
 
@@ -82,7 +82,7 @@ public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo) {
   @Override
   public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
       LaunchCmdlet cmdlet, LaunchAction action) {
-    String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String srcPath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     fileLock.add(srcPath);
     return ScheduleResult.SUCCESS;
   }
@@ -125,7 +125,7 @@ public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo) {
       return;
     }
     if (isLocked(actionInfo)) {
-      fileLock.remove(actionInfo.getArgs().get(HdfsAction.FILE_PATH));
+      fileLock.remove(actionInfo.getArgs().get(HadoopAction.FILE_PATH));
     }
   }
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CompressionScheduler.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CompressionScheduler.java
index 148c26adea..abec8ff03f 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CompressionScheduler.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CompressionScheduler.java
@@ -28,7 +28,7 @@
 import org.smartdata.conf.SmartConfKeys;
 import org.smartdata.hdfs.HadoopUtil;
 import org.smartdata.hdfs.action.CompressionAction;
-import org.smartdata.hdfs.action.HdfsAction;
+import org.smartdata.hdfs.action.HadoopAction;
 import org.smartdata.hdfs.action.DecompressionAction;
 import org.smartdata.metastore.MetaStore;
 import org.smartdata.metastore.MetaStoreException;
@@ -111,7 +111,7 @@ public void recover(ActionInfo actionInfo) {
         !actionInfo.getActionName().equals(DECOMPRESSION_ACTION_ID)) {
       return;
     }
-    String filePath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String filePath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     fileLock.add(filePath);
   }
 
@@ -168,7 +168,7 @@ public boolean supportDecompression(String path) throws MetaStoreException, IOException {
   }
 
   private String createTmpName(LaunchAction action) {
-    String path = action.getArgs().get(HdfsAction.FILE_PATH);
+    String path = action.getArgs().get(HadoopAction.FILE_PATH);
     String fileName;
     int index = path.lastIndexOf("/");
     if (index == path.length() - 1) {
@@ -188,7 +188,7 @@ private String createTmpName(LaunchAction action) {
 
   @Override
   public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo) {
-    String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String srcPath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
 
     if (!actions.contains(actionInfo.getActionName())) {
       return false;
@@ -233,7 +233,7 @@ public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
   }
 
   public void afterSchedule(ActionInfo actionInfo) {
-    String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String srcPath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     // lock the file only if ec or unec action is scheduled
     fileLock.add(srcPath);
     try {
@@ -249,7 +249,7 @@ public void afterSchedule(ActionInfo actionInfo) {
    */
   @Override
   public boolean isSuccessfulBySpeculation(ActionInfo actionInfo) {
-    String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String path = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     try {
       FileState fileState = HadoopUtil.getFileState(dfsClient, path);
       FileState.FileType fileType = fileState.getFileType();
@@ -282,7 +282,7 @@ private void setOldFileId(ActionInfo actionInfo) throws IOException {
       return;
     }
     List<Long> oids = new ArrayList<>();
-    String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String path = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     try {
       oids.add(dfsClient.getFileInfo(path).getFileId());
     } catch (IOException e) {
@@ -297,7 +297,7 @@ public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo) {
     if (!actionInfo.isFinished()) {
       return;
     }
-    String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String srcPath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     try {
       // Compression Action failed
       if (actionInfo.getActionName().equals(COMPRESSION_ACTION_ID) &&
@@ -335,7 +335,7 @@ public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo) {
    */
   public void takeOverAccessCount(ActionInfo actionInfo) {
     try {
-      String filePath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+      String filePath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
       long oldFid = actionInfo.getOldFileIds().get(0);
       // The new fid may have not been updated in metastore, so
       // we get it from dfs client.
@@ -376,6 +376,6 @@ private void onDecompressActionFinished(ActionInfo actionInfo)
       return;
     }
     // Delete the record from compression_file table
-    metaStore.deleteFileState(actionInfo.getArgs().get(HdfsAction.FILE_PATH));
+    metaStore.deleteFileState(actionInfo.getArgs().get(HadoopAction.FILE_PATH));
   }
 }
\ No newline at end of file
source file " + path + " is locked"); @@ -88,7 +89,7 @@ public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo) throws IOE @Override public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo) { - String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH); + String path = actionInfo.getArgs().get(HadoopAction.FILE_PATH); if (actionInfo.isFinished() && actionInfo.isSuccessful()) { // Insert fileState try { @@ -123,8 +124,8 @@ private boolean isLocked(String filePath) { private Optional getFileLength(String fileName) { try { - return Optional.ofNullable(metaStore.getFile(fileName)) - .map(FileInfo::getLength); + return Optional.ofNullable(metaStore.getBaseFileInfo(fileName)) + .map(BaseFileInfo::getLength); } catch (MetaStoreException e) { LOG.warn("Error fetching info about file: {}", fileName, e); return Optional.empty(); diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CopyScheduler.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CopyScheduler.java index c980f3e1d9..e9b05ea50c 100644 --- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CopyScheduler.java +++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CopyScheduler.java @@ -31,11 +31,12 @@ import org.smartdata.exception.ActionRejectedException; import org.smartdata.hdfs.action.CopyDirectoryAction; import org.smartdata.hdfs.action.CopyFileAction; -import org.smartdata.hdfs.action.HdfsAction; +import org.smartdata.hdfs.action.HadoopAction; import org.smartdata.hdfs.file.equality.FileEqualityStrategy; import org.smartdata.metastore.MetaStore; import org.smartdata.metastore.MetaStoreException; import org.smartdata.model.ActionInfo; +import org.smartdata.model.BaseFileInfo; import org.smartdata.model.CmdletInfo; import org.smartdata.model.CompressionFileState; import org.smartdata.model.FileDiff; @@ -44,16 +45,15 @@ import org.smartdata.model.FileInfo; import org.smartdata.model.FileState; import org.smartdata.model.LaunchAction; +import org.smartdata.model.action.ActionSchedulerService; import org.smartdata.model.action.ScheduleResult; import org.smartdata.protocol.message.LaunchCmdlet; -import org.smartdata.model.action.ActionSchedulerService; import java.io.IOException; import java.net.URI; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -91,7 +91,6 @@ import static org.smartdata.model.FileDiffState.PENDING; import static org.smartdata.model.FileDiffState.isTerminalState; import static org.smartdata.model.FileDiffType.DELETE; -import static org.smartdata.utils.ConfigUtil.toRemoteClusterConfig; import static org.smartdata.utils.FileDiffUtils.getDest; import static org.smartdata.utils.FileDiffUtils.getLength; import static org.smartdata.utils.FileDiffUtils.getOffset; @@ -204,7 +203,7 @@ public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo, return ScheduleResult.FAIL; } String srcDir = action.getArgs().get(SyncAction.SRC); - String path = action.getArgs().get(HdfsAction.FILE_PATH); + String path = action.getArgs().get(HadoopAction.FILE_PATH); String destDir = action.getArgs().get(SyncAction.DEST); String preserveAttributes = action.getArgs().get(SyncAction.PRESERVE); String destPath = path.replaceFirst(srcDir, destDir); @@ -256,7 +255,7 @@ public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo 
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CopyScheduler.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CopyScheduler.java
index c980f3e1d9..e9b05ea50c 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CopyScheduler.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/CopyScheduler.java
@@ -31,11 +31,12 @@
 import org.smartdata.exception.ActionRejectedException;
 import org.smartdata.hdfs.action.CopyDirectoryAction;
 import org.smartdata.hdfs.action.CopyFileAction;
-import org.smartdata.hdfs.action.HdfsAction;
+import org.smartdata.hdfs.action.HadoopAction;
 import org.smartdata.hdfs.file.equality.FileEqualityStrategy;
 import org.smartdata.metastore.MetaStore;
 import org.smartdata.metastore.MetaStoreException;
 import org.smartdata.model.ActionInfo;
+import org.smartdata.model.BaseFileInfo;
 import org.smartdata.model.CmdletInfo;
 import org.smartdata.model.CompressionFileState;
 import org.smartdata.model.FileDiff;
@@ -44,16 +45,15 @@
 import org.smartdata.model.FileInfo;
 import org.smartdata.model.FileState;
 import org.smartdata.model.LaunchAction;
+import org.smartdata.model.action.ActionSchedulerService;
 import org.smartdata.model.action.ScheduleResult;
 import org.smartdata.protocol.message.LaunchCmdlet;
-import org.smartdata.model.action.ActionSchedulerService;
 
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -91,7 +91,6 @@
 import static org.smartdata.model.FileDiffState.PENDING;
 import static org.smartdata.model.FileDiffState.isTerminalState;
 import static org.smartdata.model.FileDiffType.DELETE;
-import static org.smartdata.utils.ConfigUtil.toRemoteClusterConfig;
 import static org.smartdata.utils.FileDiffUtils.getDest;
 import static org.smartdata.utils.FileDiffUtils.getLength;
 import static org.smartdata.utils.FileDiffUtils.getOffset;
@@ -204,7 +203,7 @@ public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
       return ScheduleResult.FAIL;
     }
     String srcDir = action.getArgs().get(SyncAction.SRC);
-    String path = action.getArgs().get(HdfsAction.FILE_PATH);
+    String path = action.getArgs().get(HadoopAction.FILE_PATH);
     String destDir = action.getArgs().get(SyncAction.DEST);
     String preserveAttributes = action.getArgs().get(SyncAction.PRESERVE);
     String destPath = path.replaceFirst(srcDir, destDir);
@@ -256,7 +255,7 @@ public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
         if (preserveAttributes != null) {
           action.getArgs().put(CopyFileAction.PRESERVE, preserveAttributes);
         }
-       if (rateLimiter != null) {
+        if (rateLimiter != null) {
           String strLen = getLength(fileDiff);
           if (strLen != null) {
             int appendLen = (int) (Long.parseLong(strLen) >> 20);
@@ -278,11 +277,11 @@ public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
         break;
       case DELETE:
         action.setActionType("delete");
-        action.getArgs().put(HdfsAction.FILE_PATH, destPath);
+        action.getArgs().put(HadoopAction.FILE_PATH, destPath);
         break;
       case RENAME:
         action.setActionType("rename");
-        action.getArgs().put(HdfsAction.FILE_PATH, destPath);
+        action.getArgs().put(HadoopAction.FILE_PATH, destPath);
         // TODO scope check
         String remoteDest = getDest(fileDiff);
         action.getArgs().put("-dest", remoteDest.replaceFirst(srcDir, destDir));
@@ -290,7 +289,7 @@ public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
         break;
       case METADATA:
         action.setActionType("metadata");
-        action.getArgs().put(HdfsAction.FILE_PATH, destPath);
+        action.getArgs().put(HadoopAction.FILE_PATH, destPath);
         break;
       default:
         break;
@@ -355,7 +354,7 @@ public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo)
     if (actionInfo.getArgs() == null) {
       throw new ActionRejectedException("No arguments for the action");
     }
-    String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String path = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     LOG.debug("Submit file {} with lock {}", path, fileLocks);
     // If locked then false
     if (!isFileLocked(path)) {
@@ -465,74 +464,15 @@ private void runBatchInitialSync() throws MetaStoreException {
     }
   }
 
-  private List<FileStatus> listFileStatusesOfDirs(String dirName) {
-    List<FileStatus> fileStatuses = new ArrayList<>();
-    try {
-      // We simply use local HDFS conf for getting remote file system.
-      // The smart file system configured for local HDFS should not be
-      // introduced to remote file system.
-      Configuration remoteConf = toRemoteClusterConfig(conf);
-      FileSystem fs = FileSystem.get(URI.create(dirName), remoteConf);
-
-      FileStatus[] directoryFileStatuses = fs.listStatus(new Path(dirName));
-      for (FileStatus fileStatus : directoryFileStatuses) {
-        // add directory
-        fileStatuses.add(fileStatus);
-
-        if (!fileStatus.isDirectory()) {
-          continue;
-        }
-
-        //all the file in this fileStatuses
-        // todo replace recursion with queue
-        List<FileStatus> childFileStatuses = listFileStatusesOfDirs(fileStatus.getPath().getName());
-        if (!childFileStatuses.isEmpty()) {
-          fileStatuses.addAll(childFileStatuses);
-        }
-      }
-    } catch (IOException e) {
-      LOG.debug("Fetch remote file list error!", e);
-    }
-    return fileStatuses;
-  }
-
   private void initialSync(String srcDir, String destDir) throws MetaStoreException {
-    List<FileInfo> srcFiles = metaStore.getFilesByPrefix(srcDir);
+    List<String> srcFiles = metaStore.getFilePathsByPrefix(srcDir);
     LOG.info("Directory initial sync {} files", srcFiles.size());
-    //
-    Map<String, FileInfo> filesToSync = new HashMap<>();
-    for (FileInfo fileInfo : srcFiles) {
-      // Remove prefix/parent
-      filesToSync.put(fileInfo.getPath().replaceFirst(srcDir, ""), fileInfo);
-    }
-
-    // recursively file lists
-    List<FileStatus> fileStatuses = listFileStatusesOfDirs(destDir);
-    if (fileStatuses.isEmpty()) {
-      LOG.debug("Remote directory is empty!");
-    } else {
-      LOG.debug("Remote directory contains {} files!", fileStatuses.size());
-      for (FileStatus fileStatus : fileStatuses) {
-        // only get file name
-        // todo it can be buggy because of .getPath().getName()
-        String destName = fileStatus.getPath().getName();
-        if (filesToSync.containsKey(destName)) {
-          FileInfo fileInfo = filesToSync.get(destName);
-          String src = fileInfo.getPath();
-          String dest = src.replaceFirst(srcDir, destDir);
-          initialSyncQueue.put(src, dest);
-          filesToSync.remove(destName);
-        }
-      }
+    for (String srcFile : srcFiles) {
+      String dest = srcFile.replaceFirst(srcDir, destDir);
+      initialSyncQueue.put(srcFile, dest);
     }
 
-    LOG.debug("Directory Base Sync {} files", filesToSync.size());
-    for (FileInfo fileInfo : filesToSync.values()) {
-      String src = fileInfo.getPath();
-      String dest = src.replaceFirst(srcDir, destDir);
-      initialSyncQueue.put(src, dest);
-    }
     runBatchInitialSync();
   }
 
@@ -562,7 +502,7 @@ private void mergePendingDiffs(String src) throws MetaStoreException {
   }
 
   private FileDiff runFileInitialSync(String src, String dest) throws MetaStoreException {
-    FileInfo srcFileInfo = metaStore.getFile(src);
+    BaseFileInfo srcFileInfo = metaStore.getBaseFileInfo(src);
     if (srcFileInfo == null || fileLocks.contains(src)) {
       // Primary file doesn't exist or file is syncing
       return null;
@@ -589,7 +529,7 @@ private FileDiff runFileInitialSync(String src, String dest) throws MetaStoreException {
   }
 
   private FileDiff createAppendFileDiff(
-      FileInfo srcFileInfo, FileStatus remoteFileStatus, long copyStartOffset) {
+      BaseFileInfo srcFileInfo, FileStatus remoteFileStatus, long copyStartOffset) {
     FileDiff fileDiff = new FileDiff(FileDiffType.APPEND, FileDiffState.PENDING);
 
     fileDiff.setSrc(srcFileInfo.getPath());
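Reviewer note (not part of the patch): the rewritten `initialSync` drops the recursive remote listing and derives every destination purely from the metastore prefix query, which removes the fragile `getPath().getName()` matching flagged by the old TODOs. One pre-existing caveat the patch keeps: `String.replaceFirst(srcDir, destDir)` treats `srcDir` as a regex and `destDir` as a replacement template, so directory names containing regex metacharacters, `$`, or `\` would misbehave. A literal-safe variant, purely illustrative:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Pattern.quote / Matcher.quoteReplacement make the same rewrite literal-safe.
final class PrefixRewrite {
  static String rewrite(String path, String srcDir, String destDir) {
    return path.replaceFirst(Pattern.quote(srcDir), Matcher.quoteReplacement(destDir));
  }

  public static void main(String[] args) {
    System.out.println(rewrite("/backup(v1)/a.txt", "/backup(v1)", "hdfs://remote/backup"));
    // -> hdfs://remote/backup/a.txt
  }
}
```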
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/ErasureCodingScheduler.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/ErasureCodingScheduler.java
index 5867734fc2..ef0310a277 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/ErasureCodingScheduler.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/ErasureCodingScheduler.java
@@ -29,7 +29,7 @@
 import org.smartdata.hdfs.CompatibilityHelper;
 import org.smartdata.hdfs.CompatibilityHelperLoader;
 import org.smartdata.hdfs.HadoopUtil;
-import org.smartdata.hdfs.action.HdfsAction;
+import org.smartdata.hdfs.action.HadoopAction;
 import org.smartdata.metastore.MetaStore;
 import org.smartdata.metastore.MetaStoreException;
 import org.smartdata.model.ActionInfo;
@@ -114,7 +114,7 @@ public void recover(ActionInfo actionInfo) {
         !actionInfo.getActionName().equals(UNEC_ACTION_ID)) {
       return;
     }
-    String filePath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String filePath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     fileLock.add(filePath);
   }
 
@@ -125,15 +125,15 @@ public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo)
       return true;
     }
 
-    if (actionInfo.getArgs().get(HdfsAction.FILE_PATH) == null) {
+    if (actionInfo.getArgs().get(HadoopAction.FILE_PATH) == null) {
       throw new ActionRejectedException("File path is required for action "
           + actionInfo.getActionName() + "!");
     }
-    String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String srcPath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     // The root dir should be excluded in checking whether file path ends with slash.
     if (!srcPath.equals("/") && srcPath.endsWith("/")) {
       srcPath = srcPath.substring(0, srcPath.length() - 1);
-      actionInfo.getArgs().put(HdfsAction.FILE_PATH, srcPath);
+      actionInfo.getArgs().put(HadoopAction.FILE_PATH, srcPath);
     }
     // For ec or unec action, check if the file is locked.
     if (actionInfo.getActionName().equals(EC_ACTION_ID) ||
@@ -154,7 +154,7 @@ public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
       return ScheduleResult.SUCCESS;
     }
 
-    String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String srcPath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     if (srcPath == null) {
       actionInfo.appendLog("No file is given in this action!");
       return ScheduleResult.FAIL;
@@ -203,7 +203,7 @@ public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
 
   @Override
   public boolean isSuccessfulBySpeculation(ActionInfo actionInfo) {
-    String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String srcPath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     try {
       HdfsFileStatus fileStatus = dfsClient.getFileInfo(srcPath);
       CompatibilityHelper compatibilityHelper =
@@ -232,7 +232,7 @@ public boolean isSuccessfulBySpeculation(ActionInfo actionInfo) {
    * the old file id is kept in a map.
    */
   public void afterSchedule(ActionInfo actionInfo) {
-    String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String srcPath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     // lock the file only if ec or unec action is scheduled
     fileLock.add(srcPath);
     try {
@@ -254,7 +254,7 @@ private void setOldFileId(ActionInfo actionInfo) throws IOException {
       return;
     }
     List<Long> oids = new ArrayList<>();
-    String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String path = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     try {
       oids.add(dfsClient.getFileInfo(path).getFileId());
     } catch (IOException e) {
@@ -265,7 +265,7 @@ private void setOldFileId(ActionInfo actionInfo) throws IOException {
   }
 
   private String createTmpName(LaunchAction action) {
-    String path = action.getArgs().get(HdfsAction.FILE_PATH);
+    String path = action.getArgs().get(HadoopAction.FILE_PATH);
     String fileName;
     int index = path.lastIndexOf("/");
     if (index == path.length() - 1) {
@@ -292,7 +292,7 @@ public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo) {
         actionInfo.getActionName().equals(UNEC_ACTION_ID)) {
       String filePath = null;
       try {
-        filePath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+        filePath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
         if (!actionInfo.isSuccessful()) {
           return;
         }
@@ -315,7 +315,7 @@ public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo) {
    */
   public void takeOverAccessCount(ActionInfo actionInfo) {
     try {
-      String filePath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+      String filePath = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
       long oldFid = actionInfo.getOldFileIds().get(0);
       // The new fid may have not been updated in metastore, so
       // we get it from dfs client.
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/MoverScheduler.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/MoverScheduler.java
index cc7b0e2ef0..d16a054709 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/MoverScheduler.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/MoverScheduler.java
@@ -27,7 +27,7 @@
 import org.smartdata.conf.SmartConfKeys;
 import org.smartdata.exception.ActionRejectedException;
 import org.smartdata.hdfs.HadoopUtil;
-import org.smartdata.hdfs.action.HdfsAction;
+import org.smartdata.hdfs.action.HadoopAction;
 import org.smartdata.hdfs.action.MoveFileAction;
 import org.smartdata.hdfs.metric.fetcher.DatanodeStorageReportProcTask;
 import org.smartdata.hdfs.metric.fetcher.MovePlanMaker;
@@ -133,9 +133,9 @@ public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo)
       throw new ActionRejectedException("No arguments for the action");
     }
 
-    if (fileLock.contains(actionInfo.getArgs().get(HdfsAction.FILE_PATH))) {
+    if (fileLock.contains(actionInfo.getArgs().get(HadoopAction.FILE_PATH))) {
       LOG.warn("The file {} is locked by other mover action!",
-          actionInfo.getArgs().get(HdfsAction.FILE_PATH));
+          actionInfo.getArgs().get(HadoopAction.FILE_PATH));
       return false;
     }
     return true;
@@ -148,7 +148,7 @@ public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
       return ScheduleResult.SUCCESS;
     }
 
-    String file = action.getArgs().get(HdfsAction.FILE_PATH);
+    String file = action.getArgs().get(HadoopAction.FILE_PATH);
     if (file == null) {
       actionInfo.appendLog("File path not specified!\n");
       return ScheduleResult.FAIL;
@@ -192,7 +192,7 @@ public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
       }
       plan.setNamenode(nnUri);
       action.getArgs().put(MoveFileAction.MOVE_PLAN, plan.toString());
-      fileLock.add(action.getArgs().get(HdfsAction.FILE_PATH));
+      fileLock.add(action.getArgs().get(HadoopAction.FILE_PATH));
       return ScheduleResult.SUCCESS;
     } catch (IOException e) {
       actionInfo.appendLogLine(e.getMessage());
@@ -207,7 +207,7 @@ public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo,
 
   @Override
   public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo) {
-    fileLock.remove(actionInfo.getArgs().get(HdfsAction.FILE_PATH));
+    fileLock.remove(actionInfo.getArgs().get(HadoopAction.FILE_PATH));
   }
 
   private class UpdateClusterInfoTask implements Runnable {
diff --git a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/SmallFileScheduler.java b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/SmallFileScheduler.java
index e078110f83..40cb4361a6 100644
--- a/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/SmallFileScheduler.java
+++ b/smart-hadoop-support/smart-hadoop/src/main/java/org/smartdata/hdfs/scheduler/SmallFileScheduler.java
@@ -26,7 +26,7 @@
 import org.smartdata.SmartFilePermission;
 import org.smartdata.exception.ActionRejectedException;
 import org.smartdata.hdfs.HadoopUtil;
-import org.smartdata.hdfs.action.HdfsAction;
+import org.smartdata.hdfs.action.HadoopAction;
 import org.smartdata.hdfs.action.SmallFileCompactAction;
 import org.smartdata.hdfs.action.SmallFileUncompactAction;
 import org.smartdata.metastore.MetaStore;
@@ -172,7 +172,7 @@ public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo)
     }
 
     // Check if small files is null or empty
-    String smallFiles = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
+    String smallFiles = actionInfo.getArgs().get(HadoopAction.FILE_PATH);
     if (smallFiles == null || smallFiles.isEmpty()) {
       throw new ActionRejectedException("Illegal small files: " + smallFiles);
     }
@@ -322,7 +322,7 @@ private ScheduleResult getCompactScheduleResult(ActionInfo actionInfo) {
     // Get container file and small file list of this action
     String containerFilePath = getContainerFile(actionInfo);
     ArrayList<String> smallFileList = new Gson().fromJson(
-        actionInfo.getArgs().get(HdfsAction.FILE_PATH),
+        actionInfo.getArgs().get(HadoopAction.FILE_PATH),
         new TypeToken<ArrayList<String>>() {
         }.getType());
 
@@ -431,7 +431,7 @@ private ScheduleResult getUncompactScheduleResult(ActionInfo actionInfo,
 
     // Put small files into arguments of this action
     Map<String, String> args = new HashMap<>(2);
-    args.put(HdfsAction.FILE_PATH, new Gson().toJson(smallFileList));
+    args.put(HadoopAction.FILE_PATH, new Gson().toJson(smallFileList));
     args.put(SmallFileUncompactAction.CONTAINER_FILE,
         getContainerFile(actionInfo));
     action.setArgs(args);
@@ -560,7 +560,7 @@ private void handleCompactActionResult(ActionInfo actionInfo) {
     // Get container file path, small files, result of this action
     String containerFilePath = getContainerFile(actionInfo);
     List<String> smallFileList = new Gson().fromJson(
-        actionInfo.getArgs().get(HdfsAction.FILE_PATH),
+        actionInfo.getArgs().get(HadoopAction.FILE_PATH),
         new TypeToken<List<String>>() {
         }.getType());
     List<CompactFileState> compactFileStates = new Gson().fromJson(
@@ -622,7 +622,7 @@ public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo) {
   }
 
   public List<String> getSmallFileList(ActionInfo actionInfo) {
-    return new Gson().fromJson(actionInfo.getArgs().get(HdfsAction.FILE_PATH),
+    return new Gson().fromJson(actionInfo.getArgs().get(HadoopAction.FILE_PATH),
         new TypeToken<ArrayList<String>>() {
         }.getType());
   }
diff --git a/smart-hadoop-support/smart-hadoop/src/test/java/org/smartdata/hdfs/action/TestActionRegistry.java b/smart-hadoop-support/smart-hadoop/src/test/java/org/smartdata/hdfs/action/TestActionRegistry.java
index aba7b7b8fe..a03ccdaa35 100644
--- a/smart-hadoop-support/smart-hadoop/src/test/java/org/smartdata/hdfs/action/TestActionRegistry.java
+++ b/smart-hadoop-support/smart-hadoop/src/test/java/org/smartdata/hdfs/action/TestActionRegistry.java
@@ -19,30 +19,38 @@
 
 import org.apache.hadoop.util.VersionInfo;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.smartdata.action.ActionException;
 import org.smartdata.action.ActionRegistry;
 
-import java.io.IOException;
+import java.util.Collections;
 import java.util.Set;
 
 public class TestActionRegistry {
+  private ActionRegistry actionRegistry;
+
+  @Before
+  public void init() {
+    actionRegistry = new ActionRegistry(
+        Collections.singletonList(new HdfsActionFactory()));
+  }
+
   @Test
-  public void testInit() throws IOException {
+  public void testInit() {
     System.out.println(VersionInfo.getBuildVersion());
-    Set<String> actionNames = ActionRegistry.registeredActions();
-    // System.out.print(actionNames.size());
-    Assert.assertTrue(actionNames.size() > 0);
+    Set<String> actionNames = actionRegistry.registeredActions();
+    Assert.assertFalse(actionNames.isEmpty());
   }
 
   @Test
-  public void testCreateAction() throws IOException, ActionException {
-    Assert.assertTrue(ActionRegistry.createAction("cache") instanceof CacheFileAction);
-    Set<String> actionNames = ActionRegistry.registeredActions();
+  public void testCreateAction() throws ActionException {
+    Assert.assertTrue(actionRegistry.createAction("cache") instanceof CacheFileAction);
+    Set<String> actionNames = actionRegistry.registeredActions();
     // create all kinds of actions
     for (String name : actionNames) {
-      ActionRegistry.createAction(name);
+      actionRegistry.createAction(name);
     }
   }
 }
diff --git a/smart-hive-support/src/main/java/org/smartdata/hive/action/HiveActionFactory.java b/smart-hive-support/src/main/java/org/smartdata/hive/action/HiveActionFactory.java
index 39f6faa9be..06572c3b7e 100644
--- a/smart-hive-support/src/main/java/org/smartdata/hive/action/HiveActionFactory.java
+++ b/smart-hive-support/src/main/java/org/smartdata/hive/action/HiveActionFactory.java
@@ -18,6 +18,8 @@
 package org.smartdata.hive.action;
 
 import org.smartdata.action.AbstractActionFactory;
+import org.smartdata.action.SmartAction;
+import org.smartdata.action.SyncAction;
 import org.smartdata.hive.action.constraint.HmsCreateConstraintAction;
 import org.smartdata.hive.action.constraint.HmsDropConstraintAction;
 import org.smartdata.hive.action.db.HmsAlterDbAction;
@@ -36,31 +38,39 @@
 import org.smartdata.hive.action.table.HmsCreateTableAction;
 import org.smartdata.hive.action.table.HmsDropTableAction;
 
+import java.util.Arrays;
+import java.util.List;
+
 public class HiveActionFactory extends AbstractActionFactory {
 
-  static {
-    addAction(HmsSyncAction.class);
+  @Override
+  protected List<Class<? extends SmartAction>> supportedActionClasses() {
+    return Arrays.asList(
+        HmsSyncAction.class,
+
+        HmsCreateDbAction.class,
+        HmsAlterDbAction.class,
+        HmsDropDbAction.class,
 
-    addAction(HmsCreateDbAction.class);
-    addAction(HmsAlterDbAction.class);
-    addAction(HmsDropDbAction.class);
+        HmsCreateTableAction.class,
+        HmsAlterTableAction.class,
+        HmsDropTableAction.class,
 
-    addAction(HmsCreateTableAction.class);
-    addAction(HmsAlterTableAction.class);
-    addAction(HmsDropTableAction.class);
+        HmsCreateFunctionAction.class,
+        HmsDropFunctionAction.class,
 
-    addAction(HmsCreateFunctionAction.class);
-    addAction(HmsDropFunctionAction.class);
+        HmsCreatePartitionAction.class,
+        HmsAlterPartitionAction.class,
+        HmsDropPartitionAction.class,
 
-    addAction(HmsCreatePartitionAction.class);
-    addAction(HmsAlterPartitionAction.class);
-    addAction(HmsDropPartitionAction.class);
+        HmsCreateConstraintAction.class,
+        HmsDropConstraintAction.class,
 
-    addAction(HmsCreateConstraintAction.class);
-    addAction(HmsDropConstraintAction.class);
+        HmsAlterPartitionColumnStatsAction.class,
+        HmsAlterTableColumnStatsAction.class,
+        HmsDropPartitionColumnStatsAction.class,
+        HmsDropTableColumnStatsAction.class,
 
-    addAction(HmsAlterPartitionColumnStatsAction.class);
-    addAction(HmsAlterTableColumnStatsAction.class);
-    addAction(HmsDropPartitionColumnStatsAction.class);
-    addAction(HmsDropTableColumnStatsAction.class);
+        SyncAction.class
+    );
   }
 }
diff --git a/smart-metastore/src/main/java/org/smartdata/metastore/MetaStore.java b/smart-metastore/src/main/java/org/smartdata/metastore/MetaStore.java
index 62680d986f..3b7e6ae719 100644
--- a/smart-metastore/src/main/java/org/smartdata/metastore/MetaStore.java
+++ b/smart-metastore/src/main/java/org/smartdata/metastore/MetaStore.java
@@ -57,6 +57,7 @@
 import org.smartdata.metrics.GeneralFileInfoSource;
 import org.smartdata.model.ActionInfo;
 import org.smartdata.model.BackUpInfo;
+import org.smartdata.model.BaseFileInfo;
 import org.smartdata.model.CachedFileStatus;
 import org.smartdata.model.ClusterConfig;
 import org.smartdata.model.ClusterInfo;
@@ -310,6 +311,17 @@ public FileInfo getFile(long fid) throws MetaStoreException {
     }
   }
 
+  public BaseFileInfo getBaseFileInfo(String path) throws MetaStoreException {
+    updateCache();
+    try {
+      return generalFileInfoSource.getBaseFileInfo(path);
+    } catch (EmptyResultDataAccessException e) {
+      return null;
+    } catch (Exception e) {
+      throw new MetaStoreException(e);
+    }
+  }
+
   public FileInfo getFile(String path) throws MetaStoreException {
     updateCache();
     try {
@@ -332,10 +344,10 @@ public List<FileInfo> getFile() throws MetaStoreException {
     }
   }
 
-  public List<FileInfo> getFilesByPrefix(String path) throws MetaStoreException {
+  public List<String> getFilePathsByPrefix(String path) throws MetaStoreException {
     updateCache();
     try {
-      return fileInfoDao.getFilesByPrefix(path);
+      return generalFileInfoSource.getFilePathsByPrefix(path);
     } catch (EmptyResultDataAccessException e) {
       return new ArrayList<>();
     } catch (Exception e) {
diff --git a/smart-metastore/src/main/java/org/smartdata/metastore/accesscount/DbAccessEventAggregator.java b/smart-metastore/src/main/java/org/smartdata/metastore/accesscount/DbAccessEventAggregator.java
index 9f6951ae4c..793644e8ac 100644
--- a/smart-metastore/src/main/java/org/smartdata/metastore/accesscount/DbAccessEventAggregator.java
+++ b/smart-metastore/src/main/java/org/smartdata/metastore/accesscount/DbAccessEventAggregator.java
@@ -34,11 +34,11 @@
 public class DbAccessEventAggregator implements AccessEventAggregator {
 
   private final GeneralFileInfoSource fileInfoDao;
-  private final FileAccessManager dbTableManager;
+  private final DbFileAccessCountManager dbTableManager;
   private final Failover accessCountFailover;
 
   public DbAccessEventAggregator(GeneralFileInfoSource fileInfoDao,
-      FileAccessManager dbTableManager,
+      DbFileAccessCountManager dbTableManager,
       Failover failover) {
     this.fileInfoDao = fileInfoDao;
     this.dbTableManager = dbTableManager;
diff --git a/smart-metastore/src/main/java/org/smartdata/metastore/accesscount/FileAccessManager.java b/smart-metastore/src/main/java/org/smartdata/metastore/accesscount/DbFileAccessCountManager.java
similarity index 96%
rename from smart-metastore/src/main/java/org/smartdata/metastore/accesscount/FileAccessManager.java
rename to smart-metastore/src/main/java/org/smartdata/metastore/accesscount/DbFileAccessCountManager.java
index 09383e3ed2..113b9ac1e1 100644
--- a/smart-metastore/src/main/java/org/smartdata/metastore/accesscount/FileAccessManager.java
+++ b/smart-metastore/src/main/java/org/smartdata/metastore/accesscount/DbFileAccessCountManager.java
@@ -35,14 +35,15 @@
 import java.util.stream.Collectors;
 
 @Slf4j
-public class FileAccessManager extends
-    SearchableService<FileAccessInfoSearchRequest, FileAccessInfo, FileAccessInfoSortField> {
+public class DbFileAccessCountManager extends
+    SearchableService<FileAccessInfoSearchRequest, FileAccessInfo, FileAccessInfoSortField>
+    implements FileAccessCountManager {
 
   private final TransactionRunner transactionRunner;
   private final FileAccessDao fileAccessDao;
   private final CacheFileDao cacheFileDao;
 
-  public FileAccessManager(
+  public DbFileAccessCountManager(
       TransactionRunner transactionRunner,
       FileAccessDao fileAccessDao,
       CacheFileDao cacheFileDao) {
diff --git a/smart-metastore/src/main/java/org/smartdata/metastore/accesscount/FileAccessCountManager.java b/smart-metastore/src/main/java/org/smartdata/metastore/accesscount/FileAccessCountManager.java
new file mode 100644
index 0000000000..7d82b47e17
--- /dev/null
+++ b/smart-metastore/src/main/java/org/smartdata/metastore/accesscount/FileAccessCountManager.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.smartdata.metastore.accesscount;
+
+import org.smartdata.metastore.dao.Searchable;
+import org.smartdata.metastore.queries.sort.FileAccessInfoSortField;
+import org.smartdata.model.FileAccessInfo;
+import org.smartdata.model.request.FileAccessInfoSearchRequest;
+
+public interface FileAccessCountManager extends
+    Searchable<FileAccessInfoSearchRequest, FileAccessInfo, FileAccessInfoSortField> {
+
+}
diff --git a/smart-metastore/src/main/java/org/smartdata/metastore/dao/AbstractDao.java b/smart-metastore/src/main/java/org/smartdata/metastore/dao/AbstractDao.java
index 85fee58be9..ced1b1e631 100644
--- a/smart-metastore/src/main/java/org/smartdata/metastore/dao/AbstractDao.java
+++ b/smart-metastore/src/main/java/org/smartdata/metastore/dao/AbstractDao.java
@@ -72,7 +72,7 @@ protected void insert(
   }
 
   protected int update(Map<String, Object> entityProperties,
-                       String filter, Object... filterArguments) {
+      String filter, Object... filterArguments) {
     return updateInternal(entityProperties, " WHERE " + filter, filterArguments);
   }
 
@@ -81,11 +81,11 @@ protected int update(Map<String, Object> entityProperties) {
   }
 
   protected int updateInternal(Map<String, Object> entityProperties,
-                               String filter, Object... filterArguments) {
+      String filter, Object... filterArguments) {
     StringJoiner updateSql = new StringJoiner(", ", "UPDATE " + tableName + " SET ", filter);
     List<Object> setArguments = new ArrayList<>();
 
-    for (Map.Entry<String, Object> property: entityProperties.entrySet()) {
+    for (Map.Entry<String, Object> property : entityProperties.entrySet()) {
       if (property.getValue() == null) {
         continue;
       }
diff --git a/smart-metastore/src/main/java/org/smartdata/metastore/dao/impl/BaseFileInfoDao.java b/smart-metastore/src/main/java/org/smartdata/metastore/dao/impl/BaseFileInfoDao.java
index bae24548c5..07112e768c 100644
--- a/smart-metastore/src/main/java/org/smartdata/metastore/dao/impl/BaseFileInfoDao.java
+++ b/smart-metastore/src/main/java/org/smartdata/metastore/dao/impl/BaseFileInfoDao.java
@@ -18,8 +18,10 @@
 package org.smartdata.metastore.dao.impl;
 
 import lombok.Data;
+import lombok.RequiredArgsConstructor;
 import org.smartdata.metastore.dao.AbstractDao;
 import org.smartdata.metrics.GeneralFileInfoSource;
+import org.smartdata.model.BaseFileInfo;
 import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
 import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
 
@@ -55,6 +57,24 @@ public Map<String, Long> getPathsToIdsMapping(Collection<String> paths) {
             GeneralFileInfo::getId));
   }
 
+  @Override
+  public List<String> getFilePathsByPrefix(String path) {
+    return jdbcTemplate.query(
+        "SELECT * FROM "
+            + tableName
+            + " WHERE path LIKE ?",
+        this::extractPath, path + "%");
+  }
+
+  @Override
+  public BaseFileInfo getBaseFileInfo(String path) {
+    return jdbcTemplate.queryForObject(
+        "SELECT * FROM "
+            + tableName
+            + " WHERE path = ?",
+        this::toBaseFileInfo, path);
+  }
+
@@ -62,9 +82,29 @@ public Map<String, Long> getPathsToIdsMapping(Collection<String> paths) {
   private GeneralFileInfo mapRow(ResultSet resultSet, int i) throws SQLException {
     return new GeneralFileInfo(
         resultSet.getLong("fid"),
         resultSet.getString("path")
     );
   }
 
+  private String extractPath(ResultSet resultSet, int i) throws SQLException {
+    return resultSet.getString("path");
+  }
+
+  private BaseFileInfo toBaseFileInfo(ResultSet resultSet, int i) throws SQLException {
+    return new BaseFileInfoImpl(
+        resultSet.getString("path"),
+        resultSet.getLong("length"),
+        resultSet.getBoolean("is_dir")
+    );
+  }
+
   @Data
   private static class GeneralFileInfo {
     private final long id;
     private final String path;
   }
+
+  @RequiredArgsConstructor
+  @Data
+  private static class BaseFileInfoImpl implements BaseFileInfo {
+    private final String path;
+    private final long length;
+    private final boolean isDir;
+  }
 }
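Reviewer note (not part of the patch): `getFilePathsByPrefix` builds the pattern as `path + "%"`, which is correct for typical HDFS paths but treats `%` and `_` inside the prefix as SQL wildcards. If stored paths could ever contain those characters, the prefix should be escaped and the query given an `ESCAPE` clause. A hedged sketch of the escaping, purely illustrative:

```java
// Sketch only: pair with a query such as
//   SELECT path FROM <table> WHERE path LIKE ? ESCAPE '\'
final class LikePrefix {
  static String toLikePrefix(String path) {
    String escaped = path
        .replace("\\", "\\\\")
        .replace("%", "\\%")
        .replace("_", "\\_");
    return escaped + "%";
  }

  public static void main(String[] args) {
    System.out.println(toLikePrefix("/dir_a")); // /dir\_a%
  }
}
```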
diff --git a/smart-metastore/src/main/java/org/smartdata/metastore/dao/impl/DefaultBackUpInfoDao.java b/smart-metastore/src/main/java/org/smartdata/metastore/dao/impl/DefaultBackUpInfoDao.java
index 636a7622b7..7e23a95d8e 100644
--- a/smart-metastore/src/main/java/org/smartdata/metastore/dao/impl/DefaultBackUpInfoDao.java
+++ b/smart-metastore/src/main/java/org/smartdata/metastore/dao/impl/DefaultBackUpInfoDao.java
@@ -51,20 +51,20 @@ public int getCountByRid(int rid) {
   @Override
   public BackUpInfo getByRid(long rid) {
     return jdbcTemplate.queryForObject("SELECT * FROM backup_file WHERE rid = ?",
-        new Object[] {rid}, new BackUpInfoRowMapper());
+        new Object[]{rid}, new BackUpInfoRowMapper());
   }
 
   @Override
   public List<BackUpInfo> getBySrc(String src) {
     return jdbcTemplate.query(
-        "SELECT * FROM backup_file WHERE src = ?", new Object[] {src},
+        "SELECT * FROM backup_file WHERE src = ?", new Object[]{src},
         new BackUpInfoRowMapper());
   }
 
   @Override
   public List<BackUpInfo> getByDest(String dest) {
     return jdbcTemplate.query(
-        "SELECT * FROM backup_file WHERE dest = ?", new Object[] {dest},
+        "SELECT * FROM backup_file WHERE dest = ?", new Object[]{dest},
         new BackUpInfoRowMapper());
   }
 
@@ -110,14 +110,13 @@ private static class BackUpInfoRowMapper implements RowMapper<BackUpInfo> {
 
     @Override
     public BackUpInfo mapRow(ResultSet resultSet, int i) throws SQLException {
-      BackUpInfo backUpInfo = new BackUpInfo();
-      backUpInfo.setRid(resultSet.getLong("rid"));
-      backUpInfo.setSrc(resultSet.getString("src"));
-      backUpInfo.setDest(resultSet.getString("dest"));
-      backUpInfo.setPeriod(resultSet.getLong("period"));
-      backUpInfo.setSrcPattern(resultSet.getString("src_pattern"));
-
-      return backUpInfo;
+      return BackUpInfo.builder()
+          .rid(resultSet.getLong("rid"))
+          .src(resultSet.getString("src"))
+          .dest(resultSet.getString("dest"))
+          .period(resultSet.getLong("period"))
+          .srcPattern(resultSet.getString("src_pattern"))
+          .build();
     }
   }
 }
diff --git a/smart-metastore/src/main/java/org/smartdata/metastore/dao/impl/DefaultOzoneFileInfoDao.java b/smart-metastore/src/main/java/org/smartdata/metastore/dao/impl/DefaultOzoneFileInfoDao.java
index a7c1f975a4..cdc6952136 100644
--- a/smart-metastore/src/main/java/org/smartdata/metastore/dao/impl/DefaultOzoneFileInfoDao.java
+++ b/smart-metastore/src/main/java/org/smartdata/metastore/dao/impl/DefaultOzoneFileInfoDao.java
@@ -27,8 +27,6 @@
 import java.util.Map;
 
 public class DefaultOzoneFileInfoDao extends BaseFileInfoDao implements OzoneFileInfoDao {
-  private static final String TABLE_NAME = "ofile";
-
   private static final String FILE_ID_FIELD = "fid";
   private static final String PATH_FIELD = "path";
   private static final String LENGTH_FIELD = "length";
@@ -39,6 +37,7 @@ public class DefaultOzoneFileInfoDao extends BaseFileInfoDao implements OzoneFileInfoDao {
   private static final String IS_VOLUME_FIELD = "is_volume";
   private static final String IS_BUCKET_FIELD = "is_bucket";
   private static final String IS_S3_FIELD = "is_s3";
+  private static final String IS_DIR_FIELD = "is_dir";
   private static final String OWNER_FIELD = "owner";
   private static final String OWNER_GROUP_FIELD = "owner_group";
   private static final String PERMISSION_FIELD = "permission";
@@ -53,6 +52,11 @@ public void insert(OzoneFileInfo fileInfo) {
     insert(fileInfo, this::toMap);
   }
 
+  @Override
+  public void clear() {
+    jdbcTemplate.update("DELETE FROM ofile");
+  }
+
   @Override
   protected SimpleJdbcInsert simpleJdbcInsert() {
     return super.simpleJdbcInsert()
@@ -70,6 +74,7 @@ private Map<String, Object> toMap(OzoneFileInfo fileInfo) {
     parameters.put(IS_VOLUME_FIELD, fileInfo.isVolume());
     parameters.put(IS_BUCKET_FIELD, fileInfo.isBucket());
     parameters.put(IS_S3_FIELD, fileInfo.isS3());
+    parameters.put(IS_DIR_FIELD, fileInfo.isDir());
     parameters.put(OWNER_FIELD, fileInfo.getOwner());
     parameters.put(OWNER_GROUP_FIELD, fileInfo.getGroup());
     parameters.put(PERMISSION_FIELD, fileInfo.getPermission());
diff --git a/smart-metastore/src/main/resources/db/changelog/changelog-12.add-ozone-file-table.xml b/smart-metastore/src/main/resources/db/changelog/changelog-12.add-ozone-file-table.xml
index 51050b985c..b6b60597bd 100644
--- a/smart-metastore/src/main/resources/db/changelog/changelog-12.add-ozone-file-table.xml
+++ b/smart-metastore/src/main/resources/db/changelog/changelog-12.add-ozone-file-table.xml
@@ -26,6 +26,7 @@
+            <column name="is_dir" type="BOOLEAN"/>
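Reviewer note (not part of the patch): the row mapper above and the tests below now construct `BackUpInfo` through a builder, which implies the model is annotated with something like Lombok's `@Builder(toBuilder = true)`; the annotation itself is not visible in this diff. A usage sketch under that assumption:

```java
import org.smartdata.model.BackUpInfo;

// Assumes BackUpInfo exposes builder() and toBuilder(), as the calls in this
// patch imply. toBuilder() copies an instance and overrides selected fields,
// which keeps test fixtures immutable instead of mutating them with setters.
public class BackUpInfoBuilderDemo {
  public static void main(String[] args) {
    BackUpInfo original = BackUpInfo.builder()
        .rid(1)
        .src("/src")
        .dest("hdfs://remote/dest")
        .period(1000)
        .srcPattern("")
        .build();

    BackUpInfo rescheduled = original.toBuilder()
        .period(2000) // copy everything, override one field
        .build();

    System.out.println(rescheduled);
  }
}
```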
a/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestBackUpInfoDao.java +++ b/smart-metastore/src/test/java/org/smartdata/metastore/dao/TestBackUpInfoDao.java @@ -36,15 +36,16 @@ public void initBackUpInfoDao() { @Test public void testInsertAndGetSingleRecord() { - BackUpInfo backUpInfo = new BackUpInfo(); - backUpInfo.setRid(1); - backUpInfo.setPeriod(1); - backUpInfo.setDest(""); - backUpInfo.setSrc(""); - backUpInfo.setSrcPattern(""); - backUpInfoDao.insert(backUpInfo); + BackUpInfo backUpInfo = BackUpInfo.builder() + .rid(1) + .period(1) + .dest("") + .src("") + .srcPattern("") + .build(); - Assert.assertTrue(backUpInfoDao.getByRid(1).equals(backUpInfo)); + backUpInfoDao.insert(backUpInfo); + Assert.assertEquals(backUpInfo, backUpInfoDao.getByRid(1)); } @Test @@ -78,17 +79,22 @@ public void testBatchInsert() { @Test public void testUpdate() { - BackUpInfo backUpInfo = new BackUpInfo(); - backUpInfo.setRid(1); - backUpInfo.setSrc("test"); - backUpInfo.setDest("test"); - backUpInfo.setPeriod(1); - backUpInfo.setSrcPattern(""); + BackUpInfo backUpInfo = BackUpInfo.builder() + .rid(1) + .src("test") + .dest("test") + .period(1) + .srcPattern("") + .build(); backUpInfoDao.insert(backUpInfo); backUpInfoDao.update(1, 2); - backUpInfo.setPeriod(2); - Assert.assertTrue(backUpInfoDao.getByRid(1).equals(backUpInfo)); + + backUpInfo = backUpInfo.toBuilder() + .period(2) + .build(); + + Assert.assertEquals(backUpInfo, backUpInfoDao.getByRid(1)); } @Test diff --git a/smart-metastore/src/test/java/org/smartdata/metastore/dao/accesscount/TestAccessEventAggregator.java b/smart-metastore/src/test/java/org/smartdata/metastore/dao/accesscount/TestAccessEventAggregator.java index 58b59a1aa2..75159bc315 100644 --- a/smart-metastore/src/test/java/org/smartdata/metastore/dao/accesscount/TestAccessEventAggregator.java +++ b/smart-metastore/src/test/java/org/smartdata/metastore/dao/accesscount/TestAccessEventAggregator.java @@ -21,7 +21,7 @@ import org.junit.Test; import org.smartdata.metastore.TestDaoBase; import org.smartdata.metastore.accesscount.DbAccessEventAggregator; -import org.smartdata.metastore.accesscount.FileAccessManager; +import org.smartdata.metastore.accesscount.DbFileAccessCountManager; import org.smartdata.metastore.accesscount.failover.AccessCountContext; import org.smartdata.metastore.accesscount.failover.Failover; import org.smartdata.metastore.model.SearchResult; @@ -49,13 +49,13 @@ public class TestAccessEventAggregator extends TestDaoBase { "/file3", 3L, "/file4", 4L ); - private FileAccessManager dbTableManager; + private DbFileAccessCountManager dbTableManager; private DbAccessEventAggregator aggregator; @Before public void setup() { dbTableManager = - new FileAccessManager(new TransactionRunner(metaStore.transactionManager()), + new DbFileAccessCountManager(new TransactionRunner(metaStore.transactionManager()), metaStore.accessCountEventDao(), metaStore.cacheFileDao()); aggregator = diff --git a/smart-metastore/src/test/java/org/smartdata/metastore/dao/accesscount/TestFileAccessManager.java b/smart-metastore/src/test/java/org/smartdata/metastore/dao/accesscount/TestFileAccessCountManager.java similarity index 96% rename from smart-metastore/src/test/java/org/smartdata/metastore/dao/accesscount/TestFileAccessManager.java rename to smart-metastore/src/test/java/org/smartdata/metastore/dao/accesscount/TestFileAccessCountManager.java index 3e06129e04..902f2863a8 100644 --- a/smart-metastore/src/test/java/org/smartdata/metastore/dao/accesscount/TestFileAccessManager.java +++ 
b/smart-metastore/src/test/java/org/smartdata/metastore/dao/accesscount/TestFileAccessCountManager.java
@@ -21,7 +21,7 @@ import org.junit.Before;
 import org.junit.Test;
 import org.smartdata.metastore.MetaStoreException;
-import org.smartdata.metastore.accesscount.FileAccessManager;
+import org.smartdata.metastore.accesscount.DbFileAccessCountManager;
 import org.smartdata.metastore.dao.Searchable;
 import org.smartdata.metastore.dao.TestSearchableDao;
 import org.smartdata.metastore.model.AggregatedAccessCounts;
@@ -45,7 +45,7 @@ import static org.junit.Assert.assertEquals;
-public class TestFileAccessManager extends
+public class TestFileAccessCountManager extends
     TestSearchableDao {
   private static final List<String> TEST_FILES = Arrays.asList(
@@ -56,14 +56,14 @@ public class TestFileAccessManager extends
       "/file4",
       "/file5"
   );
-  private FileAccessManager fileAccessManager;
+  private DbFileAccessCountManager fileAccessManager;
   @Before
   public void setUp() {
     FileAccessPartitionManager fileAccessPartitionManager =
         new FileAccessPartitionManagerImpl(metaStore);
     fileAccessPartitionManager.createNewPartitions();
-    fileAccessManager = new FileAccessManager(
+    fileAccessManager = new DbFileAccessCountManager(
         new TransactionRunner(metaStore.transactionManager()),
         metaStore.accessCountEventDao(),
         metaStore.cacheFileDao());
diff --git a/smart-ozone-support/smart-ozone-common/src/main/java/org/smartdata/ozone/OzoneSmartConf.java b/smart-ozone-support/smart-ozone-common/src/main/java/org/smartdata/ozone/OzoneSmartConf.java
index 7b17e74f74..b85503d20e 100644
--- a/smart-ozone-support/smart-ozone-common/src/main/java/org/smartdata/ozone/OzoneSmartConf.java
+++ b/smart-ozone-support/smart-ozone-common/src/main/java/org/smartdata/ozone/OzoneSmartConf.java
@@ -17,11 +17,17 @@
  */
 package org.smartdata.ozone;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ozone.RootedOzoneFileSystem;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import java.net.URI;
+import java.util.Optional;
+
+import static org.apache.hadoop.fs.FileSystem.DEFAULT_FS;
 import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
+import static org.smartdata.conf.SmartConfKeys.SMART_OZONE_RPC_SERVER_KEY;
 public class OzoneSmartConf extends OzoneConfiguration {
   public static final String OZONE_FETCH_BATCH_SIZE = "smart.ozone.event.fetch.batch.size";
@@ -30,16 +36,10 @@ public class OzoneSmartConf extends OzoneConfiguration {
   public static final String OZONE_SNAPSHOT_THREADS_COUNT = "smart.ozone.snapshot.threads.count";
   public static final int OZONE_SNAPSHOT_THREADS_COUNT_DEFAULT = 16;
-  // todo: remove option after ADH-7056 will be completed
-  public static final String DEFAULT_OFS_ADDRESS = "smart.ozone.ofs.default";
-
   public OzoneSmartConf(Configuration conf) {
     super(conf);
-    // todo: move to appropriate place during ADH-7056 implementation
-    set(FS_DEFAULT_NAME_KEY, get(DEFAULT_OFS_ADDRESS));
     set("fs.ofs.impl", RootedOzoneFileSystem.class.getName());
-
     loadSystemProperties();
   }
@@ -51,6 +51,18 @@ public int getSnapshotFetcherThreadsCount() {
     return getInt(OZONE_SNAPSHOT_THREADS_COUNT, OZONE_SNAPSHOT_THREADS_COUNT_DEFAULT);
   }
+  public URI getOzoneDefaultFsUri() {
+    return getDefaultFsIfSet()
+        .map(URI::create)
+        .orElseGet(() -> getOzoneDefaultFsFromRpc(this));
+  }
+
+  private Optional<String> getDefaultFsIfSet() {
+    return Optional.ofNullable(get(FS_DEFAULT_NAME_KEY))
+        .filter(StringUtils::isNoneBlank)
+        .filter(fs -> !fs.equals(DEFAULT_FS));
+  }
+
   private void loadSystemProperties() {
     for (String propertyName : getProps().stringPropertyNames()) {
       String systemPropertyValue = System.getProperty(propertyName);
@@ -59,4 +71,26 @@ private void loadSystemProperties() {
       }
     }
   }
+
+  private Optional<URI> getOzoneRpcAddress() {
+    return Optional.ofNullable(get("ozone.om.address"))
+        .map(url -> "ofs://" + url)
+        .map(URI::create);
+  }
+
+  public static URI getOzoneDefaultFsUri(Configuration conf) {
+    return new OzoneSmartConf(conf).getOzoneDefaultFsUri();
+  }
+
+  public static URI getOzoneDefaultFsUriSetBySsm(Configuration conf) {
+    return Optional.ofNullable(conf.get(SMART_OZONE_RPC_SERVER_KEY))
+        .map(URI::create)
+        .orElseGet(() -> getOzoneDefaultFsUri(conf));
+  }
+
+  private static URI getOzoneDefaultFsFromRpc(OzoneSmartConf ozoneConf) {
+    return Optional.ofNullable(ozoneConf)
+        .flatMap(OzoneSmartConf::getOzoneRpcAddress)
+        .orElseThrow(() -> new IllegalArgumentException("Ozone RPC address is not set"));
+  }
 }
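The new getOzoneDefaultFsUri resolves the filesystem URI in a fixed order: an explicitly configured fs.defaultFS wins unless it is blank or equal to Hadoop's built-in file:/// default, otherwise the URI is derived from ozone.om.address, and if neither is set an IllegalArgumentException is thrown. A usage sketch with example host names:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.smartdata.ozone.OzoneSmartConf;

public class OfsUriResolutionExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("ozone.om.address", "om-host:9862");  // example OM address

    // fs.defaultFS unset: the URI is derived from the OM RPC address.
    URI fromRpc = OzoneSmartConf.getOzoneDefaultFsUri(conf);   // ofs://om-host:9862

    conf.set("fs.defaultFS", "ofs://other-om:9862");
    // A non-blank fs.defaultFS (other than file:///) takes precedence.
    URI explicit = OzoneSmartConf.getOzoneDefaultFsUri(conf);  // ofs://other-om:9862
  }
}
```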
diff --git a/smart-ozone-support/smart-ozone-common/src/test/java/org/smartdata/ozone/OzoneClusterHarness.java b/smart-ozone-support/smart-ozone-common/src/test/java/org/smartdata/ozone/OzoneClusterHarness.java
index 6f108cac09..8bad368047 100644
--- a/smart-ozone-support/smart-ozone-common/src/test/java/org/smartdata/ozone/OzoneClusterHarness.java
+++ b/smart-ozone-support/smart-ozone-common/src/test/java/org/smartdata/ozone/OzoneClusterHarness.java
@@ -35,7 +35,7 @@ import java.util.HashMap;
 import java.util.Optional;
-import static org.smartdata.ozone.OzoneSmartConf.DEFAULT_OFS_ADDRESS;
+import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
 public class OzoneClusterHarness {
@@ -51,8 +51,8 @@ public class OzoneClusterHarness {
   @Before
   public void init() throws Exception {
     SmartConf smartConf = new SmartConf();
-    smartConf.set(DEFAULT_OFS_ADDRESS, "ofs://" + ozoneContainer.getOmRpcAddress());
     smartConf.set("ozone.om.address", ozoneContainer.getOmRpcAddress());
+    smartConf.set(FS_DEFAULT_NAME_KEY, "ofs://" + ozoneContainer.getOmRpcAddress());
     ozoneConf = new OzoneSmartConf(smartConf);
     ozoneClient = OzoneClientFactory.getRpcClient(ozoneConf).getObjectStore();
diff --git a/smart-ozone-support/smart-ozone-fs-client/src/test/java/org/apache/hadoop/fs/ozone/SmartOzoneClientAdapterTest.java b/smart-ozone-support/smart-ozone-fs-client/src/test/java/org/apache/hadoop/fs/ozone/SmartOzoneClientAdapterTest.java
index a67732f920..f8e1a5f06b 100644
--- a/smart-ozone-support/smart-ozone-fs-client/src/test/java/org/apache/hadoop/fs/ozone/SmartOzoneClientAdapterTest.java
+++ b/smart-ozone-support/smart-ozone-fs-client/src/test/java/org/apache/hadoop/fs/ozone/SmartOzoneClientAdapterTest.java
@@ -27,7 +27,6 @@ import org.smartdata.protocol.SmartClientProtocol;
 import java.io.IOException;
-import java.io.InputStream;
 import java.net.URI;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
diff --git a/smart-ozone-support/smart-ozone/pom.xml b/smart-ozone-support/smart-ozone/pom.xml
index b1e6e86388..53606bd4f1 100644
--- a/smart-ozone-support/smart-ozone/pom.xml
+++ b/smart-ozone-support/smart-ozone/pom.xml
@@ -39,6 +39,21 @@
       <artifactId>smart-ozone-common</artifactId>
       <version>2.2.0-SNAPSHOT</version>
     </dependency>
+    <dependency>
+      <groupId>org.smartdata</groupId>
+      <artifactId>smart-rule</artifactId>
+      <version>2.2.0-SNAPSHOT</version>
+    </dependency>
+    <dependency>
+      <groupId>org.smartdata</groupId>
+      <artifactId>smart-hadoop-action-common</artifactId>
+      <version>2.2.0-SNAPSHOT</version>
+    </dependency>
+    <dependency>
+      <groupId>org.smartdata</groupId>
+      <artifactId>smart-ozone-fs-client</artifactId>
+      <version>2.2.0-SNAPSHOT</version>
+    </dependency>
     <dependency>
       <groupId>org.projectlombok</groupId>
       <artifactId>lombok</artifactId>
diff --git a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/OzoneFetcherService.java
b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/OzoneFetcherService.java index 8c3a4cd230..cc00bfa5d7 100644 --- a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/OzoneFetcherService.java +++ b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/OzoneFetcherService.java @@ -19,6 +19,7 @@ import lombok.extern.slf4j.Slf4j; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.ozone.RootedOzoneFileSystem; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.smartdata.AbstractService; import org.smartdata.SmartContext; @@ -65,7 +66,7 @@ private OfsSnapshotFetcher buildSnapshotFetcher() throws IOException { ozoneSmartConf.getSnapshotFetcherThreadsCount()); return new OfsSnapshotFetcher( - FileSystem.get(ozoneSmartConf), + buildOzoneFileSystem(), OzoneClientFactory.getRpcClient(ozoneSmartConf).getObjectStore(), ozoneSmartConf, executorService, @@ -73,6 +74,14 @@ private OfsSnapshotFetcher buildSnapshotFetcher() throws IOException { ); } + private FileSystem buildOzoneFileSystem() throws IOException { + // create RootedOzoneFileSystem directly because we don't need + // a ssm client here + RootedOzoneFileSystem fileSystem = new RootedOzoneFileSystem(); + fileSystem.initialize(ozoneSmartConf.getOzoneDefaultFsUri(), ozoneSmartConf); + return fileSystem; + } + private FsObjectStreamHandler buildStreamHandler() { return new AsyncFsObjectStreamHandler( buildFsObjectHandler(), @@ -86,6 +95,8 @@ private FsObjectHandler buildFsObjectHandler() { @Override public void start() { + // todo clear the file table every time until ADH-7258 is resolved + ozoneFileInfoDao.clear(); BlockingQueue fsObjectStream = ofsSnapshotFetcher.runSnapshot(); eventStreamHandler.collectAsync(fsObjectStream); } diff --git a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/OzoneFileInfoDao.java b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/OzoneFileInfoDao.java index 23c5699fdb..eb24be48b5 100644 --- a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/OzoneFileInfoDao.java +++ b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/OzoneFileInfoDao.java @@ -21,5 +21,9 @@ import org.smartdata.ozone.model.OzoneFileInfo; public interface OzoneFileInfoDao extends GeneralFileInfoSource { + String TABLE_NAME = "ofile"; + void insert(OzoneFileInfo fileInfo); + + void clear(); } diff --git a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/action/OzoneActionFactory.java b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/action/OzoneActionFactory.java new file mode 100644 index 0000000000..8aac6420a9 --- /dev/null +++ b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/action/OzoneActionFactory.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
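buildOzoneFileSystem news up RootedOzoneFileSystem instead of calling FileSystem.get. A sketch of the distinction, assuming fs.defaultFS resolves to an ofs:// URI: going through FileSystem.get is subject to the fs.ofs.impl mapping and Hadoop's JVM-wide FileSystem cache (including any smart wrapper a deployment may bind there), while direct instantiation gives the fetcher a private client it can close independently:

```java
// Via the cache: subject to the fs.ofs.impl mapping and instance sharing.
FileSystem shared = FileSystem.get(ozoneSmartConf);

// Direct (the approach above): a private, uncached instance.
RootedOzoneFileSystem direct = new RootedOzoneFileSystem();
direct.initialize(ozoneSmartConf.getOzoneDefaultFsUri(), ozoneSmartConf);
```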
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.smartdata.ozone.action;
+
+import org.smartdata.action.AbstractActionFactory;
+import org.smartdata.action.SmartAction;
+import org.smartdata.action.SyncAction;
+import org.smartdata.hdfs.action.Copy2S3Action;
+import org.smartdata.hdfs.action.CopyDirectoryAction;
+import org.smartdata.hdfs.action.CopyFileAction;
+import org.smartdata.hdfs.action.DeleteFileAction;
+import org.smartdata.hdfs.action.DistCpAction;
+import org.smartdata.hdfs.action.ListFileAction;
+import org.smartdata.hdfs.action.MetaDataAction;
+import org.smartdata.hdfs.action.ReadFileAction;
+import org.smartdata.hdfs.action.RenameFileAction;
+import org.smartdata.hdfs.action.WriteFileAction;
+
+import java.util.Arrays;
+import java.util.List;
+
+public class OzoneActionFactory extends AbstractActionFactory {
+
+  @Override
+  protected List<Class<? extends SmartAction>> supportedActionClasses() {
+    return Arrays.asList(
+        CopyFileAction.class,
+        Copy2S3Action.class,
+        DeleteFileAction.class,
+        RenameFileAction.class,
+        ListFileAction.class,
+        ReadFileAction.class,
+        WriteFileAction.class,
+        SyncAction.class,
+        DistCpAction.class,
+        CopyDirectoryAction.class,
+        MetaDataAction.class
+    );
+  }
+}
diff --git a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/action/OzoneCmdletFactoryPlugin.java b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/action/OzoneCmdletFactoryPlugin.java
new file mode 100644
index 0000000000..5eadaabd39
--- /dev/null
+++ b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/action/OzoneCmdletFactoryPlugin.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
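A sketch of how the factory's merged action map can be consumed; the "copy" action id is an assumption here (real ids come from each action's @ActionSignature), as is the executor-side code around it:

```java
import org.smartdata.action.SmartAction;
import org.smartdata.ozone.action.OzoneActionFactory;

public class ActionLookupExample {
  public static void main(String[] args) throws ReflectiveOperationException {
    OzoneActionFactory factory = new OzoneActionFactory();
    // "copy" is an assumed example id, not confirmed by this patch.
    Class<? extends SmartAction> actionClass =
        factory.getSupportedActions().get("copy");
    if (actionClass != null) {
      SmartAction action = actionClass.getDeclaredConstructor().newInstance();
      // next: initialize the action with its arguments and run it,
      // as the cmdlet executor would
    }
  }
}
```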
+ */ +package org.smartdata.ozone.action; + +import lombok.extern.slf4j.Slf4j; +import org.apache.hadoop.fs.FileSystem; +import org.smartdata.conf.SmartConf; +import org.smartdata.conf.SmartFsType; +import org.smartdata.hdfs.HadoopCmdletFactoryPlugin; +import org.smartdata.hdfs.impersonation.UserImpersonationStrategy; +import org.smartdata.ozone.client.CachingOfsProvider; + +@Slf4j +public class OzoneCmdletFactoryPlugin extends HadoopCmdletFactoryPlugin { + + public OzoneCmdletFactoryPlugin(SmartConf conf, + UserImpersonationStrategy userImpersonationStrategy) { + super(conf, new CachingOfsProvider(conf, userImpersonationStrategy)); + } + + @Override + protected SmartFsType supportedFsType() { + return SmartFsType.OZONE; + } +} diff --git a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/client/CachingOfsProvider.java b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/client/CachingOfsProvider.java new file mode 100644 index 0000000000..31878abbfb --- /dev/null +++ b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/client/CachingOfsProvider.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
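Wiring sketch for the plugin: the server side would pick OzoneCmdletFactoryPlugin when smart.fs.type is OZONE, passing whatever UserImpersonationStrategy is configured. The selection code below is illustrative, not the actual server bootstrap:

```java
// Illustrative wiring only; the real bootstrap lives in the server modules,
// and UserImpersonationStrategy.from(conf) is an assumed factory method.
SmartConf conf = new SmartConf();
UserImpersonationStrategy impersonation = UserImpersonationStrategy.from(conf);
OzoneCmdletFactoryPlugin plugin = new OzoneCmdletFactoryPlugin(conf, impersonation);
// plugin.supportedFsType() resolves to SmartFsType.OZONE
```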
+ */ +package org.smartdata.ozone.client; + +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.ozone.RootedOzoneFileSystem; +import org.apache.hadoop.fs.ozone.SmartRootedOzoneFileSystem; +import org.smartdata.hdfs.action.HadoopAction; +import org.smartdata.hdfs.client.BaseFileSystemCache; +import org.smartdata.hdfs.client.FileSystemCache; +import org.smartdata.hdfs.client.LocalFileSystemProvider; +import org.smartdata.hdfs.impersonation.UserImpersonationStrategy; + +import java.io.IOException; +import java.net.URI; +import java.time.Duration; + +import static org.smartdata.conf.SmartConfKeys.SMART_CLIENT_CONCURRENT_REPORT_ENABLED; +import static org.smartdata.ozone.OzoneSmartConf.getOzoneDefaultFsUriSetBySsm; +import static org.smartdata.utils.ConfigUtil.getSsmMasterRpcAddress; + +@Slf4j +@RequiredArgsConstructor +public class CachingOfsProvider implements LocalFileSystemProvider { + + private final FileSystemCache smartFsCache; + private final FileSystemCache defaultFsCache; + + public CachingOfsProvider( + Configuration config, + UserImpersonationStrategy userImpersonationStrategy) { + Duration cacheKeyTtlDuration = FileSystemCache.getCacheTtl(config); + this.smartFsCache = new SmartFileSystemCache(userImpersonationStrategy, cacheKeyTtlDuration); + this.defaultFsCache = new DefaultFileSystemCache(userImpersonationStrategy, cacheKeyTtlDuration); + } + + @Override + public FileSystem provide(Configuration config, String user, HadoopAction.FsType fsType) + throws IOException { + return fsType == HadoopAction.FsType.SMART + ? smartFsCache.get(config, user, getSsmMasterRpcAddress(config)) + : defaultFsCache.get(config, user, null); + } + + @Override + public void close() throws IOException { + smartFsCache.close(); + defaultFsCache.close(); + } + + private static class SmartFileSystemCache extends BaseFileSystemCache { + + private SmartFileSystemCache( + UserImpersonationStrategy userImpersonationStrategy, + Duration keyTtl) { + super(userImpersonationStrategy, keyTtl); + } + + @Override + protected URI getServiceUri(Configuration config) { + return getOzoneDefaultFsUriSetBySsm(config); + } + + @Override + protected SmartRootedOzoneFileSystem createFileSystem(Configuration config, CacheKey cacheKey) { + try { + Configuration fsConfig = new Configuration(config); + // a smart server always has only 1 address set + // in the "smart.server.rpc.address" option + fsConfig.setBoolean(SMART_CLIENT_CONCURRENT_REPORT_ENABLED, false); + SmartRootedOzoneFileSystem fileSystem = new SmartRootedOzoneFileSystem(); + fileSystem.initialize(cacheKey.getServiceUri(), fsConfig); + return fileSystem; + } catch (IOException exception) { + throw new RuntimeException("Error creating smart file system", exception); + } + } + } + + private static class DefaultFileSystemCache extends BaseFileSystemCache { + private DefaultFileSystemCache( + UserImpersonationStrategy userImpersonationStrategy, + Duration keyTtl) { + super(userImpersonationStrategy, keyTtl); + } + + @Override + protected URI getServiceUri(Configuration config) { + return getOzoneDefaultFsUriSetBySsm(config); + } + + @Override + protected FileSystem createFileSystem(Configuration config, CacheKey cacheKey) { + try { + RootedOzoneFileSystem fileSystem = new RootedOzoneFileSystem(); + fileSystem.initialize(cacheKey.getServiceUri(), config); + return fileSystem; + } catch (IOException exception) { + throw new 
RuntimeException("Error creating default file system", exception); + } + } + } +} diff --git a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/handler/DbFsObjectHandler.java b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/handler/DbFsObjectHandler.java index 698dbc20fa..128e81c9e2 100644 --- a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/handler/DbFsObjectHandler.java +++ b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/handler/DbFsObjectHandler.java @@ -29,7 +29,7 @@ public class DbFsObjectHandler implements FsObjectHandler { private final OzoneFileInfoDao ozoneFileInfoDao; @Override - public void handle(FsObjectStreamRecord record) throws Exception { + public void handle(FsObjectStreamRecord record) { if (record instanceof OzoneFileInfo) { ozoneFileInfoDao.insert((OzoneFileInfo) record); } else { diff --git a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/model/OzoneFileInfo.java b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/model/OzoneFileInfo.java index f176cb6edd..562840745b 100644 --- a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/model/OzoneFileInfo.java +++ b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/model/OzoneFileInfo.java @@ -20,11 +20,12 @@ import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Data; +import org.smartdata.model.BaseFileInfo; @Data @AllArgsConstructor @Builder(toBuilder = true) -public class OzoneFileInfo implements FsObjectStreamRecord { +public class OzoneFileInfo implements FsObjectStreamRecord, BaseFileInfo { private String path; private long fileId; private long length; @@ -35,6 +36,7 @@ public class OzoneFileInfo implements FsObjectStreamRecord { private boolean isVolume; private boolean isBucket; private boolean isS3; + private boolean isDir; private String owner; private String group; private short permission; diff --git a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/rule/OzoneFileObject.java b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/rule/OzoneFileObject.java new file mode 100644 index 0000000000..dd8ae4e7c6 --- /dev/null +++ b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/rule/OzoneFileObject.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
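CachingOfsProvider keeps two TTL-based caches: SMART requests are keyed by the SSM master RPC address and served by SmartRootedOzoneFileSystem (with concurrent report disabled, since a smart server exposes a single RPC address), while every other request gets a plain RootedOzoneFileSystem. A call-site sketch; the surrounding action code and variable bindings are assumed:

```java
// Assumed call site inside an action; conf and impersonationStrategy are
// assumed to be in scope, and FsType constants other than SMART are
// assumptions about the HadoopAction enum.
CachingOfsProvider provider = new CachingOfsProvider(conf, impersonationStrategy);
FileSystem smartFs = provider.provide(conf, "ozone-user", HadoopAction.FsType.SMART);
// any non-SMART type is served from the plain RootedOzoneFileSystem cache
provider.close();  // releases both caches on shutdown
```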
+ */ +package org.smartdata.ozone.rule; + +import com.google.common.collect.ImmutableMap; +import org.smartdata.ozone.OzoneFileInfoDao; +import org.smartdata.rule.objects.ObjectType; +import org.smartdata.rule.objects.Property; +import org.smartdata.rule.objects.SmartObject; +import org.smartdata.rule.parser.ValueType; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Map; + +public class OzoneFileObject extends SmartObject { + + private static final Map PROPERTIES = + ImmutableMap.builder() + .put("path", + new Property("path", ValueType.STRING, + null, OzoneFileInfoDao.TABLE_NAME, "path")) + .put("length", + new Property("length", ValueType.LONG, + null, OzoneFileInfoDao.TABLE_NAME, "length")) + .put("blocksize", + new Property("blocksize", ValueType.LONG, + null, OzoneFileInfoDao.TABLE_NAME, "block_size")) + .put("age", + new Property("age", ValueType.TIMEINTVAL, + null, OzoneFileInfoDao.TABLE_NAME, null, + "($NOW - modification_time)")) + .put("mtime", + new Property("mtime", ValueType.TIMEPOINT, + null, OzoneFileInfoDao.TABLE_NAME, "modification_time")) + .put("atime", + new Property("atime", ValueType.TIMEPOINT, + null, OzoneFileInfoDao.TABLE_NAME, "access_time")) + .put("isDir", + new Property("isDir", ValueType.BOOLEAN, + null, OzoneFileInfoDao.TABLE_NAME, "is_dir")) + .put("unsynced", + new Property("unsynced", ValueType.BOOLEAN, + null, "file_diff", null, + "state = 0")) + .put("accessCount", + new Property("accessCount", ValueType.LONG, + Collections.singletonList(ValueType.TIMEINTVAL), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("ac", + new Property("ac", ValueType.LONG, + Collections.singletonList(ValueType.TIMEINTVAL), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("accessCountTop", + new Property("accessCountTop", ValueType.LONG, + Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("acTop", + new Property("acTop", ValueType.LONG, + Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("accessCountBottom", + new Property("accessCountBottom", ValueType.LONG, + Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("acBot", + new Property("acBot", ValueType.LONG, + Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .build(); + + public OzoneFileObject() { + super(ObjectType.FILE, PROPERTIES, OzoneFileInfoDao.TABLE_NAME); + } +} \ No newline at end of file diff --git a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/rule/OzoneSmartObjectSupplier.java b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/rule/OzoneSmartObjectSupplier.java new file mode 100644 index 0000000000..76a34ae5ba --- /dev/null +++ b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/rule/OzoneSmartObjectSupplier.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
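With these Property definitions bound to the ofile table, rules over Ozone paths use the same predicates as their HDFS counterparts. Example rule strings (paths, intervals, and action ids are illustrative; the ids are assumed from the Ozone action classes above):

```
file: path matches "/vol1/tmp/*" and not isDir and age > 90d | delete
file: path matches "/vol1/logs/*" and accessCount(7d) > 10 | sync -dest ofs://backup-om:9862/backup
```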
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.smartdata.ozone.rule; + +import com.google.common.collect.ImmutableMap; +import org.smartdata.rule.objects.HmsObject; +import org.smartdata.rule.objects.SmartObject; +import org.smartdata.rule.objects.StaticMapSmartObjectSupplier; + +import java.util.Map; + +public class OzoneSmartObjectSupplier extends StaticMapSmartObjectSupplier { + private final static Map SUPPORTED_OBJECTS = ImmutableMap.of( + "file", new OzoneFileObject(), + "hms", new HmsObject() + ); + + public OzoneSmartObjectSupplier() { + super(SUPPORTED_OBJECTS); + } +} diff --git a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/snapshot/OfsSnapshotFetcher.java b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/snapshot/OfsSnapshotFetcher.java index 42f12930df..f374fc951f 100644 --- a/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/snapshot/OfsSnapshotFetcher.java +++ b/smart-ozone-support/smart-ozone/src/main/java/org/smartdata/ozone/snapshot/OfsSnapshotFetcher.java @@ -149,6 +149,7 @@ private CompletableFuture handleVolume(OzoneVolume volume) { private CompletableFuture handleVolume(FileStatus fileStatus) { OzoneFileInfo.Builder fileBuilder = OzoneFileInfo.builder() .path(pathWithoutAuthority(fileStatus.getPath())) + .isDir(true) .isVolume(true); return saveFile(fileStatus, fileBuilder) .thenComposeAsync(ignore -> executeInParallel( @@ -159,6 +160,7 @@ private CompletableFuture handleVolume(FileStatus fileStatus) { private CompletableFuture handleBucket(FileStatus fileStatus) { OzoneFileInfo.Builder fileBuilder = OzoneFileInfo.builder() .path(pathWithoutAuthority(fileStatus.getPath())) + .isDir(true) .isBucket(true); return saveFile(fileStatus, fileBuilder) .thenComposeAsync(ignore -> createBucketSnapshot(fileStatus), executor) @@ -172,6 +174,7 @@ private CompletableFuture handleBucket(FileStatus fileStatus) { private CompletableFuture handleKey(FileStatus fileStatus) { Path filePath = getOriginalFilePath(fileStatus.getPath()); OzoneFileInfo.Builder fileBuilder = OzoneFileInfo.builder() + .isDir(fileStatus.isDirectory()) .path(pathWithoutAuthority(filePath)); return saveFile(fileStatus, fileBuilder) .thenComposeAsync(ignore -> handleChildrenIfDirectory(fileStatus), executor); diff --git a/smart-rule/src/main/java/org/smartdata/rule/objects/DefaultSmartObjectSupplier.java b/smart-rule/src/main/java/org/smartdata/rule/objects/DefaultSmartObjectSupplier.java new file mode 100644 index 0000000000..3660a69c3a --- /dev/null +++ b/smart-rule/src/main/java/org/smartdata/rule/objects/DefaultSmartObjectSupplier.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.smartdata.rule.objects; + +import com.google.common.collect.ImmutableMap; + +import java.util.Map; + +public class DefaultSmartObjectSupplier extends StaticMapSmartObjectSupplier { + private static final Map SUPPORTED_OBJECTS = ImmutableMap.of( + "file", new FileObject(), + "hms", new HmsObject(), + "storage", new StorageObject() + ); + + public DefaultSmartObjectSupplier() { + super(SUPPORTED_OBJECTS); + } +} diff --git a/smart-rule/src/main/java/org/smartdata/rule/objects/FileObject.java b/smart-rule/src/main/java/org/smartdata/rule/objects/FileObject.java index e066943667..01976a6ca6 100644 --- a/smart-rule/src/main/java/org/smartdata/rule/objects/FileObject.java +++ b/smart-rule/src/main/java/org/smartdata/rule/objects/FileObject.java @@ -18,11 +18,11 @@ package org.smartdata.rule.objects; +import com.google.common.collect.ImmutableMap; import org.smartdata.rule.parser.ValueType; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.Map; /** @@ -30,85 +30,79 @@ */ public class FileObject extends SmartObject { - public static final Map PROPERTIES; - - static { - PROPERTIES = new HashMap<>(); - PROPERTIES.put("path", - new Property("path", ValueType.STRING, null, "file", "path")); - PROPERTIES.put("accessCount", - new Property("accessCount", ValueType.LONG, - Collections.singletonList(ValueType.TIMEINTVAL), - "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")); - PROPERTIES.put("ac", - new Property("ac", ValueType.LONG, - Collections.singletonList(ValueType.TIMEINTVAL), - "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")); - PROPERTIES.put("accessCountTop", - new Property("accessCountTop", ValueType.LONG, - Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG), - "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")); - PROPERTIES.put("acTop", - new Property("acTop", ValueType.LONG, - Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG), - "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")); - PROPERTIES.put("accessCountBottom", - new Property("accessCountBottom", ValueType.LONG, - Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG), - "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")); - PROPERTIES.put("acBot", - new Property("acBot", ValueType.LONG, - Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG), - "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")); - PROPERTIES.put("acTopSp", - new Property("acTopSp", ValueType.LONG, - Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG, ValueType.STRING), - "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")); - PROPERTIES.put("acBotSp", - new Property("acBotSp", ValueType.LONG, - Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG, ValueType.STRING), - "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")); - PROPERTIES.put("length", - new Property("length", ValueType.LONG, - null, "file", "length")); - PROPERTIES.put("blocksize", - new Property("blocksize", ValueType.LONG, - null, "file", "block_size")); - PROPERTIES.put("inCache", - new Property("inCache", ValueType.BOOLEAN, - null, "cached_file", null)); - PROPERTIES.put("age", - new Property("age", ValueType.TIMEINTVAL, - null, "file", null, - "($NOW - modification_time)")); - 
PROPERTIES.put("mtime", - new Property("mtime", ValueType.TIMEPOINT, - null, "file", "modification_time")); - PROPERTIES.put("atime", - new Property("atime", ValueType.TIMEPOINT, - null, "file", "access_time")); - PROPERTIES.put("storagePolicy", - new Property("storagePolicy", ValueType.STRING, - null, "file", null, - "(SELECT policy_name FROM storage_policy WHERE sid = file.sid)")); - PROPERTIES.put("unsynced", - new Property("unsynced", ValueType.BOOLEAN, - null, "file_diff", null, - "state = 0")); - PROPERTIES.put("isDir", - new Property("isDir", ValueType.BOOLEAN, - null, "file", "is_dir")); - PROPERTIES.put("ecPolicy", - new Property("ecPolicy", ValueType.STRING, - null, "file", null, - "(SELECT policy_name FROM ec_policy WHERE id = file.ec_policy_id)")); - } + private static final Map PROPERTIES = + ImmutableMap.builder() + .put("path", + new Property("path", ValueType.STRING, null, "file", "path")) + .put("accessCount", + new Property("accessCount", ValueType.LONG, + Collections.singletonList(ValueType.TIMEINTVAL), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("ac", + new Property("ac", ValueType.LONG, + Collections.singletonList(ValueType.TIMEINTVAL), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("accessCountTop", + new Property("accessCountTop", ValueType.LONG, + Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("acTop", + new Property("acTop", ValueType.LONG, + Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("accessCountBottom", + new Property("accessCountBottom", ValueType.LONG, + Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("acBot", + new Property("acBot", ValueType.LONG, + Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("acTopSp", + new Property("acTopSp", ValueType.LONG, + Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG, ValueType.STRING), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("acBotSp", + new Property("acBotSp", ValueType.LONG, + Arrays.asList(ValueType.TIMEINTVAL, ValueType.LONG, ValueType.STRING), + "VIRTUAL_ACCESS_COUNT_TABLE", "", "count")) + .put("length", + new Property("length", ValueType.LONG, + null, "file", "length")) + .put("blocksize", + new Property("blocksize", ValueType.LONG, + null, "file", "block_size")) + .put("inCache", + new Property("inCache", ValueType.BOOLEAN, + null, "cached_file", null)) + .put("age", + new Property("age", ValueType.TIMEINTVAL, + null, "file", null, + "($NOW - modification_time)")) + .put("mtime", + new Property("mtime", ValueType.TIMEPOINT, + null, "file", "modification_time")) + .put("atime", + new Property("atime", ValueType.TIMEPOINT, + null, "file", "access_time")) + .put("storagePolicy", + new Property("storagePolicy", ValueType.STRING, + null, "file", null, + "(SELECT policy_name FROM storage_policy WHERE sid = file.sid)")) + .put("unsynced", + new Property("unsynced", ValueType.BOOLEAN, + null, "file_diff", null, + "state = 0")) + .put("isDir", + new Property("isDir", ValueType.BOOLEAN, + null, "file", "is_dir")) + .put("ecPolicy", + new Property("ecPolicy", ValueType.STRING, + null, "file", null, + "(SELECT policy_name FROM ec_policy WHERE id = file.ec_policy_id)")) + .build(); public FileObject() { - super(ObjectType.FILE, "file"); - } - - public Map getProperties() { - return PROPERTIES; + super(ObjectType.FILE, PROPERTIES, "file"); } } diff --git 
a/smart-rule/src/main/java/org/smartdata/rule/objects/HmsObject.java b/smart-rule/src/main/java/org/smartdata/rule/objects/HmsObject.java index 2c99e69f63..f7ccc1c967 100644 --- a/smart-rule/src/main/java/org/smartdata/rule/objects/HmsObject.java +++ b/smart-rule/src/main/java/org/smartdata/rule/objects/HmsObject.java @@ -18,10 +18,10 @@ package org.smartdata.rule.objects; +import com.google.common.collect.ImmutableMap; import org.smartdata.rule.parser.ValueType; import java.util.Collections; -import java.util.HashMap; import java.util.Map; /** @@ -29,27 +29,19 @@ */ public class HmsObject extends SmartObject { - public static final Map PROPERTIES; - - static { - PROPERTIES = new HashMap<>(); - PROPERTIES.put("name", - new Property( - "name", - ValueType.STRING, - Collections.singletonList(ValueType.STRING), - "hive_metastore_event", - "entity_name", - "(entity_name LIKE SUBSTRING($0 FROM 1 FOR " - + "NULLIF(POSITION('.' IN $0), 0) - 1)) or entity_name", - true)); - } + private static final Map PROPERTIES = ImmutableMap.of( + "name", new Property( + "name", + ValueType.STRING, + Collections.singletonList(ValueType.STRING), + "hive_metastore_event", + "entity_name", + "(entity_name LIKE SUBSTRING($0 FROM 1 FOR " + + "NULLIF(POSITION('.' IN $0), 0) - 1)) or entity_name", + true) + ); public HmsObject() { - super(ObjectType.HMS, "hive_metastore_event"); - } - - public Map getProperties() { - return PROPERTIES; + super(ObjectType.HMS, PROPERTIES, "hive_metastore_event"); } } diff --git a/smart-rule/src/main/java/org/smartdata/rule/objects/SmartObject.java b/smart-rule/src/main/java/org/smartdata/rule/objects/SmartObject.java index 9408989668..b291c57583 100644 --- a/smart-rule/src/main/java/org/smartdata/rule/objects/SmartObject.java +++ b/smart-rule/src/main/java/org/smartdata/rule/objects/SmartObject.java @@ -30,25 +30,10 @@ public abstract class SmartObject { private final ObjectType type; + private final Map properties; private final String baseTableName; - public static SmartObject getInstance(String typeName) { - // TODO: create through class name - switch (typeName) { - case "file": - return new FileObject(); - case "storage": - return new StorageObject(); - case "hms": - return new HmsObject(); - default: - return null; - } - } - public Property getProperty(String propertyName) { - return getProperties().get(propertyName); + return properties.get(propertyName); } - - public abstract Map getProperties(); } diff --git a/smart-rule/src/main/java/org/smartdata/rule/objects/SmartObjectSupplier.java b/smart-rule/src/main/java/org/smartdata/rule/objects/SmartObjectSupplier.java new file mode 100644 index 0000000000..b4883076bd --- /dev/null +++ b/smart-rule/src/main/java/org/smartdata/rule/objects/SmartObjectSupplier.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
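SmartObject now carries its property map and base table through the constructor instead of an abstract getProperties override, so a new object type reduces to a static map plus a super call. A minimal sketch; the table, column, and property names are invented, and ObjectType.FILE is reused only for illustration:

```java
package org.smartdata.rule.objects;

import com.google.common.collect.ImmutableMap;
import org.smartdata.rule.parser.ValueType;

import java.util.Map;

// Hypothetical object type, not part of this patch.
public class AuditEventObject extends SmartObject {
  private static final Map<String, Property> PROPERTIES = ImmutableMap.of(
      "source", new Property("source", ValueType.STRING,
          null, "audit_event", "source"));

  public AuditEventObject() {
    // reusing ObjectType.FILE for the sketch; a real type would add its own
    super(ObjectType.FILE, PROPERTIES, "audit_event");
  }
}
```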
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.smartdata.rule.objects; + +public interface SmartObjectSupplier { + SmartObject get(String name); +} diff --git a/smart-common/src/main/java/org/smartdata/conf/ReconfigurableBase.java b/smart-rule/src/main/java/org/smartdata/rule/objects/StaticMapSmartObjectSupplier.java similarity index 67% rename from smart-common/src/main/java/org/smartdata/conf/ReconfigurableBase.java rename to smart-rule/src/main/java/org/smartdata/rule/objects/StaticMapSmartObjectSupplier.java index 4c87f67376..95cbc966a5 100644 --- a/smart-common/src/main/java/org/smartdata/conf/ReconfigurableBase.java +++ b/smart-rule/src/main/java/org/smartdata/rule/objects/StaticMapSmartObjectSupplier.java @@ -15,19 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.smartdata.conf; +package org.smartdata.rule.objects; -import java.util.List; +import lombok.RequiredArgsConstructor; -/** - * This class helps to register all the properties of subclasses. - */ -public abstract class ReconfigurableBase implements Reconfigurable { +import java.util.Map; + +@RequiredArgsConstructor +public class StaticMapSmartObjectSupplier implements SmartObjectSupplier { + private final Map objects; - public ReconfigurableBase() { - List properties = getReconfigurableProperties(); - if (properties != null) { - ReconfigurableRegistry.registReconfigurableProperty(properties, this); - } + @Override + public SmartObject get(String name) { + return objects.get(name); } } diff --git a/smart-rule/src/main/java/org/smartdata/rule/objects/StorageObject.java b/smart-rule/src/main/java/org/smartdata/rule/objects/StorageObject.java index f2d8f28764..a1e6197d40 100644 --- a/smart-rule/src/main/java/org/smartdata/rule/objects/StorageObject.java +++ b/smart-rule/src/main/java/org/smartdata/rule/objects/StorageObject.java @@ -17,36 +17,32 @@ */ package org.smartdata.rule.objects; +import com.google.common.collect.ImmutableMap; import org.smartdata.rule.parser.ValueType; import java.util.Collections; -import java.util.HashMap; import java.util.Map; /** * Definition of rule object 'Storage'. 
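StaticMapSmartObjectSupplier simply delegates to its map, so a lookup of an unregistered object name returns null and surfaces later as a rule-translation failure. A quick sketch of the contract:

```java
SmartObjectSupplier supplier = new DefaultSmartObjectSupplier();
SmartObject file = supplier.get("file");    // a FileObject instance
SmartObject missing = supplier.get("s3");   // null: "s3" is not registered
```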
*/ public class StorageObject extends SmartObject { - public static final Map PROPERTIES; - - static { - PROPERTIES = new HashMap<>(); - PROPERTIES.put("capacity", new Property("capacity", ValueType.LONG, - Collections.singletonList(ValueType.STRING), "storage", "capacity", - "type = $0 AND capacity")); - PROPERTIES.put("free", new Property("free", ValueType.LONG, - Collections.singletonList(ValueType.STRING), "storage", "free", - "type = $0 AND free")); - PROPERTIES.put("utilization", new Property("utilization", ValueType.LONG, - Collections.singletonList(ValueType.STRING), "storage", "free", - "type = $0 AND (capacity - free) * 100.0 / capacity")); - } + public static final Map PROPERTIES = ImmutableMap.of( + "capacity", + new Property("capacity", ValueType.LONG, + Collections.singletonList(ValueType.STRING), "storage", "capacity", + "type = $0 AND capacity"), + "free", + new Property("free", ValueType.LONG, + Collections.singletonList(ValueType.STRING), "storage", "free", + "type = $0 AND free"), + "utilization", + new Property("utilization", ValueType.LONG, + Collections.singletonList(ValueType.STRING), "storage", "free", + "type = $0 AND (capacity - free) * 100.0 / capacity") + ); public StorageObject() { - super(ObjectType.STORAGE, "storage"); - } - - public Map getProperties() { - return PROPERTIES; + super(ObjectType.STORAGE, PROPERTIES, "storage"); } } diff --git a/smart-rule/src/main/java/org/smartdata/rule/parser/SmartRuleStringParser.java b/smart-rule/src/main/java/org/smartdata/rule/parser/SmartRuleStringParser.java index fc29460bff..01b0ca296c 100644 --- a/smart-rule/src/main/java/org/smartdata/rule/parser/SmartRuleStringParser.java +++ b/smart-rule/src/main/java/org/smartdata/rule/parser/SmartRuleStringParser.java @@ -17,6 +17,7 @@ */ package org.smartdata.rule.parser; +import com.google.common.collect.ImmutableMap; import org.antlr.v4.runtime.ANTLRInputStream; import org.antlr.v4.runtime.BaseErrorListener; import org.antlr.v4.runtime.CommonTokenStream; @@ -29,61 +30,48 @@ import org.smartdata.exception.SsmParseException; import org.smartdata.model.CmdletDescriptor; import org.smartdata.model.rule.RuleTranslationResult; +import org.smartdata.rule.objects.SmartObjectSupplier; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; /** Parser a rule string and translate it. 
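CONDITION_REWRITES (the renamed optCond) injects an implicit guard into single-action rules so that files already in the target state are skipped; `ec` and `unec` start from the placeholder `1` and are specialized from the action's arguments (elided in this hunk). An illustrative before/after using the allssd mapping:

```
file: path matches "/data/*" | allssd
-- is translated as if the rule author had written:
file: path matches "/data/*" and storagePolicy != "ALL_SSD" | allssd
```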
*/ public class SmartRuleStringParser { - private String rule; - private TranslationContext ctx = null; - private SmartConf conf; - - private static Map optCond = new HashMap<>(); - - static { - optCond.put("allssd", "storagePolicy != \"ALL_SSD\""); - optCond.put("onessd", "storagePolicy != \"ONE_SSD\""); - optCond.put("archive", "storagePolicy != \"COLD\""); - optCond.put("alldisk", "storagePolicy != \"HOT\""); - optCond.put("onedisk", "storagePolicy != \"WARM\""); - optCond.put("ramdisk", "storagePolicy != \"LAZY_PERSIST\""); - optCond.put("cache", "not inCache"); - optCond.put("uncache", "inCache"); - optCond.put("sync", "unsynced"); - optCond.put("ec", "1"); - optCond.put("unec", "1"); - } + private static final Map CONDITION_REWRITES = + ImmutableMap.builder() + .put("allssd", "storagePolicy != \"ALL_SSD\"") + .put("onessd", "storagePolicy != \"ONE_SSD\"") + .put("archive", "storagePolicy != \"COLD\"") + .put("alldisk", "storagePolicy != \"HOT\"") + .put("onedisk", "storagePolicy != \"WARM\"") + .put("ramdisk", "storagePolicy != \"LAZY_PERSIST\"") + .put("cache", "not inCache") + .put("uncache", "inCache") + .put("sync", "unsynced") + .put("ec", "1") + .put("unec", "1") + .build(); - List parseErrors = new ArrayList(); - String parserErrorMessage = ""; - public class SSMRuleErrorListener extends BaseErrorListener { - @Override - public void syntaxError( - Recognizer recognizer, - Object offendingSymbol, - int line, - int charPositionInLine, - String msg, - RecognitionException e) { - List stack = ((Parser) recognizer).getRuleInvocationStack(); - Collections.reverse(stack); - parserErrorMessage += "Line " + line + ", Char " + charPositionInLine + " : " + msg + "\n"; - parseErrors.add(e); - } - } + private final String rule; + private final SmartConf conf; + private final TranslationContext ctx; + private final SmartObjectSupplier smartObjectSupplier; + private final List parseErrors = new ArrayList<>(); + + private String parserErrorMessage = ""; - public SmartRuleStringParser(String rule, TranslationContext ctx, SmartConf conf) { + public SmartRuleStringParser(String rule, TranslationContext ctx, + SmartObjectSupplier smartObjectSupplier, SmartConf conf) { this.rule = rule; this.ctx = ctx; this.conf = conf; + this.smartObjectSupplier = smartObjectSupplier; } public RuleTranslationResult translate() throws IOException { @@ -93,11 +81,11 @@ public RuleTranslationResult translate() throws IOException { throw new IOException("No cmdlet specified in Rule"); } String actName = cmdDes.getActionName(0); - if (cmdDes.getActionSize() != 1 || optCond.get(actName) == null) { + if (cmdDes.getActionSize() != 1 || CONDITION_REWRITES.get(actName) == null) { return tr; } - String repl = optCond.get(actName); + String repl = CONDITION_REWRITES.get(actName); if (cmdDes.getActionName(0).equals("ec") || cmdDes.getActionName(0).equals("unec")) { String policy; if (cmdDes.getActionName(0).equals("ec")) { @@ -134,13 +122,30 @@ private RuleTranslationResult doTranslate(String rule) throws IOException { throw new SsmParseException(parserErrorMessage); } - SmartRuleVisitTranslator visitor = new SmartRuleVisitTranslator(ctx); + SmartRuleVisitTranslator visitor = new SmartRuleVisitTranslator(ctx, smartObjectSupplier); try { visitor.visit(tree); } catch (RuntimeException e) { - throw new SsmParseException(e.getMessage()); + throw new SsmParseException(e.getMessage(), e); } return visitor.generateSql(); } + + public class SSMRuleErrorListener extends BaseErrorListener { + @Override + public void syntaxError( + 
Recognizer recognizer, + Object offendingSymbol, + int line, + int charPositionInLine, + String msg, + RecognitionException e) { + List stack = ((Parser) recognizer).getRuleInvocationStack(); + Collections.reverse(stack); + parserErrorMessage += "Line " + line + ", Char " + charPositionInLine + " : " + msg + "\n"; + parseErrors.add(e); + } + } } + diff --git a/smart-rule/src/main/java/org/smartdata/rule/parser/SmartRuleVisitTranslator.java b/smart-rule/src/main/java/org/smartdata/rule/parser/SmartRuleVisitTranslator.java index b1ae26679e..393c0da3a3 100644 --- a/smart-rule/src/main/java/org/smartdata/rule/parser/SmartRuleVisitTranslator.java +++ b/smart-rule/src/main/java/org/smartdata/rule/parser/SmartRuleVisitTranslator.java @@ -31,6 +31,7 @@ import org.smartdata.rule.objects.Property; import org.smartdata.rule.objects.PropertyRealParas; import org.smartdata.rule.objects.SmartObject; +import org.smartdata.rule.objects.SmartObjectSupplier; import org.smartdata.rule.parser.SmartRuleParser.TimeintvalexprContext; import org.smartdata.utils.StringUtil; @@ -58,6 +59,7 @@ public class SmartRuleVisitTranslator extends SmartRuleBaseVisitor { private final TranslationContext transCtx; private final CmdletParser cmdletParser; + private final SmartObjectSupplier smartObjectSupplier; private TreeNode objFilter; private TreeNode conditions; @@ -67,12 +69,14 @@ public class SmartRuleVisitTranslator extends SmartRuleBaseVisitor { private int[] condPosition; private long minTimeInterval; - public SmartRuleVisitTranslator() { - this(null); + public SmartRuleVisitTranslator(SmartObjectSupplier smartObjectSupplier) { + this(null, smartObjectSupplier); } - public SmartRuleVisitTranslator(TranslationContext transCtx) { + public SmartRuleVisitTranslator(TranslationContext transCtx, + SmartObjectSupplier smartObjectSupplier) { this.transCtx = transCtx; + this.smartObjectSupplier = smartObjectSupplier; this.objects = new HashMap<>(); this.pathCheckGlob = new ArrayList<>(); this.minTimeInterval = Long.MAX_VALUE; @@ -82,18 +86,18 @@ public SmartRuleVisitTranslator(TranslationContext transCtx) { @Override public TreeNode visitObjTypeOnly(SmartRuleParser.ObjTypeOnlyContext ctx) { String objName = ctx.OBJECTTYPE().getText(); - SmartObject obj = SmartObject.getInstance(objName); - objects.put(objName, obj); - objects.put("Default", obj); + SmartObject object = smartObjectSupplier.get(objName); + objects.put(objName, object); + objects.put("Default", object); return null; } @Override public TreeNode visitObjTypeWith(SmartRuleParser.ObjTypeWithContext ctx) { String objName = ctx.OBJECTTYPE().getText(); - SmartObject obj = SmartObject.getInstance(objName); - objects.put(objName, obj); - objects.put("Default", obj); + SmartObject object = smartObjectSupplier.get(objName); + objects.put(objName, object); + objects.put("Default", object); objFilter = visit(ctx.objfilter()); return null; } @@ -270,12 +274,7 @@ public TreeNode visitTieTiIdExpr(SmartRuleParser.TieTiIdExprContext ctx) { } private SmartObject createIfNotExist(String objName) { - SmartObject obj = objects.get(objName); - if (obj == null) { - obj = SmartObject.getInstance(objName); - objects.put(objName, obj); - } - return obj; + return objects.computeIfAbsent(objName, smartObjectSupplier::get); } // ID @@ -682,10 +681,10 @@ public RuleTranslationResult generateSql() throws IOException { switch (object.getType()) { case DIRECTORY: case FILE: - ret = "SELECT path FROM file"; + ret = "SELECT path FROM " + object.getBaseTableName(); break; case HMS: - ret = "SELECT 
id FROM hive_metastore_event"; + ret = "SELECT id FROM " + object.getBaseTableName(); break; default: throw new IOException( diff --git a/smart-rule/src/test/java/org/smartdata/rule/TestSmartRuleParser.java b/smart-rule/src/test/java/org/smartdata/rule/TestSmartRuleParser.java index 548abab932..cdd28ca5f3 100644 --- a/smart-rule/src/test/java/org/smartdata/rule/TestSmartRuleParser.java +++ b/smart-rule/src/test/java/org/smartdata/rule/TestSmartRuleParser.java @@ -27,6 +27,7 @@ import org.junit.Assert; import org.junit.Test; import org.smartdata.model.rule.RuleTranslationResult; +import org.smartdata.rule.objects.DefaultSmartObjectSupplier; import org.smartdata.rule.parser.SmartRuleLexer; import org.smartdata.rule.parser.SmartRuleParser; import org.smartdata.rule.parser.SmartRuleVisitTranslator; @@ -123,7 +124,8 @@ private void parseAndExecuteRule(String rule) throws Exception { System.out.println("Parser tree: " + tree.toStringTree(parser)); System.out.println("Total number of errors: " + parseErrors.size()); - SmartRuleVisitTranslator visitor = new SmartRuleVisitTranslator(); + SmartRuleVisitTranslator visitor = new SmartRuleVisitTranslator( + new DefaultSmartObjectSupplier()); visitor.visit(tree); System.out.println("\nQuery:"); diff --git a/smart-rule/src/test/java/org/smartdata/rule/TestSmartRuleStringParser.java b/smart-rule/src/test/java/org/smartdata/rule/TestSmartRuleStringParser.java index 7b942a0905..663556841a 100644 --- a/smart-rule/src/test/java/org/smartdata/rule/TestSmartRuleStringParser.java +++ b/smart-rule/src/test/java/org/smartdata/rule/TestSmartRuleStringParser.java @@ -20,6 +20,7 @@ import org.junit.Test; import org.smartdata.conf.SmartConf; import org.smartdata.model.rule.RuleTranslationResult; +import org.smartdata.rule.objects.DefaultSmartObjectSupplier; import org.smartdata.rule.parser.SmartRuleStringParser; import org.smartdata.rule.parser.TranslationContext; @@ -50,7 +51,8 @@ public void testRuleTranslate() throws Exception { private void parseRule(String rule) throws Exception { TranslationContext tc = new TranslationContext(1, System.currentTimeMillis()); - SmartRuleStringParser parser = new SmartRuleStringParser(rule, tc, new SmartConf()); + SmartRuleStringParser parser = new SmartRuleStringParser(rule, tc, + new DefaultSmartObjectSupplier(), new SmartConf()); RuleTranslationResult tr = parser.translate(); int index = 1; diff --git a/smart-server/src/main/java/org/smartdata/server/SmartDaemon.java b/smart-server/src/main/java/org/smartdata/server/SmartDaemon.java index e06205bbe1..8e3aee79b7 100644 --- a/smart-server/src/main/java/org/smartdata/server/SmartDaemon.java +++ b/smart-server/src/main/java/org/smartdata/server/SmartDaemon.java @@ -34,6 +34,8 @@ import java.io.IOException; +import static org.smartdata.server.utils.ConfigUtil.enrichSmartConf; + public class SmartDaemon implements ServerDaemon { private static final Logger LOG = LoggerFactory.getLogger(SmartDaemon.class); private final String[] args; @@ -51,7 +53,7 @@ public void start() throws IOException, InterruptedException { if (HazelcastUtil.isMaster(instance)) { SmartServer.main(args); } else { - HadoopUtil.setSmartConfByHadoop(conf); + enrichSmartConf(conf); String rpcHost = HazelcastUtil .getMasterMember(HazelcastInstanceProvider.getInstance(conf)) diff --git a/smart-server/src/main/java/org/smartdata/server/SmartRpcServer.java b/smart-server/src/main/java/org/smartdata/server/SmartRpcServer.java index 7f72516d3f..8332caae4e 100644 --- 
a/smart-server/src/main/java/org/smartdata/server/SmartRpcServer.java +++ b/smart-server/src/main/java/org/smartdata/server/SmartRpcServer.java @@ -158,7 +158,7 @@ private void checkIfActive() throws IOException { public void reportFileAccessEvent(FileAccessEvent event) throws IOException { checkIfActive(); - ssm.getStatesManager().reportFileAccessEvent(event); + ssm.getFileAccessManager().reportFileAccessEvent(event); } @Override diff --git a/smart-server/src/main/java/org/smartdata/server/SmartServer.java b/smart-server/src/main/java/org/smartdata/server/SmartServer.java index 44eddf1e44..fa0d147df7 100644 --- a/smart-server/src/main/java/org/smartdata/server/SmartServer.java +++ b/smart-server/src/main/java/org/smartdata/server/SmartServer.java @@ -30,7 +30,6 @@ import org.smartdata.SmartServiceState; import org.smartdata.conf.SmartConf; import org.smartdata.conf.SmartConfKeys; -import org.smartdata.hdfs.HadoopUtil; import org.smartdata.http.SmartHttpServer; import org.smartdata.metastore.MetaStore; import org.smartdata.metrics.MetricsFactory; @@ -38,7 +37,7 @@ import org.smartdata.server.engine.CmdletManager; import org.smartdata.server.engine.RuleManager; import org.smartdata.server.engine.ServerContext; -import org.smartdata.server.engine.StatesManager; +import org.smartdata.server.engine.file.FileAccessManager; import org.smartdata.server.utils.GenericOptionsParser; import java.io.File; @@ -52,6 +51,7 @@ import static org.smartdata.SmartConstants.NUMBER_OF_SMART_AGENT; import static org.smartdata.metastore.utils.MetaStoreUtils.getDBAdapter; +import static org.smartdata.server.utils.ConfigUtil.enrichSmartConf; /** * From this Smart Storage Management begins. @@ -81,7 +81,7 @@ public SmartServer(SmartConf conf, MetaStore metaStore) throws IOException { private void initWith(MetaStore metaStore) throws IOException { LOG.info("Start Init Smart Server"); - HadoopUtil.setSmartConfByHadoop(conf); + enrichSmartConf(conf); MetricsFactory metricsFactory = MetricsFactory.from(conf, SMART_SERVER_BASE_TAGS); metaStore.dbPool().bindMetrics(metricsFactory); @@ -100,8 +100,8 @@ private void setRpcServerAddress(InetSocketAddress address) { address.getHostString() + ":" + address.getPort()); } - public StatesManager getStatesManager() { - return engine.getStatesManager(); + public FileAccessManager getFileAccessManager() { + return engine.getFileAccessManager(); } public RuleManager getRuleManager() { @@ -206,7 +206,7 @@ static SmartServer processWith(StartupOption startOption, SmartConf conf) throws } private static boolean parseHelpArgument(String[] args, - String helpDescription, PrintStream out) { + String helpDescription, PrintStream out) { try { CommandLineParser parser = new PosixParser(); CommandLine cmdLine = parser.parse(helpOptions, args); diff --git a/smart-server/src/test/java/org/smartdata/server/OzoneSmartClusterHarness.java b/smart-server/src/test/java/org/smartdata/server/OzoneSmartClusterHarness.java index dc381c29d9..cfd1eca934 100644 --- a/smart-server/src/test/java/org/smartdata/server/OzoneSmartClusterHarness.java +++ b/smart-server/src/test/java/org/smartdata/server/OzoneSmartClusterHarness.java @@ -17,9 +17,37 @@ */ package org.smartdata.server; +import org.junit.After; +import org.junit.Before; +import org.smartdata.conf.SmartConf; +import org.smartdata.conf.SmartConfKeys; +import org.smartdata.conf.SmartFsType; import org.smartdata.ozone.OzoneClusterHarness; -// TODO init SmartServer when ADH-7056 will be completed +import java.io.IOException; + +import static 
org.smartdata.conf.SmartConfKeys.SMART_FS_TYPE; + public class OzoneSmartClusterHarness extends OzoneClusterHarness { protected SmartServer ssm; + protected SmartConf smartConf; + + @Before + public void initSsm() throws Exception { + // Set db used + smartConf = new SmartConf(ozoneConf); + smartConf.set(SMART_FS_TYPE, SmartFsType.OZONE.toString()); + smartConf.set(SmartConfKeys.SMART_OZONE_RPC_SERVER_KEY, + ozoneContainer.getOmRpcAddress()); + + // rpcServer start in SmartServer + ssm = SmartServer.launchWith(smartConf); + } + + @After + public void shutdown() throws IOException { + if (ssm != null) { + ssm.shutdown(); + } + } } diff --git a/smart-server/src/test/java/org/smartdata/server/TestSmartServerCli.java b/smart-server/src/test/java/org/smartdata/server/TestSmartServerCli.java deleted file mode 100644 index 8bcb7f7b6f..0000000000 --- a/smart-server/src/test/java/org/smartdata/server/TestSmartServerCli.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.smartdata.server; - -import org.apache.hadoop.hdfs.DFSUtil; -import org.junit.Assert; -import org.junit.Test; -import org.smartdata.conf.SmartConf; -import org.smartdata.conf.SmartConfKeys; -import org.smartdata.hdfs.MiniClusterHarness; - -import java.net.URI; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -public class TestSmartServerCli extends MiniClusterHarness { - - @Test - public void testConfNameNodeRPCAddr() throws Exception { - try { - Collection namenodes = DFSUtil.getInternalNsRpcUris(smartContext.getConf()); - List uriList = new ArrayList<>(namenodes); - SmartConf conf = new SmartConf(); - - // rpcServer start in SmartServer - SmartServer ssm = null; - try { - ssm = SmartServer.launchWith(conf); - Thread.sleep(2000); - } catch (Exception e) { - Assert.fail("Should work without specifying NN"); - } finally { - if (ssm != null) { - ssm.shutdown(); - } - } - - conf.set(SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY, - uriList.get(0).toString()); - String[] args = new String[]{ - "-D", - SmartConfKeys.SMART_DFS_NAMENODE_RPCSERVER_KEY + "=" - + uriList.get(0).toString() - }; - - SmartServer regServer = SmartServer.launchWith(args, conf); - Assert.assertNotNull(regServer); - Thread.sleep(1000); - regServer.shutdown(); - - args = new String[]{ - "-h" - }; - SmartServer.launchWith(args, conf); - } finally { - cluster.shutdown(); - } - } -} diff --git a/smart-server/src/test/java/org/smartdata/server/engine/TestCmdletManager.java b/smart-server/src/test/java/org/smartdata/server/engine/TestCmdletManager.java index 3ff569fad3..188659ccf5 100644 --- a/smart-server/src/test/java/org/smartdata/server/engine/TestCmdletManager.java +++ 
b/smart-server/src/test/java/org/smartdata/server/engine/TestCmdletManager.java @@ -26,6 +26,7 @@ import org.smartdata.action.ActionRegistry; import org.smartdata.cmdlet.parser.CmdletParser; import org.smartdata.conf.SmartConf; +import org.smartdata.conf.SmartFsType; import org.smartdata.exception.SsmParseException; import org.smartdata.metastore.MetaStore; import org.smartdata.model.ActionInfo; @@ -44,6 +45,7 @@ import org.smartdata.server.engine.audit.AuditService; import org.smartdata.server.engine.cmdlet.CmdletDispatcher; import org.smartdata.server.engine.cmdlet.CmdletInfoHandler; +import org.smartdata.server.engine.filesystem.FileSystemContext; import java.io.IOException; import java.util.Collections; @@ -105,7 +107,6 @@ public void testSubmitAPI() throws Exception { Path dir3 = new Path("/testCacheFile"); dfs.mkdirs(dir3); - Assert.assertFalse(ActionRegistry.supportedActions().isEmpty()); CmdletInfo cmdletInfo = cmdletManager.submitCmdlet( "allssd -file /testMoveFile/file1 ; cache -file /testCacheFile ; " + "write -file /test -length 1024"); @@ -185,8 +186,11 @@ public void testWithoutCluster() throws Exception { Assert.assertNotNull(dispatcher); when(dispatcher.canDispatchMore()).thenReturn(true); ServerContext serverContext = new ServerContext(new SmartConf(), metaStore); + FileSystemContext fsCtx = FileSystemContext.fromFsType(SmartFsType.HDFS); CmdletManager cmdletManager = new CmdletManager( - serverContext, auditService, principalManager); + serverContext, auditService, principalManager, + new ActionRegistry(fsCtx.actionFactories()), + fsCtx.actionSchedulerServices(serverContext)); cmdletManager.init(); cmdletManager.setDispatcher(dispatcher); @@ -294,7 +298,7 @@ private void safeSubmit(String cmdlet) { } private void flushToDB(MetaStore metaStore, - List actionInfos, CmdletInfo cmdletInfo) throws Exception { + List actionInfos, CmdletInfo cmdletInfo) throws Exception { for (ActionInfo actionInfo : actionInfos) { cmdletInfo.addAction(actionInfo.getActionId()); } diff --git a/smart-server/src/test/java/org/smartdata/server/engine/audit/TestCmdletLifecycleLogger.java b/smart-server/src/test/java/org/smartdata/server/engine/audit/TestCmdletLifecycleLogger.java index d9f72562a6..c7decc0a5e 100644 --- a/smart-server/src/test/java/org/smartdata/server/engine/audit/TestCmdletLifecycleLogger.java +++ b/smart-server/src/test/java/org/smartdata/server/engine/audit/TestCmdletLifecycleLogger.java @@ -21,7 +21,9 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.smartdata.action.ActionRegistry; import org.smartdata.conf.SmartConf; +import org.smartdata.conf.SmartFsType; import org.smartdata.metastore.TestDaoBase; import org.smartdata.model.CmdletInfo; import org.smartdata.model.audit.UserActivityEvent; @@ -33,6 +35,7 @@ import org.smartdata.server.engine.CmdletManager; import org.smartdata.server.engine.ServerContext; import org.smartdata.server.engine.cmdlet.CmdletDispatcherHelper; +import org.smartdata.server.engine.filesystem.FileSystemContext; import java.util.Collections; import java.util.List; @@ -58,8 +61,14 @@ public void init() throws Exception { principalManager = new ThreadScopeSmartPrincipalManager( new AnonymousDefaultPrincipalProvider()); + FileSystemContext fsContext = FileSystemContext.fromFsType(SmartFsType.HDFS); CmdletDispatcherHelper.init(); - cmdletManager = new CmdletManager(serverContext, auditService, principalManager); + cmdletManager = new CmdletManager( + serverContext, + auditService, + principalManager, + new 
ActionRegistry(fsContext.actionFactories()), + fsContext.actionSchedulerServices(serverContext)); cmdletManager.init(); cmdletManager.start(); } diff --git a/smart-server/src/test/java/org/smartdata/server/engine/audit/TestRuleLifecycleLogger.java b/smart-server/src/test/java/org/smartdata/server/engine/audit/TestRuleLifecycleLogger.java index be3345b5ff..409ae2aa1e 100644 --- a/smart-server/src/test/java/org/smartdata/server/engine/audit/TestRuleLifecycleLogger.java +++ b/smart-server/src/test/java/org/smartdata/server/engine/audit/TestRuleLifecycleLogger.java @@ -21,7 +21,9 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.smartdata.action.ActionRegistry; import org.smartdata.conf.SmartConf; +import org.smartdata.conf.SmartFsType; import org.smartdata.metastore.TestDaoBase; import org.smartdata.model.RuleInfo; import org.smartdata.model.RuleState; @@ -33,6 +35,7 @@ import org.smartdata.security.ThreadScopeSmartPrincipalManager; import org.smartdata.server.engine.RuleManager; import org.smartdata.server.engine.ServerContext; +import org.smartdata.server.engine.filesystem.FileSystemContext; import java.util.Collections; import java.util.List; @@ -59,8 +62,13 @@ public void init() throws Exception { principalManager = new ThreadScopeSmartPrincipalManager( new AnonymousDefaultPrincipalProvider()); + FileSystemContext fsContext = FileSystemContext.fromFsType(SmartFsType.HDFS); ruleManager = new RuleManager( - serverContext, null, null, auditService, principalManager); + serverContext, null, auditService, + new ActionRegistry(fsContext.actionFactories()), + principalManager, + fsContext.smartObjectSupplier(), + fsContext.ruleExecutorPlugins(serverContext, null)); ruleManager.init(); ruleManager.start(); } diff --git a/smart-server/src/test/java/org/smartdata/server/engine/rule/TestRuleManager.java b/smart-server/src/test/java/org/smartdata/server/engine/rule/TestRuleManager.java index 84f6f572ab..e40c57f470 100644 --- a/smart-server/src/test/java/org/smartdata/server/engine/rule/TestRuleManager.java +++ b/smart-server/src/test/java/org/smartdata/server/engine/rule/TestRuleManager.java @@ -21,7 +21,9 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.smartdata.action.ActionRegistry; import org.smartdata.conf.SmartConf; +import org.smartdata.conf.SmartFsType; import org.smartdata.exception.NotFoundException; import org.smartdata.metastore.TestDaoBase; import org.smartdata.metastore.model.SearchResult; @@ -38,6 +40,7 @@ import org.smartdata.server.engine.RuleManager; import org.smartdata.server.engine.ServerContext; import org.smartdata.server.engine.audit.AuditService; +import org.smartdata.server.engine.filesystem.FileSystemContext; import java.util.Collections; import java.util.List; @@ -57,8 +60,13 @@ public void init() throws Exception { ServerContext serverContext = new ServerContext(smartConf, metaStore); SmartPrincipalManager principalManager = new ThreadScopeSmartPrincipalManager( new AnonymousDefaultPrincipalProvider()); + FileSystemContext fsContext = FileSystemContext.fromFsType(SmartFsType.HDFS); ruleManager = new RuleManager(serverContext, null, - null, new NoOpAuditService(), principalManager); + new NoOpAuditService(), + new ActionRegistry(fsContext.actionFactories()), + principalManager, + fsContext.smartObjectSupplier(), + fsContext.ruleExecutorPlugins(serverContext, null)); ruleManager.init(); ruleManager.start(); } @@ -238,7 +246,7 @@ public void testMultiThreadUpdate() throws Exception { long start = 
System.currentTimeMillis(); - Thread[] threads = new Thread[] { + Thread[] threads = new Thread[]{ new Thread(new RuleInfoUpdater(rid, 3)), // new Thread(new RuleInfoUpdater(rid, 7)), // new Thread(new RuleInfoUpdater(rid, 11)), diff --git a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/SmartMasterRestServer.java b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/SmartMasterRestServer.java index 21f7624276..34b04e0383 100644 --- a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/SmartMasterRestServer.java +++ b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/SmartMasterRestServer.java @@ -47,7 +47,7 @@ private static class ContextInitializer implements public void initialize(ConfigurableApplicationContext applicationContext) { ConfigurableListableBeanFactory beanFactory = applicationContext.getBeanFactory(); beanFactory.registerSingleton("smartEngine", smartEngine); - beanFactory.registerSingleton("statesManager", smartEngine.getStatesManager()); + beanFactory.registerSingleton("statesManager", smartEngine.getFileAccessManager()); beanFactory.registerSingleton("cmdletManager", smartEngine.getCmdletManager()); beanFactory.registerSingleton("ruleManager", smartEngine.getRuleManager()); beanFactory.registerSingleton("auditService", smartEngine.getAuditService()); @@ -60,11 +60,12 @@ public void initialize(ConfigurableApplicationContext applicationContext) { beanFactory.registerSingleton( "actionInfoHandler", smartEngine.getCmdletManager().getActionInfoHandler()); beanFactory.registerSingleton( - "cachedFilesManager", smartEngine.getStatesManager().getCachedFilesManager()); + "cachedFilesManager", smartEngine.getCachedFilesManager()); beanFactory.registerSingleton( "smartPrincipalManager", smartEngine.getSmartPrincipalManager()); beanFactory.registerSingleton("dbFileAccessManager", - smartEngine.getStatesManager().getFileAccessManager()); + smartEngine.getFileAccessManager().getFileAccessCountManager()); + beanFactory.registerSingleton("actionRegistry", smartEngine.getActionRegistry()); } } diff --git a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/controller/CachedFilesControllerDelegate.java b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/controller/CachedFilesControllerDelegate.java index ac1aa9e2a1..9a1af745df 100644 --- a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/controller/CachedFilesControllerDelegate.java +++ b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/controller/CachedFilesControllerDelegate.java @@ -21,7 +21,7 @@ import org.smartdata.metastore.queries.PageRequest; import org.smartdata.metastore.queries.sort.CachedFilesSortField; import org.smartdata.model.request.CachedFileSearchRequest; -import org.smartdata.server.engine.CachedFilesManager; +import org.smartdata.server.engine.file.CachedFilesManager; import org.smartdata.server.generated.model.CachedFileSortDto; import org.smartdata.server.generated.model.CachedFilesDto; import org.smartdata.server.generated.model.CachedTimeIntervalDto; diff --git a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/controller/FilesControllerDelegate.java b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/controller/FilesControllerDelegate.java index 061b2483e7..d6135a7b60 100644 --- 
a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/controller/FilesControllerDelegate.java +++ b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/controller/FilesControllerDelegate.java @@ -18,7 +18,7 @@ package org.smartdata.server.controller; import lombok.RequiredArgsConstructor; -import org.smartdata.metastore.accesscount.FileAccessManager; +import org.smartdata.metastore.accesscount.FileAccessCountManager; import org.smartdata.metastore.model.SearchResult; import org.smartdata.metastore.queries.PageRequest; import org.smartdata.metastore.queries.sort.FileAccessInfoSortField; @@ -44,26 +44,26 @@ @RequiredArgsConstructor public class FilesControllerDelegate implements FilesApiDelegate { - private final FileAccessManager fileAccessManager; + private final FileAccessCountManager fileAccessManager; private final FileAccessInfoMapper fileInfoMapper; private final FileAccessInfoPageRequestMapper pageRequestMapper; private final CachedFilesControllerDelegate cachedFilesControllerDelegate; @Override public CachedFilesDto getCachedFiles(PageRequestDto pageRequestDto, - List<@Valid CachedFileSortDto> sort, - String pathLike, - LastAccessedTimeIntervalDto lastAccessedTime, - CachedTimeIntervalDto cachedTime) throws Exception { + List<@Valid CachedFileSortDto> sort, + String pathLike, + LastAccessedTimeIntervalDto lastAccessedTime, + CachedTimeIntervalDto cachedTime) throws Exception { return cachedFilesControllerDelegate.getCachedFiles( pageRequestDto, sort, pathLike, lastAccessedTime, cachedTime); } @Override public FileAccessCountsDto getAccessCounts(PageRequestDto pageRequestDto, - List<@Valid HotFileSortDto> sort, - String pathLike, - LastAccessedTimeIntervalDto lastAccessedTime) + List<@Valid HotFileSortDto> sort, + String pathLike, + LastAccessedTimeIntervalDto lastAccessedTime) throws Exception { PageRequest pageRequest = pageRequestMapper.toPageRequest(pageRequestDto, sort); diff --git a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/controller/MetadataControllerDelegate.java b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/controller/MetadataControllerDelegate.java new file mode 100644 index 0000000000..2857a67ca0 --- /dev/null +++ b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/controller/MetadataControllerDelegate.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.smartdata.server.controller; + +import lombok.RequiredArgsConstructor; +import org.smartdata.action.ActionMetadata; +import org.smartdata.action.ActionRegistry; +import org.smartdata.server.generated.api.MetadataApiDelegate; +import org.smartdata.server.generated.model.ActionsMetadataDto; +import org.smartdata.server.mappers.ActionMetadataMapper; +import org.springframework.stereotype.Component; + +import java.util.Set; + +@Component +@RequiredArgsConstructor +public class MetadataControllerDelegate implements MetadataApiDelegate { + + private final ActionRegistry actionRegistry; + private final ActionMetadataMapper actionMetadataMapper; + + @Override + public ActionsMetadataDto getActionsMetadata() { + Set actionMetadata = actionRegistry.getActionMetadata(); + return actionMetadataMapper.toActionsMetadataDto(actionMetadata); + } +} diff --git a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/api/MetadataApi.java b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/api/MetadataApi.java new file mode 100644 index 0000000000..53afe73215 --- /dev/null +++ b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/api/MetadataApi.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.smartdata.server.generated.api; + +import io.swagger.v3.oas.annotations.Operation; +import io.swagger.v3.oas.annotations.media.Content; +import io.swagger.v3.oas.annotations.media.Schema; +import io.swagger.v3.oas.annotations.responses.ApiResponse; +import io.swagger.v3.oas.annotations.security.SecurityRequirement; +import io.swagger.v3.oas.annotations.tags.Tag; +import org.smartdata.server.generated.model.ActionsMetadataDto; +import org.springframework.http.HttpStatus; +import org.springframework.validation.annotation.Validated; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestMethod; +import org.springframework.web.bind.annotation.ResponseStatus; + +import javax.annotation.Generated; + +@Generated(value = "org.openapitools.codegen.languages.SpringCodegen") +@Validated +@Tag(name = "Metadata", description = "the Metadata API") +public interface MetadataApi { + + default MetadataApiDelegate getDelegate() { + return new MetadataApiDelegate() { + }; + } + + /** + * GET /api/v2/metadata/actions : List all actions metadata + * + * @return OK (status code 200) + * or Unauthorized (status code 401) + */ + @Operation( + operationId = "getActionsMetadata", + summary = "List all actions metadata", + tags = {"Metadata"}, + responses = { + @ApiResponse(responseCode = "200", description = "OK", content = { + @Content(mediaType = "application/json", schema = @Schema(implementation = ActionsMetadataDto.class)) + }), + @ApiResponse(responseCode = "401", description = "Unauthorized") + }, + security = { + @SecurityRequirement(name = "basicAuth") + } + ) + @RequestMapping( + method = RequestMethod.GET, + value = "/api/v2/metadata/actions", + produces = {"application/json"} + ) + @ResponseStatus(HttpStatus.OK) + + default ActionsMetadataDto getActionsMetadata( + + ) throws Exception { + return getDelegate().getActionsMetadata(); + } + +} diff --git a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/api/MetadataApiController.java b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/api/MetadataApiController.java new file mode 100644 index 0000000000..f292eb4c1a --- /dev/null +++ b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/api/MetadataApiController.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.smartdata.server.generated.api; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +import javax.annotation.Generated; + +import java.util.Optional; + +@Generated(value = "org.openapitools.codegen.languages.SpringCodegen") +@RestController +@RequestMapping("${openapi.sSMAPIDocumentation.base-path:}") +public class MetadataApiController implements MetadataApi { + + private final MetadataApiDelegate delegate; + + public MetadataApiController(@Autowired(required = false) MetadataApiDelegate delegate) { + this.delegate = Optional.ofNullable(delegate).orElse(new MetadataApiDelegate() {}); + } + + @Override + public MetadataApiDelegate getDelegate() { + return delegate; + } + +} diff --git a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/api/MetadataApiDelegate.java b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/api/MetadataApiDelegate.java new file mode 100644 index 0000000000..fad15d8de2 --- /dev/null +++ b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/api/MetadataApiDelegate.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.smartdata.server.generated.api; + +import org.smartdata.server.generated.model.ActionsMetadataDto; +import org.springframework.web.context.request.NativeWebRequest; + +import javax.annotation.Generated; + +import java.util.Optional; + +/** + * A delegate to be called by the {@link MetadataApiController}}. + * Implement this interface with a {@link org.springframework.stereotype.Service} annotated class. 
+ */
+@Generated(value = "org.openapitools.codegen.languages.SpringCodegen")
+public interface MetadataApiDelegate {
+
+  default Optional<NativeWebRequest> getRequest() {
+    return Optional.empty();
+  }
+
+  /**
+   * GET /api/v2/metadata/actions : List all actions metadata
+   *
+   * @return OK (status code 200)
+   *         or Unauthorized (status code 401)
+   * @see MetadataApi#getActionsMetadata
+   */
+  default ActionsMetadataDto getActionsMetadata() throws Exception {
+    throw new IllegalArgumentException("Not implemented");
+
+  }
+
+}
diff --git a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/model/ActionMetadataDto.java b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/model/ActionMetadataDto.java
new file mode 100644
index 0000000000..e09380976e
--- /dev/null
+++ b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/model/ActionMetadataDto.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.smartdata.server.generated.model;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import io.swagger.v3.oas.annotations.media.Schema;
+
+import javax.annotation.Generated;
+import javax.validation.constraints.NotNull;
+
+import java.util.Objects;
+
+/**
+ * ActionMetadataDto
+ */
+
+@JsonTypeName("ActionMetadata")
+@Generated(value = "org.openapitools.codegen.languages.SpringCodegen")
+public class ActionMetadataDto {
+
+  private String name;
+
+  private String usage = null;
+
+  public ActionMetadataDto() {
+    super();
+  }
+
+  /**
+   * Constructor with only required parameters
+   */
+  public ActionMetadataDto(String name) {
+    this.name = name;
+  }
+
+  public ActionMetadataDto name(String name) {
+    this.name = name;
+    return this;
+  }
+
+  /**
+   * Name of the action
+   * @return name
+   */
+  @NotNull
+  @Schema(name = "name", description = "Name of the action", requiredMode = Schema.RequiredMode.REQUIRED)
+  @JsonProperty("name")
+  public String getName() {
+    return name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public ActionMetadataDto usage(String usage) {
+    this.usage = usage;
+    return this;
+  }
+
+  /**
+   * Usage info of the action
+   * @return usage
+   */
+
+  @Schema(name = "usage", description = "Usage info of the action", requiredMode = Schema.RequiredMode.NOT_REQUIRED)
+  @JsonProperty("usage")
+  public String getUsage() {
+    return usage;
+  }
+
+  public void setUsage(String usage) {
+    this.usage = usage;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
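+    // Value-based equality: two metadata DTOs are equal iff both name and usage match.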
ActionMetadataDto actionMetadata = (ActionMetadataDto) o; + return Objects.equals(this.name, actionMetadata.name) && + Objects.equals(this.usage, actionMetadata.usage); + } + + @Override + public int hashCode() { + return Objects.hash(name, usage); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class ActionMetadataDto {\n"); + sb.append(" name: ").append(toIndentedString(name)).append("\n"); + sb.append(" usage: ").append(toIndentedString(usage)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} + diff --git a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/model/ActionsMetadataDto.java b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/model/ActionsMetadataDto.java new file mode 100644 index 0000000000..f702c29b21 --- /dev/null +++ b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/generated/model/ActionsMetadataDto.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.smartdata.server.generated.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonTypeName; +import io.swagger.v3.oas.annotations.media.Schema; + +import javax.annotation.Generated; +import javax.validation.Valid; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +/** + * ActionsMetadataDto + */ + +@JsonTypeName("ActionsMetadata") +@Generated(value = "org.openapitools.codegen.languages.SpringCodegen") +public class ActionsMetadataDto { + + @Valid + private List<@Valid ActionMetadataDto> items; + + public ActionsMetadataDto items(List<@Valid ActionMetadataDto> items) { + this.items = items; + return this; + } + + public ActionsMetadataDto addItemsItem(ActionMetadataDto itemsItem) { + if (this.items == null) { + this.items = new ArrayList<>(); + } + this.items.add(itemsItem); + return this; + } + + /** + * List of actions metadata + * @return items + */ + @Valid + @Schema(name = "items", description = "List of actions metadata", requiredMode = Schema.RequiredMode.NOT_REQUIRED) + @JsonProperty("items") + public List<@Valid ActionMetadataDto> getItems() { + return items; + } + + public void setItems(List<@Valid ActionMetadataDto> items) { + this.items = items; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ActionsMetadataDto actionsMetadata = (ActionsMetadataDto) o; + return Objects.equals(this.items, actionsMetadata.items); + } + + @Override + public int hashCode() { + return Objects.hash(items); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class ActionsMetadataDto {\n"); + sb.append(" items: ").append(toIndentedString(items)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} + diff --git a/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/mappers/ActionMetadataMapper.java b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/mappers/ActionMetadataMapper.java new file mode 100644 index 0000000000..046bace275 --- /dev/null +++ b/smart-web-server/smart-master-web-server/src/main/java/org/smartdata/server/mappers/ActionMetadataMapper.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.smartdata.server.mappers;
+
+import org.mapstruct.Mapper;
+import org.mapstruct.ReportingPolicy;
+import org.smartdata.action.ActionMetadata;
+import org.smartdata.server.generated.model.ActionMetadataDto;
+import org.smartdata.server.generated.model.ActionsMetadataDto;
+
+import java.util.List;
+import java.util.Set;
+
+
+@Mapper(componentModel = "spring", unmappedTargetPolicy = ReportingPolicy.ERROR)
+public interface ActionMetadataMapper extends SmartMapper {
+
+  List<ActionMetadataDto> toActionMetadataDtos(Set<ActionMetadata> metadataSet);
+
+  default ActionsMetadataDto toActionsMetadataDto(Set<ActionMetadata> metadataSet) {
+    return new ActionsMetadataDto()
+        .items(toActionMetadataDtos(metadataSet));
+  }
+}
diff --git a/smart-web-server/smart-master-web-server/src/main/resources/api/resources/actions-metadata.yaml b/smart-web-server/smart-master-web-server/src/main/resources/api/resources/actions-metadata.yaml
new file mode 100644
index 0000000000..298a0bf137
--- /dev/null
+++ b/smart-web-server/smart-master-web-server/src/main/resources/api/resources/actions-metadata.yaml
@@ -0,0 +1,21 @@
+get:
+  tags:
+    - Metadata
+  summary: List all actions metadata
+  operationId: getActionsMetadata
+  responses:
+    '200':
+      description: OK
+      content:
+        application/json:
+          schema:
+            type: object
+            title: ActionsMetadata
+            properties:
+              items:
+                type: array
+                description: List of actions metadata
+                items:
+                  $ref: '../schemas/metadata/ActionMetadata.yaml'
+    '401':
+      description: Unauthorized
diff --git a/smart-web-server/smart-master-web-server/src/main/resources/api/schemas/metadata/ActionMetadata.yaml b/smart-web-server/smart-master-web-server/src/main/resources/api/schemas/metadata/ActionMetadata.yaml
new file mode 100644
index 0000000000..126a2ea191
--- /dev/null
+++ b/smart-web-server/smart-master-web-server/src/main/resources/api/schemas/metadata/ActionMetadata.yaml
@@ -0,0 +1,13 @@
+title: ActionMetadata
+type: object
+properties:
+  name:
+    type: string
+    description: Name of the action
+  usage:
+    type: string
+    nullable: true
+    default: null
+    description: Usage info of the action
+required:
+  - name
diff --git a/smart-web-server/smart-master-web-server/src/main/resources/api/ssm-api.yaml b/smart-web-server/smart-master-web-server/src/main/resources/api/ssm-api.yaml
index ad730787a2..ae23d641bf 100644
--- a/smart-web-server/smart-master-web-server/src/main/resources/api/ssm-api.yaml
+++ b/smart-web-server/smart-master-web-server/src/main/resources/api/ssm-api.yaml
@@ -11,6 +11,7 @@ tags:
   - name: Cluster
   - name: Audit
   - name: System
+  - name: Metadata

 servers:
   - url: http://localhost:8081
@@ -61,6 +62,10 @@ paths:
   /api/v2/system/current-user:
     $ref: './resources/current-user.yaml'

+  # Metadata
+  /api/v2/metadata/actions:
+    $ref: './resources/actions-metadata.yaml'
+
 components:
   securitySchemes:
     basicAuth:
diff --git a/smart-web-server/smart-master-web-server/src/main/resources/static/ssm-api.yaml b/smart-web-server/smart-master-web-server/src/main/resources/static/ssm-api.yaml
index 7ad5369cb9..e98cf1242a 100644
--- a/smart-web-server/smart-master-web-server/src/main/resources/static/ssm-api.yaml
+++ b/smart-web-server/smart-master-web-server/src/main/resources/static/ssm-api.yaml
@@ -16,6 +16,7 @@ tags:
 - name: Cluster
 - name: Audit
 - name: System
+- name: Metadata
 paths:
   /api/v2/rules:
     get:
@@ -770,6 +771,21 @@ paths:
       summary: Get current logged in user
       tags:
       - System
+  /api/v2/metadata/actions:
+    get:
+      operationId: getActionsMetadata
+      responses:
+        "200":
+          content:
+            application/json:
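+              # Illustrative response shape (values are hypothetical; real names and
+              # usage strings come from the ActionSignature annotations of registered actions):
+              #   {"items": [{"name": "echo", "usage": "-msg <message>"}]}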
+            schema:
+              $ref: '#/components/schemas/ActionsMetadata'
+          description: OK
+        "401":
+          description: Unauthorized
+      summary: List all actions metadata
+      tags:
+      - Metadata
 components:
   parameters:
     page-request:
@@ -1638,6 +1654,22 @@ components:
       - name
       title: UserInfo
       type: object
+    ActionMetadata:
+      example:
+        usage: usage
+        name: name
+      properties:
+        name:
+          description: Name of the action
+          type: string
+        usage:
+          description: Usage info of the action
+          nullable: true
+          type: string
+      required:
+      - name
+      title: ActionMetadata
+      type: object
     ExecutorType:
       description: Type of the cmdlet executor
       enum:
@@ -1984,6 +2016,21 @@ components:
           timestamp: 1
           objectType: null
       title: AuditEvents
+    ActionsMetadata:
+      example:
+        items:
+        - usage: usage
+          name: name
+        - usage: usage
+          name: name
+      properties:
+        items:
+          description: List of actions metadata
+          items:
+            $ref: '#/components/schemas/ActionMetadata'
+          type: array
+      title: ActionsMetadata
+      type: object
   securitySchemes:
     basicAuth:
       scheme: basic
diff --git a/supports/tools/docker/multihost/docker-compose.yaml b/supports/tools/docker/multihost/docker-compose.yaml
index 15e35bf3b3..c4e77df578 100644
--- a/supports/tools/docker/multihost/docker-compose.yaml
+++ b/supports/tools/docker/multihost/docker-compose.yaml
@@ -176,12 +176,15 @@ services:
     image: apache/hive:4.0.1
     container_name: hive-metastore
     hostname: hive-metastore.demo
+    depends_on:
+      ssm-metastore-db:
+        condition: service_started
     environment:
       SERVICE_NAME: metastore
       DB_DRIVER: postgres
       SERVICE_OPTS: >-
         -Djavax.jdo.option.ConnectionDriverName=org.postgresql.Driver
-        -Djavax.jdo.option.ConnectionURL=jdbc:postgresql://ssm-metastore-db/hive
+        -Djavax.jdo.option.ConnectionURL=jdbc:postgresql://ssm-metastore-db.demo:5432/hive
         -Djavax.jdo.option.ConnectionUserName=ssm
         -Djavax.jdo.option.ConnectionPassword=ssm
         -Dhive.metastore.event.listeners=org.apache.hive.hcatalog.listener.DbNotificationListener
diff --git a/supports/tools/docker/multihost/ssm-conf/smart-site-master.xml b/supports/tools/docker/multihost/ssm-conf/smart-site-master.xml
index 3c4077b411..3206369bfa 100644
--- a/supports/tools/docker/multihost/ssm-conf/smart-site-master.xml
+++ b/supports/tools/docker/multihost/ssm-conf/smart-site-master.xml
@@ -141,7 +141,7 @@
     <name>hive.metastore.uris</name>
-    <value>thrift://hive-metastore:9083</value>
+    <value>thrift://hive-metastore.demo:9083</value>
     <name>hive.hmshandler.retry.interval</name>
diff --git a/supports/tools/docker/ozone/conf/configuration.xsl b/supports/tools/docker/ozone/conf/configuration.xsl
new file mode 100644
index 0000000000..869c4bc51f
--- /dev/null
+++ b/supports/tools/docker/ozone/conf/configuration.xsl
@@ -0,0 +1,41 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+<tr>
+  <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+  <td><xsl:value-of select="value"/></td>
+  <td><xsl:value-of select="description"/></td>
+</tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>
diff --git a/supports/tools/docker/ozone/conf/core-site.xml b/supports/tools/docker/ozone/conf/core-site.xml
index 5aa49b1704..9288506a79 100644
--- a/supports/tools/docker/ozone/conf/core-site.xml
+++ b/supports/tools/docker/ozone/conf/core-site.xml
@@ -14,6 +14,6 @@
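+    <!-- SSM clients reach the Smart Server RPC endpoint at this address;
+         "ssm-server" is the compose service name of the SSM server container. -->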
smart.server.rpc.address - localhost:8081 + ssm-server:7042 diff --git a/supports/tools/docker/ozone/conf/druid.xml b/supports/tools/docker/ozone/conf/druid.xml new file mode 100644 index 0000000000..8f4c600d29 --- /dev/null +++ b/supports/tools/docker/ozone/conf/druid.xml @@ -0,0 +1,31 @@ + + + + jdbc:postgresql://ssm-metastore-db:5432/metastore + ssm + ssm + + 1 + 1 + 1 + + 60000 + 90000 + 300000 + + SELECT fid FROM xattr WHERE fid = 0 + + SELECT 1 + true + false + false + + false + 30 + + true + 180 + true + + stat + \ No newline at end of file diff --git a/supports/tools/docker/ozone/conf/hazelcast.xml b/supports/tools/docker/ozone/conf/hazelcast.xml new file mode 100644 index 0000000000..ed18d48fca --- /dev/null +++ b/supports/tools/docker/ozone/conf/hazelcast.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + diff --git a/supports/tools/docker/ozone/conf/log4j2.properties b/supports/tools/docker/ozone/conf/log4j2.properties new file mode 100644 index 0000000000..9d6136a6f8 --- /dev/null +++ b/supports/tools/docker/ozone/conf/log4j2.properties @@ -0,0 +1,49 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# log4j configuration used during build and unit tests + +rootLogger.level = INFO +property.filename = ${sys:smart.log.dir}/${sys:smart.log.file} +appenders = R, console + +appender.console.type = Console +appender.console.name = STDOUT +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n + +appender.R.type = RollingFile +appender.R.name = File +appender.R.fileName = ${filename} +appender.R.filePattern = ${filename}.%d{yyyy-MM-dd} +appender.R.layout.type = PatternLayout +appender.R.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %c{1} [%p] %m%n +appender.R.policies.type = Policies +appender.R.policies.time.type = TimeBasedTriggeringPolicy +appender.R.policies.time.interval = 1 + +rootLogger.appenderRefs = R, console + +rootLogger.appenderRef.console.ref = STDOUT +rootLogger.appenderRef.R.ref = File + +logger.ipc.name = org.apache.hadoop.ipc.Server +logger.ipc.level = ERROR + +logger.securedIpc.name = SecurityLogger +logger.securedIpc.level = WARN + +# Downgrade Hadoop Retry Exception, please remove this line during debug +logger.hadoopRetry.name = org.apache.hadoop.io.retry.RetryInvocationHandler +logger.hadoopRetry.level = ERROR + +logger.dataStreamer.name = org.apache.hadoop.hdfs.DataStreamer +logger.dataStreamer.level = ERROR diff --git a/supports/tools/docker/ozone/conf/servers b/supports/tools/docker/ozone/conf/servers new file mode 100644 index 0000000000..12e46f04f1 --- /dev/null +++ b/supports/tools/docker/ozone/conf/servers @@ -0,0 +1 @@ +ssm-server \ No newline at end of file diff --git a/supports/tools/docker/ozone/conf/smart-default.xml b/supports/tools/docker/ozone/conf/smart-default.xml new file mode 100644 index 0000000000..4160a744d8 --- /dev/null +++ b/supports/tools/docker/ozone/conf/smart-default.xml @@ -0,0 +1,721 @@ + + + + + + + + + + + + smart.server.rpc.address + 0.0.0.0:7042 + rpc server of SSM + + + + + 
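+  <!-- The two properties below take comma-separated directory lists,
+       e.g. "/tmp,/staging" (illustrative values only). -->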
+  <property>
+    <name>smart.ignore.dirs</name>
+    <value></value>
+    <description>
+      SSM will completely ignore files under the given HDFS directory.
+      For more than one directory, they should be separated by ",".
+    </description>
+  </property>
+
+  <property>
+    <name>smart.cover.dirs</name>
+    <value></value>
+    <description>
+      SSM will only fetch files under the given HDFS directory.
+      For more than one directory, they should be separated by ",".
+      By default, all HDFS files are covered.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.work.dir</name>
+    <value>/system/ssm</value>
+    <description>
+      This HDFS directory is used as a work directory for SSM to store tmp files.
+      The default path is "/system/ssm", and SSM will ignore HDFS inotify for
+      files under the work directory. Only one directory can be set for this property.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.client.concurrent.report.enabled</name>
+    <value>true</value>
+    <description>
+      This property is used to enable/disable concurrent report for SmartClient.
+      If it is enabled, SmartClient will connect to multiple configured smart
+      servers concurrently, which is an optimization to find the active smart server.
+      Only the active smart server will respond to build a successful connection. If
+      the report is successfully delivered to the active smart server, attempting to
+      connect to other smart servers will be canceled.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.dfs.enabled</name>
+    <value>true</value>
+    <description>
+      By setting it to false, all SSM functions except the rpc and RESTApi services will be disabled.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.security.credential.provider.path</name>
+    <value></value>
+    <description>
+      This property specifies the jceks path which can store the metastore password
+      under the alias 'smart.metastore.password'. An example is jceks://file/root/ssm.jceks.
+      If no path is provided (the default), the password configured in druid.xml will be used.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.server.rpc.handler.count</name>
+    <value>80</value>
+    <description>number of handlers in rpc server</description>
+  </property>
+
+  <property>
+    <name>smart.agent.port</name>
+    <value>7048</value>
+    <description>SmartAgent port</description>
+  </property>
+
+  <property>
+    <name>smart.agent.master.port</name>
+    <value>7051</value>
+    <description>SmartAgent master port</description>
+  </property>
+
+  <property>
+    <name>smart.namespace.fetcher.batch</name>
+    <value>500</value>
+    <description>Batch size of Namespace fetcher</description>
+  </property>
+
+  <property>
+    <name>smart.namespace.fetcher.producers.num</name>
+    <value>3</value>
+    <description>Number of producers in namespace fetcher</description>
+  </property>
+
+  <property>
+    <name>smart.namespace.fetcher.consumers.num</name>
+    <value>6</value>
+    <description>Number of consumers in namespace fetcher</description>
+  </property>
+
+  <property>
+    <name>smart.rule.executors</name>
+    <value>5</value>
+    <description>Max number of rules that can be executed in parallel</description>
+  </property>
+
+  <property>
+    <name>smart.cmdlet.executors</name>
+    <value>10</value>
+    <description>Max number of cmdlets that can be executed in parallel</description>
+  </property>
+
+  <property>
+    <name>smart.dispatch.cmdlets.extra.num</name>
+    <value>10</value>
+    <description>The number of extra cmdlets dispatched by Smart Server</description>
+  </property>
+
+  <property>
+    <name>smart.cmdlet.dispatchers</name>
+    <value>3</value>
+    <description>Max number of cmdlet dispatchers that work in parallel</description>
+  </property>
+
+  <property>
+    <name>smart.cmdlet.mover.max.concurrent.blocks.per.srv.inst</name>
+    <value>0</value>
+    <description>
+      Max number of file mover cmdlets that can be executed in parallel per SSM service.
+      0 means unlimited.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.action.move.throttle.mb</name>
+    <value>0</value>
+    <description>The throughput limit (MB) for SSM move overall</description>
+  </property>
+
+  <property>
+    <name>smart.action.copy.throttle.mb</name>
+    <value>0</value>
+    <description>The throughput limit (MB) for SSM copy overall</description>
+  </property>
+
+  <property>
+    <name>smart.action.ec.throttle.mb</name>
+    <value>0</value>
+    <description>The throughput limit (MB) for SSM EC overall</description>
+  </property>
+
+  <property>
+    <name>smart.action.local.execution.disabled</name>
+    <value>false</value>
+    <description>
+      The default false value means the active smart server can also execute actions,
+      like an agent. If it is set to true, the active SSM server will NOT be able to
+      execute actions. This configuration has no impact on the standby smart server.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.cmdlet.max.num.pending</name>
+    <value>20000</value>
+    <description>
+      Maximum number of pending cmdlets in SSM server.
+      Default value is 20000.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.cmdlet.hist.max.num.records</name>
+    <value>100000</value>
+    <description>
+      Maximum number of historic cmdlet records kept in SSM server.
+      Oldest cmdlets will be deleted if the number exceeds the threshold.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.cmdlet.hist.max.record.lifetime</name>
+    <value>30day</value>
+    <description>
+      Maximum lifetime of historic cmdlet records kept in SSM server.
+      A cmdlet record will be deleted from SSM server once it exceeds the threshold.
+      Valid time units are 'day', 'hour', 'min', 'sec'. The minimum update
+      granularity is 5sec.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.cmdlet.cache.batch</name>
+    <value>600</value>
+    <description>Maximum batch size of cmdlet batch insert.</description>
+  </property>
+
+  <property>
+    <name>smart.copy.scheduler.base.sync.batch</name>
+    <value>500</value>
+    <description>Maximum batch size of copyscheduler base sync batch insert.</description>
+  </property>
+
+  <property>
+    <name>smart.file.diff.max.num.records</name>
+    <value>10000</value>
+    <description>Maximum number of file diff records with useless state.</description>
+  </property>
+
+  <property>
+    <name>smart.status.report.period</name>
+    <value>10</value>
+    <description>The status report period for actions. The time unit is millisecond.</description>
+  </property>
+
+  <property>
+    <name>smart.status.report.period.multiplier</name>
+    <value>50</value>
+    <description>The report period multiplied by this multiplier defines the largest report interval.</description>
+  </property>
+
+  <property>
+    <name>smart.status.report.ratio</name>
+    <value>0.2</value>
+    <description>
+      If the finished actions ratio equals or exceeds this value, a status report will
+      be triggered.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.namespace.fetcher.ignore.unsuccessive.inotify.event</name>
+    <value>false</value>
+    <description>
+      Skip fetching the entire namespace and only use available iNotify events to
+      update the namespace if true. NOTE: This may lead to some unpredictable
+      consequences and should only be used for testing.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.cmdlet.dispatcher.log.disp.result</name>
+    <value>false</value>
+    <description>Log the dispatch result for each dispatched cmdlet.</description>
+  </property>
+
+  <property>
+    <name>smart.cmdlet.dispatcher.log.disp.metrics.interval</name>
+    <value>5000</value>
+    <description>
+      Time interval in milliseconds to log statistic metrics of the cmdlet dispatcher.
+      If no cmdlet was dispatched in the time interval, there is no output for this
+      interval. Disable the logger by setting it to 0.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.compression.codec</name>
+    <value>Zlib</value>
+    <description>
+      The default compression codec for SSM compression (Zlib, Lz4, Bzip2, snappy).
+      User can also specify a codec in the action arg, then this default setting will
+      be overridden.
+    </description>
+  </property>
+
+  <property>
+    <name>smart.compression.max.split</name>
+    <value>1000</value>
+    <description>The max number of chunks split for compression.</description>
+  </property>
+
+  <property>
+    <name>smart.compact.batch.size</name>
+    <value>200</value>
+    <description>The max number of small files to be compacted per compact action.</description>
+  </property>
+
+  <property>
+    <name>smart.compact.container.file.threshold.mb</name>
+    <value>1024</value>
+    <description>The max size of a container file in MB.</description>
+  </property>
+
+  <property>
+    <name>smart.metastore.migration.liquibase.changelog.path</name>
+    <value>db/changelog/changelog-root.xml</value>
+    <description>Path to liquibase changelog root file.</description>
+  </property>
+
+  <property>
+    <name>smart.ignore.path.templates</name>
+    <value></value>
+    <description>Comma-separated list of regex templates of HDFS paths to be completely ignored by SSM.</description>
+  </property>
+
+  <property>
+    <name>smart.internal.path.templates</name>
+    <value>.*/\..*,.*/__.*,.*_COPYING_.*</value>
+    <description>Comma-separated list of regex templates of internal files to be completely ignored by SSM.</description>
+  </property>
+
+  <property>
+    <name>smart.file.access.event.fetch.interval.ms</name>
+    <value>1000</value>
+    <description>The interval in milliseconds between access event fetches.</description>
+  </property>
+
+  <property>
+    <name>smart.cached.file.fetch.interval.ms</name>
+    <value>5000</value>
+    <description>The interval in milliseconds between cached file fetches from HDFS.</description>
+  </property>
+
+  <property>
+    <name>smart.namespace.fetch.interval.ms</name>
+    <value>1</value>
+    <description>The interval in milliseconds between namespace fetches from HDFS.</description>
+  </property>
+
+  <property>
+    <name>smart.mover.scheduler.storage.report.fetch.interval.ms</name>
+    <value>120000</value>
+    <description>The interval in milliseconds between storage report fetches from HDFS DataNode in mover scheduler.</description>
+  </property>
+
+  <property>
+    <name>smart.metastore.small-file.insert.batch.size</name>
+    <value>200</value>
+    <description>The max size of small file insert batch to the Metastore.</description>
+  </property>
+
+  <property>
+    <name>smart.agent.master.ask.timeout.ms</name>
+    <value>5000</value>
+    <description>The max time in milliseconds to wait for an answer from the SmartAgent master actor during action submission.</description>
+  </property>
+
+  <property>
+    <name>smart.file.access.count.aggregation.interval.ms</name>
+    <value>5000</value>
+    <description>The interval in milliseconds that is covered by a single second-granularity access count table.</description>
+  </property>
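+
+  <!-- Note on the sync strategy below: two files of identical length but different
+       bytes are (wrongly) considered equal under FILE_LENGTH, while CHECKSUM detects
+       the difference at the cost of reading file data. -->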
+ + + + + smart.sync.file.equality.strategy + CHECKSUM + + The strategy for checking whether the files with same relative path in the source and target clusters + are equal during scheduling of the sync action. + Possible values: + FILE_LENGTH - equality check based on the file length. This strategy is fast alternative to + comparing file contents/checksums, but have some corner cases when two different files with the same + length but with different content are considered equal. + CHECKSUM - equality check based on the file checksum. This strategy is more resource-intensive, + but it doesn't return false positive results, like previous one. + + + + + smart.rest.server.port + 8081 + SSM Rest Server port + + + + smart.client.report.tasks.timeout.ms + 2000 + + Timeout in milliseconds for the successful file access report. + Has an effect only if the 'smart.client.concurrent.report.enabled' option is set to true. + + + + + smart.client.active.server.cache.path + /tmp/active_smart_server + + Local filesystem path of the active Smart Server address file-based cache. + + + + + smart.rest.server.security.enabled + false + + Whether to enable SSM REST server security. + + + + + smart.rest.server.auth.spnego.enabled + false + + Whether to enable SSM REST server SPNEGO authentication method support. + + + + + smart.rest.server.auth.kerberos.enabled + false + + Whether to enable SSM REST server basic Kerberos authentication method support. + + + + + smart.rest.server.auth.predefined.enabled + false + + Whether to enable SSM REST server basic authentication with users, + predefined in the 'smart.rest.server.auth.predefined.users' option. + + + + + smart.file.access.count.aggregator.failover.retry.count + 60 + + Maximum number of attempts to save file access events + + + + + smart.file.access.count.aggregator.failover + SAVE_FAILED_WITH_RETRY + + Failover strategy for file access events aggregator. Possible values: + FAIL - throw exception, no failover. + SAVE_FAILED_WITH_RETRY - save all file access events that caused exception + for later submission with max attempts less or equals than smart.access.count.aggregator.failover.retry.count + + + + + smart.action.client.cache.ttl + 10m + + The minimum amount of time after the last access to the DFS client cache entry + that must pass in order for the entry to be evicted. + Should be in the format '[Amount][TimeUnit]', where TimeUnit is one + of 'day' or 'd', 'hour' or 'h', 'min' or 'm', 'sec' or 's'. + + + + + smart.rest.server.ssl.enabled + false + + Whether to enable SSL support for the SSM REST server. + + + + + smart.rest.server.auth.ldap.enabled + false + + Whether to enable SSM REST server basic LDAP authentication method support. + + + + + smart.rest.server.auth.ldap.search.base + + + Base LDAP distinguished name for search. + + + + + smart.rest.server.auth.ldap.user.search.base + + + Base LDAP distinguished name for user search. + + + + + smart.rest.server.auth.ldap.group.search.base + + + Base LDAP distinguished name for group search. + + + + + smart.rest.server.auth.ldap.user.attributes.name + uid + + The name attribute of user LDAP object. + + + + + smart.rest.server.auth.ldap.user.object-classes + person + + Comma-separated list of LDAP user entry objectClasses. + + + + + smart.rest.server.auth.ldap.user.search.scope + ONE_LEVEL + + The scope of LDAP user search. 
+
+<property>
+  <name>smart.rest.server.auth.ldap.user.search.scope</name>
+  <value>ONE_LEVEL</value>
+  <description>
+    The scope of LDAP user search. Possible values:
+    OBJECT - search the named object.
+    ONE_LEVEL - search one level of the named context.
+    SUBTREE - search the entire subtree rooted at the named object.
+  </description>
+</property>
+
+<property>
+  <name>smart.rest.server.auth.ldap.group.search.scope</name>
+  <value>ONE_LEVEL</value>
+  <description>
+    The scope of LDAP group search. Possible values:
+    OBJECT - search the named object.
+    ONE_LEVEL - search one level of the named context.
+    SUBTREE - search the entire subtree rooted at the named object.
+  </description>
+</property>
+
+<property>
+  <name>smart.rest.server.auth.ldap.user.attributes.password</name>
+  <value>userPassword</value>
+  <description>
+    The password attribute of the user LDAP object.
+  </description>
+</property>
+
+<property>
+  <name>smart.rest.server.auth.ldap.group.object-class</name>
+  <value>groupOfNames</value>
+  <description>
+    LDAP group entry objectClass.
+  </description>
+</property>
+
+<property>
+  <name>smart.rest.server.auth.ldap.group.attributes.name</name>
+  <value>cn</value>
+  <description>
+    The name attribute of the group LDAP object.
+  </description>
+</property>
+
+<property>
+  <name>smart.rest.server.auth.ldap.auth.type</name>
+  <value>BIND</value>
+  <description>
+    LDAP authentication type. Possible values:
+    BIND - search the user by the specified filters and authenticate with
+    the found user's DN and the provided password.
+    PASSWORD_COMPARE - search the user by the specified filters and use the
+    LDAP password compare operation.
+  </description>
+</property>
+
+<property>
+  <name>smart.rest.server.auth.failures.logging.enabled</name>
+  <value>true</value>
+  <description>
+    Whether to enable logging of unsuccessful REST server auth attempts.
+  </description>
+</property>
+
+<property>
+  <name>smart.metrics.enabled</name>
+  <value>true</value>
+  <description>
+    Whether to enable metrics collection and export.
+  </description>
+</property>
+
+<property>
+  <name>smart.metrics.jmx.enabled</name>
+  <value>true</value>
+  <description>
+    Whether to enable the JMX metrics exporter.
+  </description>
+</property>
+
+<property>
+  <name>smart.metrics.jmx.domain</name>
+  <value>metrics</value>
+  <description>
+    Default domain for the JMX exporter.
+  </description>
+</property>
+
+<property>
+  <name>smart.metrics.prometheus.enabled</name>
+  <value>true</value>
+  <description>
+    Whether to enable the Prometheus metrics exporter.
+  </description>
+</property>
+
+<property>
+  <name>smart.metrics.db.queries.enabled</name>
+  <value>true</value>
+  <description>
+    Whether to enable SQL query statistics export.
+  </description>
+</property>
+
+<property>
+  <name>smart.proxy.user.strategy</name>
+  <value>DISABLED</value>
+  <description>
+    The strategy of user impersonation while connecting to HDFS.
+    Possible values are:
+    DISABLED - impersonation is disabled; all actions are performed by the
+    SSM node user (either the Kerberos principal or the user that started
+    SSM).
+    NODE_SCOPE - impersonation is enabled on the node level; all actions are
+    performed by the user provided in the 'smart.proxy.user' option.
+    CMDLET_SCOPE - impersonation is enabled on the cmdlet level; all actions
+    are performed by the cmdlet owner (currently, the cmdlet creator).
+  </description>
+</property>
+
+<property>
+  <name>smart.proxy.users.cache.ttl</name>
+  <value>2m</value>
+  <description>
+    The minimum amount of time after the last access to a proxy users cache
+    entry that must pass in order for the entry to be evicted. Should be in
+    the format '[Amount][TimeUnit]', where TimeUnit is one of 'day' or 'd',
+    'hour' or 'h', 'min' or 'm', 'sec' or 's'.
+  </description>
+</property>
+
+<property>
+  <name>smart.proxy.users.cache.size</name>
+  <value>20</value>
+  <description>
+    Maximum size of the proxy users cache.
+  </description>
+</property>
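To relate the LDAP options above to a live directory: the search scopes map onto standard `ldapsearch` scopes (`OBJECT` → `-s base`, `ONE_LEVEL` → `-s one`, `SUBTREE` → `-s sub`). A hypothetical preview of what the default user search would match; the host, bind DN, and search base are placeholders for your directory layout:

```bash
#!/usr/bin/env bash
# Preview the user entries SSM's defaults would consider: objectClass from
# smart.rest.server.auth.ldap.user.object-classes ('person'), name attribute
# from smart.rest.server.auth.ldap.user.attributes.name ('uid'), scope
# ONE_LEVEL expressed as 'ldapsearch -s one'.
ldapsearch -H ldap://ldap.example.org \
  -D 'cn=admin,dc=example,dc=org' -W \
  -b 'ou=people,dc=example,dc=org' -s one \
  '(&(objectClass=person)(uid=alice))' dn uid
```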
diff --git a/supports/tools/docker/ozone/conf/smart-env.sh b/supports/tools/docker/ozone/conf/smart-env.sh
new file mode 100644
index 0000000000..096ffb9815
--- /dev/null
+++ b/supports/tools/docker/ozone/conf/smart-env.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# It's better to have JAVA_HOME configured through this file in the following
+# way when starting a distributed SSM cluster. Otherwise, in some cases it
+# may lead to errors (cases reported on Ubuntu OS).
+#export JAVA_HOME=
+
+
+# Parameters used to start the SSM Server JVM
+export SSM_SERVER_JAVA_OPT="-Dcom.sun.management.jmxremote.rmi.port=3333 -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.port=3333 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+
+# Parameters used to start the SSM Agent JVM
+export SSM_AGENT_JAVA_OPT="-Dcom.sun.management.jmxremote.rmi.port=3334 -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.port=3334 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+
+# Common parameters used to start the JVM for both the SSM Server and Agent
+#export SSM_JAVA_OPT=
+
+# Set the Hadoop native lib path for SSM compression use. Generally, the
+# path is $HADOOP_HOME/lib/native
+# export LD_LIBRARY_PATH=
\ No newline at end of file
diff --git a/supports/tools/docker/ozone/conf/smart-site.xml b/supports/tools/docker/ozone/conf/smart-site.xml
new file mode 100755
index 0000000000..77bb17ab52
--- /dev/null
+++ b/supports/tools/docker/ozone/conf/smart-site.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+  <property>
+    <name>smart.dfs.enabled</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>smart.server.rpc.address</name>
+    <value>ssm-server:7042</value>
+  </property>
+  <property>
+    <name>smart.rule.executors</name>
+    <value>5</value>
+  </property>
+  <property>
+    <name>smart.hadoop.conf.path</name>
+    <value>/opt/ssm/conf</value>
+  </property>
+  <property>
+    <name>smart.fs.type</name>
+    <value>OZONE</value>
+  </property>
+  <property>
+    <name>smart.ozone.ofs.default</name>
+    <value>ofs://om/</value>
+  </property>
+
+  <property>
+    <name>smart.rest.server.security.enabled</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>ozone.replication</name>
+    <value>ONE</value>
+  </property>
+  <property>
+    <name>ozone.replication.type</name>
+    <value>RATIS</value>
+  </property>
+</configuration>
\ No newline at end of file
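`smart-env.sh` above enables unauthenticated JMX on ports 3333 (server) and 3334 (agents), and the compose file below publishes 3333 to the host. One possible way to attach once the stack is running; the jmxterm jar path is a placeholder for wherever you downloaded that third-party tool:

```bash
#!/usr/bin/env bash
# Attach to the SSM server's JMX endpoint published on the host (port 3333,
# per SSM_SERVER_JAVA_OPT above and the ports mapping in docker-compose.yaml).
jconsole localhost:3333

# Or, without a GUI, list the exported MBean domains via jmxterm:
java -jar jmxterm.jar -l localhost:3333 -n <<< "domains"
```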
diff --git a/supports/tools/docker/ozone/docker-compose.yaml b/supports/tools/docker/ozone/docker-compose.yaml
index d750b51a42..c28bf7aba3 100644
--- a/supports/tools/docker/ozone/docker-compose.yaml
+++ b/supports/tools/docker/ozone/docker-compose.yaml
@@ -27,11 +27,12 @@ x-common-config:
     OZONE-SITE.XML_ozone.om.http-address: "om:9874"
     OZONE-SITE.XML_ozone.recon.address: "recon:9891"
     OZONE-SITE.XML_ozone.recon.db.dir: "/data/metadata/recon"
-    OZONE-SITE.XML_ozone.replication: "1"
     OZONE-SITE.XML_ozone.scm.block.client.address: "scm"
     OZONE-SITE.XML_ozone.scm.client.address: "scm"
     OZONE-SITE.XML_ozone.scm.datanode.id.dir: "/data/metadata"
     OZONE-SITE.XML_ozone.scm.names: "scm"
+    OZONE-SITE.XML_ozone.replication: "ONE"
+    OZONE-SITE.XML_ozone.replication.type: "RATIS"
     no_proxy: "om,recon,scm,s3g,localhost,127.0.0.1"
 
 version: "3"
@@ -44,15 +45,44 @@ services:
     environment:
       <<: *common-config
 
-  ozone-fs-client:
-    image: hub.adsw.io/java/hadoop-openjdk8:${HADOOP_VERSION:-3.3.6}
-    restart: unless-stopped
-    entrypoint: [ "/entrypoint.sh" ]
+  ssm-server:
+    image: hub.adsw.io/ssm/ssm-server:${SSM_VERSION:-2.2.0-SNAPSHOT}
     volumes:
-      - ./conf:/etc/conf
-      - ./fs-client/common.sh:/common.sh
-      - ./fs-client/entrypoint.sh:/entrypoint.sh
-      - ../../../../smart-ozone-support/target/smart-ozone-fs-client-shaded-2.2.0-SNAPSHOT.jar:/etc/jars/smart-ozone-client.jar
+      - ./conf:/opt/ssm/conf
+      - ./ssm/common.sh:/common.sh
+      - ./ssm/ssm-server-entrypoint.sh:/ssm-server-entrypoint.sh
+    entrypoint: ["/ssm-server-entrypoint.sh"]
+    ports:
+      - "7042:7042"
+      - "8081:8081"
+      # JMX port
+      - "3333:3333"
+      # SSM debug port
+      - "8008:8008"
+    environment:
+      SSM_DEBUG_OPT: ${SSM_DEBUG_OPT}
+    healthcheck:
+      test: ["CMD-SHELL", "curl http://ssm-server:8081 || exit 1"]
+      interval: 30s
+      timeout: 15s
+      retries: 5
+    depends_on:
+      - ssm-metastore-db
+
+  ssm-metastore-db:
+    image: "docker.io/library/postgres:14.0"
+    restart: unless-stopped
+    environment:
+      POSTGRES_DB: 'metastore'
+      POSTGRES_USER: 'ssm'
+      POSTGRES_PASSWORD: 'ssm'
+    ports:
+      - '5432:5432'
+    healthcheck:
+      test: ["CMD-SHELL", "psql -d metastore -U ssm -Atc 'SELECT 1;'"]
+      interval: 30s
+      timeout: 15s
+      retries: 3
 
   om:
     <<: *image
diff --git a/supports/tools/docker/ozone/fs-client/entrypoint.sh b/supports/tools/docker/ozone/fs-client/entrypoint.sh
deleted file mode 100755
index 6a7eaeed43..0000000000
--- a/supports/tools/docker/ozone/fs-client/entrypoint.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-. ./common.sh
-
-echo "export JAVA_HOME=${JAVA_HOME}" >> /root/.bashrc
-
-cp /etc/jars/*.jar $HADOOP_HOME/share/hadoop/common/lib/
-moveHadoopConfFiles /etc/conf ${HADOOP_CONF_DIR}
-
-tail -f /dev/null
\ No newline at end of file
diff --git a/supports/tools/docker/ozone/fs-client/common.sh b/supports/tools/docker/ozone/ssm/common.sh
similarity index 100%
rename from supports/tools/docker/ozone/fs-client/common.sh
rename to supports/tools/docker/ozone/ssm/common.sh
diff --git a/supports/tools/docker/ozone/ssm/ssm-server-entrypoint.sh b/supports/tools/docker/ozone/ssm/ssm-server-entrypoint.sh
new file mode 100755
index 0000000000..d27c9745f9
--- /dev/null
+++ b/supports/tools/docker/ozone/ssm/ssm-server-entrypoint.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. ./common.sh
+
+cp /root/.ssh/id_rsa /tmp/shared/id_rsa
+cp /root/.ssh/id_rsa.pub /tmp/shared/id_rsa.pub
+service ssh start
+ssh-keyscan "$HOSTNAME" >> /root/.ssh/known_hosts
+echo "export JAVA_HOME=${JAVA_HOME}" >> /root/.bashrc
+echo "export SMART_HOME=${SSM_HOME}" >> /root/.bashrc
+echo "export SMART_CONF_DIR=${SSM_HOME}/conf/" >> /root/.bashrc
+
+# Start Smart Storage Manager
+cd "$SSM_HOME" || exit
+
+echo "---------------------------"
+echo "Starting SSM server and agents"
+echo "---------------------------"
+
+source bin/start-ssm.sh ${SSM_DEBUG_OPT} --config "${SSM_HOME}/conf/" &
+wait_for_it "$(hostname -f):8081"
+
+tail -f /var/log/ssm/*
\ No newline at end of file
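For reference, a possible end-to-end smoke test of this compose setup; the service names, ports, and credentials all come from the files above:

```bash
#!/usr/bin/env bash
# Bring up the SSM + Ozone stack and verify both healthchecks by hand.
cd supports/tools/docker/ozone
docker compose up -d

# REST server check, mirroring the ssm-server healthcheck (port 8081 is
# published to the host in docker-compose.yaml):
curl -fsS http://localhost:8081 > /dev/null && echo "SSM REST server is up"

# Metastore check, mirroring the ssm-metastore-db healthcheck:
docker compose exec ssm-metastore-db psql -d metastore -U ssm -Atc 'SELECT 1;'
```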