Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeResponseProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesRequestProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesResponseProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntriesRequestProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntriesResponseProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
Expand Down Expand Up @@ -73,6 +75,8 @@
import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntriesRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
Expand Down Expand Up @@ -100,6 +104,8 @@
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshMountTableEntriesRequestPBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshMountTableEntriesResponsePBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntriesRequestPBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntriesResponsePBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl;
Expand Down Expand Up @@ -168,6 +174,24 @@ public RemoveMountTableEntryResponseProto removeMountTableEntry(
}
}

@Override
public RemoveMountTableEntriesResponseProto removeMountTableEntries(
    RpcController controller,
    RemoveMountTableEntriesRequestProto request)
    throws ServiceException {
  // Translate the wire proto into the store API type, delegate to the
  // router admin server, then unwrap the PB-backed response to its proto.
  try {
    RemoveMountTableEntriesRequest storeRequest =
        new RemoveMountTableEntriesRequestPBImpl(request);
    RemoveMountTableEntriesResponse storeResponse =
        server.removeMountTableEntries(storeRequest);
    return ((RemoveMountTableEntriesResponsePBImpl) storeResponse).getProto();
  } catch (IOException e) {
    // The RPC layer only propagates ServiceException; keep e as the cause.
    throw new ServiceException(e);
  }
}

@Override
public AddMountTableEntriesResponseProto addMountTableEntries(RpcController controller,
AddMountTableEntriesRequestProto request) throws ServiceException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,8 @@
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshMountTableEntriesResponseProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RefreshSuperUserGroupsConfigurationResponseProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntriesRequestProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntriesResponseProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
Expand Down Expand Up @@ -76,6 +78,8 @@
import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntriesRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
Expand All @@ -99,6 +103,8 @@
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshMountTableEntriesRequestPBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshMountTableEntriesResponsePBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntriesRequestPBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntriesResponsePBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl;
Expand Down Expand Up @@ -207,6 +213,22 @@ public RemoveMountTableEntryResponse removeMountTableEntry(
}
}

@Override
public RemoveMountTableEntriesResponse removeMountTableEntries(
    RemoveMountTableEntriesRequest request) throws IOException {
  // Unwrap the PB-backed request to its proto and invoke the remote call.
  RemoveMountTableEntriesRequestPBImpl requestPB =
      (RemoveMountTableEntriesRequestPBImpl) request;
  RemoveMountTableEntriesRequestProto proto = requestPB.getProto();
  try {
    RemoveMountTableEntriesResponseProto responseProto =
        rpcProxy.removeMountTableEntries(null, proto);
    return new RemoveMountTableEntriesResponsePBImpl(responseProto);
  } catch (ServiceException e) {
    // Preserve the unwrapped remote exception as the cause so the
    // server-side stack trace is not lost (previously only the message
    // was propagated).
    IOException remote = getRemoteException(e);
    throw new IOException(remote.getMessage(), remote);
  }
}

@Override
public GetMountTableEntriesResponse getMountTableEntries(
GetMountTableEntriesRequest request) throws IOException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@
import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntriesRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
Expand Down Expand Up @@ -76,12 +78,22 @@ UpdateMountTableEntryResponse updateMountTableEntry(
* Remove an entry from the mount table.
*
* @param request Fully populated request object.
* @return True the mount table entry was removed from the data store.
* @return True if the mount table entry was removed from the data store.
* @throws IOException Throws exception if the data store is not initialized.
*/
RemoveMountTableEntryResponse removeMountTableEntry(
RemoveMountTableEntryRequest request) throws IOException;

/**
 * Remove multiple entries from the mount table in a single call.
 *
 * @param request Fully populated request object listing the source paths
 *          of the mount entries to remove.
 * @return Response whose status is true only if every requested path was
 *         removed from the data store; paths that could not be removed are
 *         reported via the response's failed record keys.
 * @throws IOException If the data store is not initialized.
 */
RemoveMountTableEntriesResponse removeMountTableEntries(RemoveMountTableEntriesRequest request)
    throws IOException;

/**
* List all mount table entries present at or below the path. Fetches from the
* state store.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,8 @@

import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntriesRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntriesRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntriesResponse;
import org.apache.hadoop.util.Preconditions;

import org.apache.hadoop.conf.Configuration;
Expand Down Expand Up @@ -515,6 +517,22 @@ public RemoveMountTableEntryResponse removeMountTableEntry(
return getMountTableStore().removeMountTableEntry(request);
}

@Override
public RemoveMountTableEntriesResponse removeMountTableEntries(
    RemoveMountTableEntriesRequest request) throws IOException {
  // Before deleting the mount entries, reset any quota that was pushed to
  // the sub-clusters for each source path. This is best-effort: the actual
  // destination may no longer exist, so failures are logged and ignored.
  for (String srcPath : request.getSrcPaths()) {
    try {
      synchronizeQuota(srcPath, HdfsConstants.QUOTA_RESET,
          HdfsConstants.QUOTA_RESET, null);
    } catch (Exception e) {
      LOG.warn("Unable to clear quota at the destinations for {}: {}",
          srcPath, e.getMessage());
    }
  }
  // Delegate the actual removal to the mount table store.
  return getMountTableStore().removeMountTableEntries(request);
}

@Override
public GetMountTableEntriesResponse getMountTableEntries(
GetMountTableEntriesRequest request) throws IOException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,11 @@
import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
Expand All @@ -44,6 +46,8 @@
import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntriesRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntriesResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
Expand All @@ -53,6 +57,7 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Time;
import static org.apache.hadoop.hdfs.server.federation.router.Quota.eachByStorageType;
import static org.apache.hadoop.hdfs.server.federation.store.StateStoreUtils.filterMultiple;

/**
* Implementation of the {@link MountTableStore} state store API.
Expand Down Expand Up @@ -209,6 +214,56 @@ public RemoveMountTableEntryResponse removeMountTableEntry(
return response;
}

@Override
public RemoveMountTableEntriesResponse removeMountTableEntries(
    RemoveMountTableEntriesRequest request) throws IOException {
  List<String> failedPaths = new ArrayList<>();
  List<MountTable> entriesToRemove = new ArrayList<>();
  // Fetch the full mount table once and resolve every requested source
  // path against it, instead of querying the driver per path.
  List<MountTable> allEntries = getDriver().get(getRecordClass()).getRecords();
  for (String path : request.getSrcPaths()) {
    final MountTable partial = MountTable.newInstance();
    partial.setSourcePath(path);
    final Query<MountTable> query = new Query<>(partial);
    List<MountTable> filtered = filterMultiple(query, allEntries);
    // A source path must match exactly one entry to be removable.
    MountTable deleteEntry = null;
    if (filtered.size() == 1) {
      deleteEntry = filtered.get(0);
    }

    if (deleteEntry == null) {
      failedPaths.add(path);
      continue;
    }
    RouterPermissionChecker pc = RouterAdminServer.getPermissionChecker();
    if (pc != null) {
      try {
        pc.checkPermission(deleteEntry, FsAction.WRITE);
      } catch (IOException ioe) {
        failedPaths.add(path);
        continue;
      }
    }
    // BUGFIX: previously the entry was only queued inside the (pc != null)
    // branch, so with permissions disabled (pc == null) every matched path
    // was silently dropped — neither removed nor reported as failed. Queue
    // the entry unconditionally once the permission check (if any) passes.
    entriesToRemove.add(deleteEntry);
  }

  // Remove all permitted entries in one driver call; record per-entry
  // failures and whether anything was actually removed.
  boolean anyRemoved = false;
  Map<MountTable, Boolean> statuses = getDriver().removeMultiple(entriesToRemove);
  for (Map.Entry<MountTable, Boolean> mapEntry : statuses.entrySet()) {
    if (!mapEntry.getValue()) {
      failedPaths.add(mapEntry.getKey().getSourcePath());
    } else {
      anyRemoved = true;
    }
  }

  RemoveMountTableEntriesResponse response = RemoveMountTableEntriesResponse.newInstance();
  // Overall status is success only when no path failed at any stage.
  response.setStatus(failedPaths.isEmpty());
  response.setFailedRecordsKeys(failedPaths);
  if (anyRemoved) {
    // Only invalidate the routers' caches when the table actually changed.
    updateCacheAllRouters();
  }
  return response;
}

@Override
public GetMountTableEntriesResponse getMountTableEntries(
GetMountTableEntriesRequest request) throws IOException {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.federation.store.protocol;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;

/**
 * API request for removing multiple mount table paths in the state store.
 */
public abstract class RemoveMountTableEntriesRequest {

  /**
   * Create an empty request record via the configured state store
   * serializer.
   *
   * @return New, unpopulated request record.
   * @throws IOException If the record cannot be instantiated.
   */
  public static RemoveMountTableEntriesRequest newInstance() throws IOException {
    return StateStoreSerializer.newRecord(RemoveMountTableEntriesRequest.class);
  }

  /**
   * Create a request pre-populated with the mount table source paths to
   * remove.
   *
   * @param paths Source paths of the mount entries to remove.
   * @return New request record carrying the given paths.
   * @throws IOException If the record cannot be instantiated.
   */
  public static RemoveMountTableEntriesRequest newInstance(List<String> paths) throws IOException {
    final RemoveMountTableEntriesRequest req = newInstance();
    req.setSrcPaths(paths);
    return req;
  }

  /** @return Source paths of the mount entries to remove. */
  @Public
  @Unstable
  public abstract List<String> getSrcPaths();

  /** Set the source paths of the mount entries to remove. */
  @Public
  @Unstable
  public abstract void setSrcPaths(List<String> paths);
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.federation.store.protocol;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;

/**
 * API response for removing multiple mount table paths in the state store.
 */
public abstract class RemoveMountTableEntriesResponse {

  /**
   * Create an empty response record via the configured state store
   * serializer.
   *
   * @return New, unpopulated response record.
   * @throws IOException If the record cannot be instantiated.
   */
  public static RemoveMountTableEntriesResponse newInstance() throws IOException {
    return StateStoreSerializer.newRecord(RemoveMountTableEntriesResponse.class);
  }

  /** @return True when every requested path was removed successfully. */
  @Public
  @Unstable
  public abstract boolean getStatus();

  /** Set the overall success/failure status of the removal. */
  @Public
  @Unstable
  public abstract void setStatus(boolean result);

  /** @return Keys of the records (source paths) that failed to be removed. */
  @Public
  @Unstable
  public abstract List<String> getFailedRecordsKeys();

  /** Set the keys of the records that failed to be removed. */
  @Public
  @Unstable
  public abstract void setFailedRecordsKeys(List<String> failedRecordsKeys);
}
Loading
Loading