From a18ed15486785e7efdfd1ecaa793cead0e3954a2 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Fri, 16 Jan 2026 00:31:40 +0000 Subject: [PATCH 01/38] WIUP --- .../accumulo/manager/fate/FateManager.java | 144 ++++++++++++++++++ .../accumulo/manager/fate/FateWorker.java | 7 + 2 files changed, 151 insertions(+) create mode 100644 server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java create mode 100644 server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java new file mode 100644 index 00000000000..63cf2cc421f --- /dev/null +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -0,0 +1,144 @@ +package org.apache.accumulo.manager.fate; + +import com.google.common.net.HostAndPort; +import org.apache.accumulo.core.fate.FateId; +import org.apache.hadoop.util.Sets; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class FateManager { + + record FatePartition(FateId start, FateId end) { + + } + + public void managerWorkers() throws InterruptedException { + while(true) { + // TODO make configurable + Thread.sleep(10_000); + + // TODO could support RG... could user ServerId + // This map will contain all current workers even their partitions are empty + Map> currentAssignments = getCurrentAssignments(); + Set desiredParititions = getDesiredPartitions(); + + // TODO handle duplicate current assignments + + Map> desired = computeDesiredAssignments(currentAssignments, desiredParititions); + + // are there any workers with extra partitions? If so need to unload those first. 
+ boolean haveExtra = desired.entrySet().stream().anyMatch(e->{ + HostAndPort worker = e.getKey(); + var curr = currentAssignments.getOrDefault(worker, Set.of()); + var extra = Sets.difference(curr, e.getValue()); + return !extra.isEmpty(); + }); + + if(haveExtra) { + // force unload of extra partitions to make them available for other workers + desired.forEach((worker, paritions)->{ + var curr = currentAssignments.getOrDefault(worker, Set.of()); + if(!curr.equals(paritions)) { + var intersection = Sets.intersection(curr, paritions); + setWorkerPartitions(worker, intersection); + currentAssignments.put(worker, intersection); + } + }); + } + + // Load all partitions on all workers.. + desired.forEach((worker, paritions)->{ + var curr = currentAssignments.getOrDefault(worker, Set.of()); + if(!curr.equals(paritions)){ + setWorkerPartitions(worker, paritions); + } + }); + } + } + + private void setWorkerPartitions(HostAndPort worker, Set partitions) { + // TODO make RPC to get update nonce + // TODO update partitions using nonce + } + + /** + * Compute the desired distribution of partitions across workers. Favors leaving partitions in place if possible. 
+ */ + private Map> computeDesiredAssignments(Map> currentAssignments, Set desiredParititions) { + // min number of partitions a single worker must have + int minPerWorker = currentAssignments.size() / desiredParititions.size(); + // max number of partitions a single worker can have + int maxPerWorker = minPerWorker + Math.min(currentAssignments.size() % desiredParititions.size(), 1); + // number of workers that can have the max partitions + int desiredWorkersWithMax = currentAssignments.size() % desiredParititions.size(); + + Map> desiredAssignments = new HashMap<>(); + Set availablePartitions = new HashSet<>(desiredParititions); + + // remove everything that is assigned + currentAssignments.values().forEach(p->p.forEach(availablePartitions::remove)); + + // Find workers that currently have too many partitions assigned and place their excess in the available set. Let workers keep what they have when its under the limit. + int numWorkersWithMax = 0; + for(var worker : currentAssignments.keySet()) { + var assignments = new HashSet(); + var curr = currentAssignments.getOrDefault(worker, Set.of()); + // The number of partitions this worker can have, anything in excess should be added to available + int canHave = numWorkersWithMax < desiredWorkersWithMax ? maxPerWorker : minPerWorker; + + var iter = curr.iterator(); + for(int i = 0; i= maxPerWorker) { + numWorkersWithMax++; + } + } + + // Distribute available partitions to workers that do not have the minimum. + var availIter = availablePartitions.iterator(); + for(var worker : currentAssignments.keySet()) { + var assignments = desiredAssignments.get(worker); + while(assignments.size() < minPerWorker) { + // This should always have next if the creation of available partitions was done correctly. + assignments.add(availIter.next()); + } + } + + // Distribute available partitions to workers that do not have the max until no more partitions available. 
+ for(var worker : currentAssignments.keySet()) { + var assignments = desiredAssignments.get(worker); + while(assignments.size() < maxPerWorker && availIter.hasNext()){ + assignments.add(availIter.next()); + } + if(!availIter.hasNext()){ + break; + } + } + + return desiredAssignments; + } + + private Set getDesiredPartitions() { + throw new UnsupportedOperationException(); + } + + private Map> getCurrentAssignments() { + throw new UnsupportedOperationException(); + } + + + + // TODO this will not need a main eventually, will be run by the manager + public static void main(String[] args) { + + } +} diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java new file mode 100644 index 00000000000..8914435f8d6 --- /dev/null +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -0,0 +1,7 @@ +package org.apache.accumulo.manager.fate; + +public class FateWorker { + public static void main(String[] args) { + + } +} From c99e61bdcc705ba529b4cd2efb005c35c2ba3927 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Mon, 26 Jan 2026 21:16:19 +0000 Subject: [PATCH 02/38] WIP --- core/src/main/thrift/fate-worker.thrift | 26 +++++++++++++++++++ .../accumulo/manager/fate/FateManager.java | 9 +++---- 2 files changed, 30 insertions(+), 5 deletions(-) create mode 100644 core/src/main/thrift/fate-worker.thrift diff --git a/core/src/main/thrift/fate-worker.thrift b/core/src/main/thrift/fate-worker.thrift new file mode 100644 index 00000000000..5ed19f4d429 --- /dev/null +++ b/core/src/main/thrift/fate-worker.thrift @@ -0,0 +1,26 @@ +namespace java org.apache.accumulo.core.fate.thrift +namespace cpp org.apache.accumulo.core.fate.thrift + +struct FatePartition { + 1:string start + 2:string end +} + +service FateWorker { + + list getPartitions( + 1:client.TInfo tinfo, + 2:security.TCredentials credentials + ) throws ( + 
1:client.ThriftSecurityException sec + ) + + bool setPartitions( + 1:client.TInfo tinfo, + 2:security.TCredentials credentials, + 3:list current, + 4:list desired + ) throws ( + 1:client.ThriftSecurityException sec + ) +} \ No newline at end of file diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 63cf2cc421f..07e60e1c9e8 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -45,7 +45,7 @@ public void managerWorkers() throws InterruptedException { var curr = currentAssignments.getOrDefault(worker, Set.of()); if(!curr.equals(paritions)) { var intersection = Sets.intersection(curr, paritions); - setWorkerPartitions(worker, intersection); + setWorkerPartitions(worker,curr, intersection); currentAssignments.put(worker, intersection); } }); @@ -55,15 +55,14 @@ public void managerWorkers() throws InterruptedException { desired.forEach((worker, paritions)->{ var curr = currentAssignments.getOrDefault(worker, Set.of()); if(!curr.equals(paritions)){ - setWorkerPartitions(worker, paritions); + setWorkerPartitions(worker, curr, paritions); } }); } } - private void setWorkerPartitions(HostAndPort worker, Set partitions) { - // TODO make RPC to get update nonce - // TODO update partitions using nonce + private void setWorkerPartitions(HostAndPort worker, Set current, Set desired) { + // TODO make a compare and set type RPC that uses the current and desired } /** From 21f9e97fea65ed610be09ad7ae1271fa10c75419 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Mon, 26 Jan 2026 23:57:42 +0000 Subject: [PATCH 03/38] WIP --- .../FateWorkerServiceThriftClient.java | 31 + .../core/rpc/clients/ThriftClientTypes.java | 4 + core/src/main/scripts/generate-thrift.sh | 2 +- .../core/fate/thrift/FateWorkerService.java | 2804 +++++++++++++++++ 
.../core/fate/thrift/TFatePartition.java | 511 +++ core/src/main/thrift/fate-worker.thrift | 33 +- .../server/rpc/ThriftProcessorTypes.java | 10 +- .../org/apache/accumulo/manager/Manager.java | 6 +- .../accumulo/manager/fate/FateManager.java | 268 +- .../accumulo/manager/fate/FateWorker.java | 85 +- 10 files changed, 3625 insertions(+), 129 deletions(-) create mode 100644 core/src/main/java/org/apache/accumulo/core/rpc/clients/FateWorkerServiceThriftClient.java create mode 100644 core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java create mode 100644 core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/TFatePartition.java diff --git a/core/src/main/java/org/apache/accumulo/core/rpc/clients/FateWorkerServiceThriftClient.java b/core/src/main/java/org/apache/accumulo/core/rpc/clients/FateWorkerServiceThriftClient.java new file mode 100644 index 00000000000..bb4a757c41f --- /dev/null +++ b/core/src/main/java/org/apache/accumulo/core/rpc/clients/FateWorkerServiceThriftClient.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.accumulo.core.rpc.clients; + +import org.apache.accumulo.core.fate.thrift.FateWorkerService; + +/** + * Client side object that can be used to interact with services that support scan operations + * against tablets. See TabletScanClientService$Iface for a list of supported operations. + */ +public class FateWorkerServiceThriftClient extends ThriftClientTypes { + FateWorkerServiceThriftClient(String serviceName) { + super(serviceName, new FateWorkerService.Client.Factory()); + } +} diff --git a/core/src/main/java/org/apache/accumulo/core/rpc/clients/ThriftClientTypes.java b/core/src/main/java/org/apache/accumulo/core/rpc/clients/ThriftClientTypes.java index 5b9a5c203d1..e8bcac16e33 100644 --- a/core/src/main/java/org/apache/accumulo/core/rpc/clients/ThriftClientTypes.java +++ b/core/src/main/java/org/apache/accumulo/core/rpc/clients/ThriftClientTypes.java @@ -24,6 +24,7 @@ import org.apache.accumulo.core.client.AccumuloException; import org.apache.accumulo.core.client.AccumuloSecurityException; import org.apache.accumulo.core.clientImpl.ClientContext; +import org.apache.accumulo.core.fate.thrift.FateWorkerService; import org.apache.thrift.TException; import org.apache.thrift.TServiceClient; import org.apache.thrift.TServiceClientFactory; @@ -61,6 +62,9 @@ public abstract class ThriftClientTypes { public static final ServerProcessServiceThriftClient SERVER_PROCESS = new ServerProcessServiceThriftClient("process"); + protected static final ThriftClientTypes FATE_WORKER = + new FateWorkerServiceThriftClient("fateworker"); + /** * execute method with supplied client returning object of type R * diff --git a/core/src/main/scripts/generate-thrift.sh b/core/src/main/scripts/generate-thrift.sh index 1f787d46523..397f042dd7c 100755 --- a/core/src/main/scripts/generate-thrift.sh +++ b/core/src/main/scripts/generate-thrift.sh @@ -32,7 +32,7 @@ [[ -z $REQUIRED_THRIFT_VERSION ]] && REQUIRED_THRIFT_VERSION='0.17.0' [[ -z $INCLUDED_MODULES ]] && 
INCLUDED_MODULES=() [[ -z $BASE_OUTPUT_PACKAGE ]] && BASE_OUTPUT_PACKAGE='org.apache.accumulo.core' -[[ -z $PACKAGES_TO_GENERATE ]] && PACKAGES_TO_GENERATE=(process gc manager tabletserver securityImpl clientImpl dataImpl compaction tabletingest tablet tabletscan) +[[ -z $PACKAGES_TO_GENERATE ]] && PACKAGES_TO_GENERATE=(process gc manager tabletserver securityImpl clientImpl dataImpl compaction fate tabletingest tablet tabletscan) [[ -z $BUILD_DIR ]] && BUILD_DIR='target' [[ -z $LANGUAGES_TO_GENERATE ]] && LANGUAGES_TO_GENERATE=(java) [[ -z $FINAL_DIR ]] && FINAL_DIR='src/main' diff --git a/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java b/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java new file mode 100644 index 00000000000..eb04b865cbb --- /dev/null +++ b/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java @@ -0,0 +1,2804 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +/** + * Autogenerated by Thrift Compiler (0.17.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.accumulo.core.fate.thrift; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) +public class FateWorkerService { + + public interface Iface { + + public java.util.List getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException; + + public boolean setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List current, java.util.List desired) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException; + + } + + public interface AsyncIface { + + public void getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException; + + public void setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List current, java.util.List desired, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + } + + public static class Client extends org.apache.thrift.TServiceClient implements Iface { + public static class Factory implements org.apache.thrift.TServiceClientFactory { + public Factory() {} + @Override + public Client getClient(org.apache.thrift.protocol.TProtocol prot) { + return new Client(prot); + } + @Override + public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { + return new Client(iprot, oprot); + } + 
} + + public Client(org.apache.thrift.protocol.TProtocol prot) + { + super(prot, prot); + } + + public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { + super(iprot, oprot); + } + + @Override + public java.util.List getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException + { + send_getPartitions(tinfo, credentials); + return recv_getPartitions(); + } + + public void send_getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.thrift.TException + { + getPartitions_args args = new getPartitions_args(); + args.setTinfo(tinfo); + args.setCredentials(credentials); + sendBase("getPartitions", args); + } + + public java.util.List recv_getPartitions() throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException + { + getPartitions_result result = new getPartitions_result(); + receiveBase(result, "getPartitions"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.sec != null) { + throw result.sec; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getPartitions failed: unknown result"); + } + + @Override + public boolean setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List current, java.util.List desired) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException + { + send_setPartitions(tinfo, credentials, current, desired); + return recv_setPartitions(); + } + + public void send_setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, 
org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List current, java.util.List desired) throws org.apache.thrift.TException + { + setPartitions_args args = new setPartitions_args(); + args.setTinfo(tinfo); + args.setCredentials(credentials); + args.setCurrent(current); + args.setDesired(desired); + sendBase("setPartitions", args); + } + + public boolean recv_setPartitions() throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException + { + setPartitions_result result = new setPartitions_result(); + receiveBase(result, "setPartitions"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.sec != null) { + throw result.sec; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "setPartitions failed: unknown result"); + } + + } + public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface { + public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { + private org.apache.thrift.async.TAsyncClientManager clientManager; + private org.apache.thrift.protocol.TProtocolFactory protocolFactory; + public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) { + this.clientManager = clientManager; + this.protocolFactory = protocolFactory; + } + @Override + public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) { + return new AsyncClient(protocolFactory, clientManager, transport); + } + } + + public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) { + super(protocolFactory, clientManager, transport); + } + + @Override + public void getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, 
org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + checkReady(); + getPartitions_call method_call = new getPartitions_call(tinfo, credentials, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class getPartitions_call extends org.apache.thrift.async.TAsyncMethodCall> { + private org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo; + private org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; + public getPartitions_call(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.tinfo = tinfo; + this.credentials = credentials; + } + + @Override + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getPartitions", org.apache.thrift.protocol.TMessageType.CALL, 0)); + getPartitions_args args = new getPartitions_args(); + args.setTinfo(tinfo); + args.setCredentials(credentials); + args.write(prot); + prot.writeMessageEnd(); + } + + @Override + public java.util.List getResult() throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new java.lang.IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new 
org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_getPartitions(); + } + } + + @Override + public void setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List current, java.util.List desired, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + setPartitions_call method_call = new setPartitions_call(tinfo, credentials, current, desired, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class setPartitions_call extends org.apache.thrift.async.TAsyncMethodCall { + private org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo; + private org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; + private java.util.List current; + private java.util.List desired; + public setPartitions_call(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List current, java.util.List desired, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.tinfo = tinfo; + this.credentials = credentials; + this.current = current; + this.desired = desired; + } + + @Override + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("setPartitions", org.apache.thrift.protocol.TMessageType.CALL, 
0)); + setPartitions_args args = new setPartitions_args(); + args.setTinfo(tinfo); + args.setCredentials(credentials); + args.setCurrent(current); + args.setDesired(desired); + args.write(prot); + prot.writeMessageEnd(); + } + + @Override + public java.lang.Boolean getResult() throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new java.lang.IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_setPartitions(); + } + } + + } + + public static class Processor extends org.apache.thrift.TBaseProcessor implements org.apache.thrift.TProcessor { + private static final org.slf4j.Logger _LOGGER = org.slf4j.LoggerFactory.getLogger(Processor.class.getName()); + public Processor(I iface) { + super(iface, getProcessMap(new java.util.HashMap>())); + } + + protected Processor(I iface, java.util.Map> processMap) { + super(iface, getProcessMap(processMap)); + } + + private static java.util.Map> getProcessMap(java.util.Map> processMap) { + processMap.put("getPartitions", new getPartitions()); + processMap.put("setPartitions", new setPartitions()); + return processMap; + } + + public static class getPartitions extends org.apache.thrift.ProcessFunction { + public getPartitions() { + super("getPartitions"); + } + + @Override + public getPartitions_args getEmptyArgsInstance() { + return new getPartitions_args(); + } + + @Override + protected boolean isOneway() { + return false; + } + + @Override + protected boolean rethrowUnhandledExceptions() { + return false; + } + + @Override + public getPartitions_result getResult(I iface, getPartitions_args args) throws 
org.apache.thrift.TException { + getPartitions_result result = new getPartitions_result(); + try { + result.success = iface.getPartitions(args.tinfo, args.credentials); + } catch (org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException sec) { + result.sec = sec; + } + return result; + } + } + + public static class setPartitions extends org.apache.thrift.ProcessFunction { + public setPartitions() { + super("setPartitions"); + } + + @Override + public setPartitions_args getEmptyArgsInstance() { + return new setPartitions_args(); + } + + @Override + protected boolean isOneway() { + return false; + } + + @Override + protected boolean rethrowUnhandledExceptions() { + return false; + } + + @Override + public setPartitions_result getResult(I iface, setPartitions_args args) throws org.apache.thrift.TException { + setPartitions_result result = new setPartitions_result(); + try { + result.success = iface.setPartitions(args.tinfo, args.credentials, args.current, args.desired); + result.setSuccessIsSet(true); + } catch (org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException sec) { + result.sec = sec; + } + return result; + } + } + + } + + public static class AsyncProcessor extends org.apache.thrift.TBaseAsyncProcessor { + private static final org.slf4j.Logger _LOGGER = org.slf4j.LoggerFactory.getLogger(AsyncProcessor.class.getName()); + public AsyncProcessor(I iface) { + super(iface, getProcessMap(new java.util.HashMap>())); + } + + protected AsyncProcessor(I iface, java.util.Map> processMap) { + super(iface, getProcessMap(processMap)); + } + + private static java.util.Map> getProcessMap(java.util.Map> processMap) { + processMap.put("getPartitions", new getPartitions()); + processMap.put("setPartitions", new setPartitions()); + return processMap; + } + + public static class getPartitions extends org.apache.thrift.AsyncProcessFunction> { + public getPartitions() { + super("getPartitions"); + } + + @Override + public getPartitions_args 
getEmptyArgsInstance() { + return new getPartitions_args(); + } + + @Override + public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new org.apache.thrift.async.AsyncMethodCallback>() { + @Override + public void onComplete(java.util.List o) { + getPartitions_result result = new getPartitions_result(); + result.success = o; + try { + fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + } catch (org.apache.thrift.transport.TTransportException e) { + _LOGGER.error("TTransportException writing to internal frame buffer", e); + fb.close(); + } catch (java.lang.Exception e) { + _LOGGER.error("Exception writing to internal frame buffer", e); + onError(e); + } + } + @Override + public void onError(java.lang.Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TSerializable msg; + getPartitions_result result = new getPartitions_result(); + if (e instanceof org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException) { + result.sec = (org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException) e; + result.setSecIsSet(true); + msg = result; + } else if (e instanceof org.apache.thrift.transport.TTransportException) { + _LOGGER.error("TTransportException inside handler", e); + fb.close(); + return; + } else if (e instanceof org.apache.thrift.TApplicationException) { + _LOGGER.error("TApplicationException inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TApplicationException)e; + } else { + _LOGGER.error("Exception inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + 
fcall.sendResponse(fb,msg,msgType,seqid); + } catch (java.lang.Exception ex) { + _LOGGER.error("Exception writing to internal frame buffer", ex); + fb.close(); + } + } + }; + } + + @Override + protected boolean isOneway() { + return false; + } + + @Override + public void start(I iface, getPartitions_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + iface.getPartitions(args.tinfo, args.credentials,resultHandler); + } + } + + public static class setPartitions extends org.apache.thrift.AsyncProcessFunction { + public setPartitions() { + super("setPartitions"); + } + + @Override + public setPartitions_args getEmptyArgsInstance() { + return new setPartitions_args(); + } + + @Override + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new org.apache.thrift.async.AsyncMethodCallback() { + @Override + public void onComplete(java.lang.Boolean o) { + setPartitions_result result = new setPartitions_result(); + result.success = o; + result.setSuccessIsSet(true); + try { + fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + } catch (org.apache.thrift.transport.TTransportException e) { + _LOGGER.error("TTransportException writing to internal frame buffer", e); + fb.close(); + } catch (java.lang.Exception e) { + _LOGGER.error("Exception writing to internal frame buffer", e); + onError(e); + } + } + @Override + public void onError(java.lang.Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TSerializable msg; + setPartitions_result result = new setPartitions_result(); + if (e instanceof org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException) { + result.sec = (org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException) e; + 
result.setSecIsSet(true); + msg = result; + } else if (e instanceof org.apache.thrift.transport.TTransportException) { + _LOGGER.error("TTransportException inside handler", e); + fb.close(); + return; + } else if (e instanceof org.apache.thrift.TApplicationException) { + _LOGGER.error("TApplicationException inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TApplicationException)e; + } else { + _LOGGER.error("Exception inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + } catch (java.lang.Exception ex) { + _LOGGER.error("Exception writing to internal frame buffer", ex); + fb.close(); + } + } + }; + } + + @Override + protected boolean isOneway() { + return false; + } + + @Override + public void start(I iface, setPartitions_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.setPartitions(args.tinfo, args.credentials, args.current, args.desired,resultHandler); + } + } + + } + + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) + public static class getPartitions_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getPartitions_args"); + + private static final org.apache.thrift.protocol.TField TINFO_FIELD_DESC = new org.apache.thrift.protocol.TField("tinfo", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField CREDENTIALS_FIELD_DESC = new org.apache.thrift.protocol.TField("credentials", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final org.apache.thrift.scheme.SchemeFactory 
STANDARD_SCHEME_FACTORY = new getPartitions_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getPartitions_argsTupleSchemeFactory(); + + public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo; // required + public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TINFO((short)1, "tinfo"), + CREDENTIALS((short)2, "credentials"); + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TINFO + return TINFO; + case 2: // CREDENTIALS + return CREDENTIALS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + @Override + public short getThriftFieldId() { + return _thriftId; + } + + @Override + public java.lang.String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TINFO, new org.apache.thrift.meta_data.FieldMetaData("tinfo", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.clientImpl.thrift.TInfo.class))); + tmpMap.put(_Fields.CREDENTIALS, new org.apache.thrift.meta_data.FieldMetaData("credentials", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.securityImpl.thrift.TCredentials.class))); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getPartitions_args.class, metaDataMap); + } + + public getPartitions_args() { + } + + public getPartitions_args( + org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, + org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) + { + this(); + this.tinfo = tinfo; + this.credentials = credentials; + } + + /** + * Performs a deep copy on other. 
+ */ + public getPartitions_args(getPartitions_args other) { + if (other.isSetTinfo()) { + this.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(other.tinfo); + } + if (other.isSetCredentials()) { + this.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(other.credentials); + } + } + + @Override + public getPartitions_args deepCopy() { + return new getPartitions_args(this); + } + + @Override + public void clear() { + this.tinfo = null; + this.credentials = null; + } + + @org.apache.thrift.annotation.Nullable + public org.apache.accumulo.core.clientImpl.thrift.TInfo getTinfo() { + return this.tinfo; + } + + public getPartitions_args setTinfo(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo) { + this.tinfo = tinfo; + return this; + } + + public void unsetTinfo() { + this.tinfo = null; + } + + /** Returns true if field tinfo is set (has been assigned a value) and false otherwise */ + public boolean isSetTinfo() { + return this.tinfo != null; + } + + public void setTinfoIsSet(boolean value) { + if (!value) { + this.tinfo = null; + } + } + + @org.apache.thrift.annotation.Nullable + public org.apache.accumulo.core.securityImpl.thrift.TCredentials getCredentials() { + return this.credentials; + } + + public getPartitions_args setCredentials(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) { + this.credentials = credentials; + return this; + } + + public void unsetCredentials() { + this.credentials = null; + } + + /** Returns true if field credentials is set (has been assigned a value) and false otherwise */ + public boolean isSetCredentials() { + return this.credentials != null; + } + + public void setCredentialsIsSet(boolean value) { + if (!value) { + this.credentials = null; + } + } + + @Override + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + case 
TINFO: + if (value == null) { + unsetTinfo(); + } else { + setTinfo((org.apache.accumulo.core.clientImpl.thrift.TInfo)value); + } + break; + + case CREDENTIALS: + if (value == null) { + unsetCredentials(); + } else { + setCredentials((org.apache.accumulo.core.securityImpl.thrift.TCredentials)value); + } + break; + + } + } + + @org.apache.thrift.annotation.Nullable + @Override + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + case TINFO: + return getTinfo(); + + case CREDENTIALS: + return getCredentials(); + + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + @Override + public boolean isSet(_Fields field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + case TINFO: + return isSetTinfo(); + case CREDENTIALS: + return isSetCredentials(); + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that instanceof getPartitions_args) + return this.equals((getPartitions_args)that); + return false; + } + + public boolean equals(getPartitions_args that) { + if (that == null) + return false; + if (this == that) + return true; + + boolean this_present_tinfo = true && this.isSetTinfo(); + boolean that_present_tinfo = true && that.isSetTinfo(); + if (this_present_tinfo || that_present_tinfo) { + if (!(this_present_tinfo && that_present_tinfo)) + return false; + if (!this.tinfo.equals(that.tinfo)) + return false; + } + + boolean this_present_credentials = true && this.isSetCredentials(); + boolean that_present_credentials = true && that.isSetCredentials(); + if (this_present_credentials || that_present_credentials) { + if (!(this_present_credentials && that_present_credentials)) + return false; + if (!this.credentials.equals(that.credentials)) + return false; + } + + return true; + } + + @Override + public int 
hashCode() { + int hashCode = 1; + + hashCode = hashCode * 8191 + ((isSetTinfo()) ? 131071 : 524287); + if (isSetTinfo()) + hashCode = hashCode * 8191 + tinfo.hashCode(); + + hashCode = hashCode * 8191 + ((isSetCredentials()) ? 131071 : 524287); + if (isSetCredentials()) + hashCode = hashCode * 8191 + credentials.hashCode(); + + return hashCode; + } + + @Override + public int compareTo(getPartitions_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = java.lang.Boolean.compare(isSetTinfo(), other.isSetTinfo()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTinfo()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tinfo, other.tinfo); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = java.lang.Boolean.compare(isSetCredentials(), other.isSetCredentials()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCredentials()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.credentials, other.credentials); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + @org.apache.thrift.annotation.Nullable + @Override + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() { + java.lang.StringBuilder sb = new java.lang.StringBuilder("getPartitions_args("); + boolean first = true; + + sb.append("tinfo:"); + if (this.tinfo == null) { + sb.append("null"); + } else { + sb.append(this.tinfo); + } + first = false; + if (!first) sb.append(", 
"); + sb.append("credentials:"); + if (this.credentials == null) { + sb.append("null"); + } else { + sb.append(this.credentials); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (tinfo != null) { + tinfo.validate(); + } + if (credentials != null) { + credentials.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getPartitions_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public getPartitions_argsStandardScheme getScheme() { + return new getPartitions_argsStandardScheme(); + } + } + + private static class getPartitions_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot, getPartitions_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TINFO + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(); + struct.tinfo.read(iprot); 
+ struct.setTinfoIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // CREDENTIALS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(); + struct.credentials.read(iprot); + struct.setCredentialsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot, getPartitions_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.tinfo != null) { + oprot.writeFieldBegin(TINFO_FIELD_DESC); + struct.tinfo.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.credentials != null) { + oprot.writeFieldBegin(CREDENTIALS_FIELD_DESC); + struct.credentials.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getPartitions_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public getPartitions_argsTupleScheme getScheme() { + return new getPartitions_argsTupleScheme(); + } + } + + private static class getPartitions_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getPartitions_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetTinfo()) { + optionals.set(0); + } + if 
(struct.isSetCredentials()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetTinfo()) { + struct.tinfo.write(oprot); + } + if (struct.isSetCredentials()) { + struct.credentials.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getPartitions_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(); + struct.tinfo.read(iprot); + struct.setTinfoIsSet(true); + } + if (incoming.get(1)) { + struct.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(); + struct.credentials.read(iprot); + struct.setCredentialsIsSet(true); + } + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? 
STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + } + + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) + public static class getPartitions_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getPartitions_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + private static final org.apache.thrift.protocol.TField SEC_FIELD_DESC = new org.apache.thrift.protocol.TField("sec", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getPartitions_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getPartitions_resultTupleSchemeFactory(); + + public @org.apache.thrift.annotation.Nullable java.util.List success; // required + public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException sec; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + SEC((short)1, "sec"); + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // SEC + return SEC; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + @Override + public short getThriftFieldId() { + return _thriftId; + } + + @Override + public java.lang.String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFatePartition.class)))); + tmpMap.put(_Fields.SEC, new org.apache.thrift.meta_data.FieldMetaData("sec", org.apache.thrift.TFieldRequirementType.DEFAULT, + new 
org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException.class))); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getPartitions_result.class, metaDataMap); + } + + public getPartitions_result() { + } + + public getPartitions_result( + java.util.List success, + org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException sec) + { + this(); + this.success = success; + this.sec = sec; + } + + /** + * Performs a deep copy on other. + */ + public getPartitions_result(getPartitions_result other) { + if (other.isSetSuccess()) { + java.util.List __this__success = new java.util.ArrayList(other.success.size()); + for (TFatePartition other_element : other.success) { + __this__success.add(new TFatePartition(other_element)); + } + this.success = __this__success; + } + if (other.isSetSec()) { + this.sec = new org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException(other.sec); + } + } + + @Override + public getPartitions_result deepCopy() { + return new getPartitions_result(this); + } + + @Override + public void clear() { + this.success = null; + this.sec = null; + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + @org.apache.thrift.annotation.Nullable + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(TFatePartition elem) { + if (this.success == null) { + this.success = new java.util.ArrayList(); + } + this.success.add(elem); + } + + @org.apache.thrift.annotation.Nullable + public java.util.List getSuccess() { + return this.success; + } + + public getPartitions_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List success) { + this.success = success; + return this; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + @org.apache.thrift.annotation.Nullable + public org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException getSec() { + return this.sec; + } + + public getPartitions_result setSec(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException sec) { + this.sec = sec; + return this; + } + + public void unsetSec() { + this.sec = null; + } + + /** Returns true if field sec is set (has been assigned a value) and false otherwise */ + public boolean isSetSec() { + return this.sec != null; + } + + public void setSecIsSet(boolean value) { + if (!value) { + this.sec = null; + } + } + + @Override + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((java.util.List)value); + } + break; + + case SEC: + if (value == null) { + unsetSec(); + } else { + setSec((org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException)value); + } + break; + + } + } + + @org.apache.thrift.annotation.Nullable + @Override + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return 
getSuccess(); + + case SEC: + return getSec(); + + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + @Override + public boolean isSet(_Fields field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case SEC: + return isSetSec(); + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that instanceof getPartitions_result) + return this.equals((getPartitions_result)that); + return false; + } + + public boolean equals(getPartitions_result that) { + if (that == null) + return false; + if (this == that) + return true; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_sec = true && this.isSetSec(); + boolean that_present_sec = true && that.isSetSec(); + if (this_present_sec || that_present_sec) { + if (!(this_present_sec && that_present_sec)) + return false; + if (!this.sec.equals(that.sec)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + int hashCode = 1; + + hashCode = hashCode * 8191 + ((isSetSuccess()) ? 131071 : 524287); + if (isSetSuccess()) + hashCode = hashCode * 8191 + success.hashCode(); + + hashCode = hashCode * 8191 + ((isSetSec()) ? 
131071 : 524287); + if (isSetSec()) + hashCode = hashCode * 8191 + sec.hashCode(); + + return hashCode; + } + + @Override + public int compareTo(getPartitions_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = java.lang.Boolean.compare(isSetSuccess(), other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = java.lang.Boolean.compare(isSetSec(), other.isSetSec()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSec()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sec, other.sec); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + @org.apache.thrift.annotation.Nullable + @Override + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() { + java.lang.StringBuilder sb = new java.lang.StringBuilder("getPartitions_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("sec:"); + if (this.sec == null) { + sb.append("null"); + } else { + sb.append(this.sec); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // 
check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getPartitions_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public getPartitions_resultStandardScheme getScheme() { + return new getPartitions_resultStandardScheme(); + } + } + + private static class getPartitions_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot, getPartitions_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list0 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list0.size); + @org.apache.thrift.annotation.Nullable TFatePartition _elem1; + for (int _i2 = 0; _i2 < _list0.size; ++_i2) + { + _elem1 = new TFatePartition(); + _elem1.read(iprot); + struct.success.add(_elem1); + } + iprot.readListEnd(); + } + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 
1: // SEC + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.sec = new org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException(); + struct.sec.read(iprot); + struct.setSecIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot, getPartitions_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); + for (TFatePartition _iter3 : struct.success) + { + _iter3.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.sec != null) { + oprot.writeFieldBegin(SEC_FIELD_DESC); + struct.sec.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getPartitions_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public getPartitions_resultTupleScheme getScheme() { + return new getPartitions_resultTupleScheme(); + } + } + + private static class getPartitions_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getPartitions_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetSuccess()) { + 
optionals.set(0); + } + if (struct.isSetSec()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + { + oprot.writeI32(struct.success.size()); + for (TFatePartition _iter4 : struct.success) + { + _iter4.write(oprot); + } + } + } + if (struct.isSetSec()) { + struct.sec.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getPartitions_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list5 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list5.size); + @org.apache.thrift.annotation.Nullable TFatePartition _elem6; + for (int _i7 = 0; _i7 < _list5.size; ++_i7) + { + _elem6 = new TFatePartition(); + _elem6.read(iprot); + struct.success.add(_elem6); + } + } + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.sec = new org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException(); + struct.sec.read(iprot); + struct.setSecIsSet(true); + } + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? 
STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + } + + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) + public static class setPartitions_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setPartitions_args"); + + private static final org.apache.thrift.protocol.TField TINFO_FIELD_DESC = new org.apache.thrift.protocol.TField("tinfo", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField CREDENTIALS_FIELD_DESC = new org.apache.thrift.protocol.TField("credentials", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField CURRENT_FIELD_DESC = new org.apache.thrift.protocol.TField("current", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField DESIRED_FIELD_DESC = new org.apache.thrift.protocol.TField("desired", org.apache.thrift.protocol.TType.LIST, (short)4); + + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new setPartitions_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new setPartitions_argsTupleSchemeFactory(); + + public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo; // required + public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; // required + public @org.apache.thrift.annotation.Nullable java.util.List current; // required + public @org.apache.thrift.annotation.Nullable java.util.List desired; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TINFO((short)1, "tinfo"), + CREDENTIALS((short)2, "credentials"), + CURRENT((short)3, "current"), + DESIRED((short)4, "desired"); + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TINFO + return TINFO; + case 2: // CREDENTIALS + return CREDENTIALS; + case 3: // CURRENT + return CURRENT; + case 4: // DESIRED + return DESIRED; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + @Override + public short getThriftFieldId() { + return _thriftId; + } + + @Override + public java.lang.String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TINFO, new org.apache.thrift.meta_data.FieldMetaData("tinfo", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.clientImpl.thrift.TInfo.class))); + tmpMap.put(_Fields.CREDENTIALS, new org.apache.thrift.meta_data.FieldMetaData("credentials", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.securityImpl.thrift.TCredentials.class))); + tmpMap.put(_Fields.CURRENT, new org.apache.thrift.meta_data.FieldMetaData("current", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFatePartition.class)))); + tmpMap.put(_Fields.DESIRED, new org.apache.thrift.meta_data.FieldMetaData("desired", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new 
org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFatePartition.class)))); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setPartitions_args.class, metaDataMap); + } + + public setPartitions_args() { + } + + public setPartitions_args( + org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, + org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, + java.util.List current, + java.util.List desired) + { + this(); + this.tinfo = tinfo; + this.credentials = credentials; + this.current = current; + this.desired = desired; + } + + /** + * Performs a deep copy on other. + */ + public setPartitions_args(setPartitions_args other) { + if (other.isSetTinfo()) { + this.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(other.tinfo); + } + if (other.isSetCredentials()) { + this.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(other.credentials); + } + if (other.isSetCurrent()) { + java.util.List __this__current = new java.util.ArrayList(other.current.size()); + for (TFatePartition other_element : other.current) { + __this__current.add(new TFatePartition(other_element)); + } + this.current = __this__current; + } + if (other.isSetDesired()) { + java.util.List __this__desired = new java.util.ArrayList(other.desired.size()); + for (TFatePartition other_element : other.desired) { + __this__desired.add(new TFatePartition(other_element)); + } + this.desired = __this__desired; + } + } + + @Override + public setPartitions_args deepCopy() { + return new setPartitions_args(this); + } + + @Override + public void clear() { + this.tinfo = null; + this.credentials = null; + this.current = null; + this.desired = null; + } + + @org.apache.thrift.annotation.Nullable + public org.apache.accumulo.core.clientImpl.thrift.TInfo getTinfo() { + return this.tinfo; + } + + public setPartitions_args 
setTinfo(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo) { + this.tinfo = tinfo; + return this; + } + + public void unsetTinfo() { + this.tinfo = null; + } + + /** Returns true if field tinfo is set (has been assigned a value) and false otherwise */ + public boolean isSetTinfo() { + return this.tinfo != null; + } + + public void setTinfoIsSet(boolean value) { + if (!value) { + this.tinfo = null; + } + } + + @org.apache.thrift.annotation.Nullable + public org.apache.accumulo.core.securityImpl.thrift.TCredentials getCredentials() { + return this.credentials; + } + + public setPartitions_args setCredentials(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) { + this.credentials = credentials; + return this; + } + + public void unsetCredentials() { + this.credentials = null; + } + + /** Returns true if field credentials is set (has been assigned a value) and false otherwise */ + public boolean isSetCredentials() { + return this.credentials != null; + } + + public void setCredentialsIsSet(boolean value) { + if (!value) { + this.credentials = null; + } + } + + public int getCurrentSize() { + return (this.current == null) ? 0 : this.current.size(); + } + + @org.apache.thrift.annotation.Nullable + public java.util.Iterator getCurrentIterator() { + return (this.current == null) ? 
null : this.current.iterator(); + } + + public void addToCurrent(TFatePartition elem) { + if (this.current == null) { + this.current = new java.util.ArrayList(); + } + this.current.add(elem); + } + + @org.apache.thrift.annotation.Nullable + public java.util.List getCurrent() { + return this.current; + } + + public setPartitions_args setCurrent(@org.apache.thrift.annotation.Nullable java.util.List current) { + this.current = current; + return this; + } + + public void unsetCurrent() { + this.current = null; + } + + /** Returns true if field current is set (has been assigned a value) and false otherwise */ + public boolean isSetCurrent() { + return this.current != null; + } + + public void setCurrentIsSet(boolean value) { + if (!value) { + this.current = null; + } + } + + public int getDesiredSize() { + return (this.desired == null) ? 0 : this.desired.size(); + } + + @org.apache.thrift.annotation.Nullable + public java.util.Iterator getDesiredIterator() { + return (this.desired == null) ? 
null : this.desired.iterator(); + } + + public void addToDesired(TFatePartition elem) { + if (this.desired == null) { + this.desired = new java.util.ArrayList(); + } + this.desired.add(elem); + } + + @org.apache.thrift.annotation.Nullable + public java.util.List getDesired() { + return this.desired; + } + + public setPartitions_args setDesired(@org.apache.thrift.annotation.Nullable java.util.List desired) { + this.desired = desired; + return this; + } + + public void unsetDesired() { + this.desired = null; + } + + /** Returns true if field desired is set (has been assigned a value) and false otherwise */ + public boolean isSetDesired() { + return this.desired != null; + } + + public void setDesiredIsSet(boolean value) { + if (!value) { + this.desired = null; + } + } + + @Override + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + case TINFO: + if (value == null) { + unsetTinfo(); + } else { + setTinfo((org.apache.accumulo.core.clientImpl.thrift.TInfo)value); + } + break; + + case CREDENTIALS: + if (value == null) { + unsetCredentials(); + } else { + setCredentials((org.apache.accumulo.core.securityImpl.thrift.TCredentials)value); + } + break; + + case CURRENT: + if (value == null) { + unsetCurrent(); + } else { + setCurrent((java.util.List)value); + } + break; + + case DESIRED: + if (value == null) { + unsetDesired(); + } else { + setDesired((java.util.List)value); + } + break; + + } + } + + @org.apache.thrift.annotation.Nullable + @Override + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + case TINFO: + return getTinfo(); + + case CREDENTIALS: + return getCredentials(); + + case CURRENT: + return getCurrent(); + + case DESIRED: + return getDesired(); + + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + @Override + public boolean isSet(_Fields 
field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + case TINFO: + return isSetTinfo(); + case CREDENTIALS: + return isSetCredentials(); + case CURRENT: + return isSetCurrent(); + case DESIRED: + return isSetDesired(); + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that instanceof setPartitions_args) + return this.equals((setPartitions_args)that); + return false; + } + + public boolean equals(setPartitions_args that) { + if (that == null) + return false; + if (this == that) + return true; + + boolean this_present_tinfo = true && this.isSetTinfo(); + boolean that_present_tinfo = true && that.isSetTinfo(); + if (this_present_tinfo || that_present_tinfo) { + if (!(this_present_tinfo && that_present_tinfo)) + return false; + if (!this.tinfo.equals(that.tinfo)) + return false; + } + + boolean this_present_credentials = true && this.isSetCredentials(); + boolean that_present_credentials = true && that.isSetCredentials(); + if (this_present_credentials || that_present_credentials) { + if (!(this_present_credentials && that_present_credentials)) + return false; + if (!this.credentials.equals(that.credentials)) + return false; + } + + boolean this_present_current = true && this.isSetCurrent(); + boolean that_present_current = true && that.isSetCurrent(); + if (this_present_current || that_present_current) { + if (!(this_present_current && that_present_current)) + return false; + if (!this.current.equals(that.current)) + return false; + } + + boolean this_present_desired = true && this.isSetDesired(); + boolean that_present_desired = true && that.isSetDesired(); + if (this_present_desired || that_present_desired) { + if (!(this_present_desired && that_present_desired)) + return false; + if (!this.desired.equals(that.desired)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + int hashCode = 1; + + hashCode = 
hashCode * 8191 + ((isSetTinfo()) ? 131071 : 524287); + if (isSetTinfo()) + hashCode = hashCode * 8191 + tinfo.hashCode(); + + hashCode = hashCode * 8191 + ((isSetCredentials()) ? 131071 : 524287); + if (isSetCredentials()) + hashCode = hashCode * 8191 + credentials.hashCode(); + + hashCode = hashCode * 8191 + ((isSetCurrent()) ? 131071 : 524287); + if (isSetCurrent()) + hashCode = hashCode * 8191 + current.hashCode(); + + hashCode = hashCode * 8191 + ((isSetDesired()) ? 131071 : 524287); + if (isSetDesired()) + hashCode = hashCode * 8191 + desired.hashCode(); + + return hashCode; + } + + @Override + public int compareTo(setPartitions_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = java.lang.Boolean.compare(isSetTinfo(), other.isSetTinfo()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTinfo()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tinfo, other.tinfo); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = java.lang.Boolean.compare(isSetCredentials(), other.isSetCredentials()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCredentials()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.credentials, other.credentials); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = java.lang.Boolean.compare(isSetCurrent(), other.isSetCurrent()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCurrent()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.current, other.current); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = java.lang.Boolean.compare(isSetDesired(), other.isSetDesired()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDesired()) { + lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.desired, other.desired); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + @org.apache.thrift.annotation.Nullable + @Override + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() { + java.lang.StringBuilder sb = new java.lang.StringBuilder("setPartitions_args("); + boolean first = true; + + sb.append("tinfo:"); + if (this.tinfo == null) { + sb.append("null"); + } else { + sb.append(this.tinfo); + } + first = false; + if (!first) sb.append(", "); + sb.append("credentials:"); + if (this.credentials == null) { + sb.append("null"); + } else { + sb.append(this.credentials); + } + first = false; + if (!first) sb.append(", "); + sb.append("current:"); + if (this.current == null) { + sb.append("null"); + } else { + sb.append(this.current); + } + first = false; + if (!first) sb.append(", "); + sb.append("desired:"); + if (this.desired == null) { + sb.append("null"); + } else { + sb.append(this.desired); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (tinfo != null) { + tinfo.validate(); + } + if (credentials != null) { + credentials.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + 
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class setPartitions_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public setPartitions_argsStandardScheme getScheme() { + return new setPartitions_argsStandardScheme(); + } + } + + private static class setPartitions_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot, setPartitions_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TINFO + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(); + struct.tinfo.read(iprot); + struct.setTinfoIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // CREDENTIALS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(); + struct.credentials.read(iprot); + struct.setCredentialsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // CURRENT + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list8 = iprot.readListBegin(); + struct.current = new java.util.ArrayList(_list8.size); + @org.apache.thrift.annotation.Nullable 
TFatePartition _elem9; + for (int _i10 = 0; _i10 < _list8.size; ++_i10) + { + _elem9 = new TFatePartition(); + _elem9.read(iprot); + struct.current.add(_elem9); + } + iprot.readListEnd(); + } + struct.setCurrentIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // DESIRED + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list11 = iprot.readListBegin(); + struct.desired = new java.util.ArrayList(_list11.size); + @org.apache.thrift.annotation.Nullable TFatePartition _elem12; + for (int _i13 = 0; _i13 < _list11.size; ++_i13) + { + _elem12 = new TFatePartition(); + _elem12.read(iprot); + struct.desired.add(_elem12); + } + iprot.readListEnd(); + } + struct.setDesiredIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot, setPartitions_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.tinfo != null) { + oprot.writeFieldBegin(TINFO_FIELD_DESC); + struct.tinfo.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.credentials != null) { + oprot.writeFieldBegin(CREDENTIALS_FIELD_DESC); + struct.credentials.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.current != null) { + oprot.writeFieldBegin(CURRENT_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.current.size())); + for (TFatePartition _iter14 : struct.current) + { + _iter14.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + 
} + if (struct.desired != null) { + oprot.writeFieldBegin(DESIRED_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.desired.size())); + for (TFatePartition _iter15 : struct.desired) + { + _iter15.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class setPartitions_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public setPartitions_argsTupleScheme getScheme() { + return new setPartitions_argsTupleScheme(); + } + } + + private static class setPartitions_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, setPartitions_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetTinfo()) { + optionals.set(0); + } + if (struct.isSetCredentials()) { + optionals.set(1); + } + if (struct.isSetCurrent()) { + optionals.set(2); + } + if (struct.isSetDesired()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetTinfo()) { + struct.tinfo.write(oprot); + } + if (struct.isSetCredentials()) { + struct.credentials.write(oprot); + } + if (struct.isSetCurrent()) { + { + oprot.writeI32(struct.current.size()); + for (TFatePartition _iter16 : struct.current) + { + _iter16.write(oprot); + } + } + } + if (struct.isSetDesired()) { + { + oprot.writeI32(struct.desired.size()); + for (TFatePartition _iter17 : struct.desired) + { + _iter17.write(oprot); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, setPartitions_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = 
(org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet incoming = iprot.readBitSet(4); + if (incoming.get(0)) { + struct.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(); + struct.tinfo.read(iprot); + struct.setTinfoIsSet(true); + } + if (incoming.get(1)) { + struct.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(); + struct.credentials.read(iprot); + struct.setCredentialsIsSet(true); + } + if (incoming.get(2)) { + { + org.apache.thrift.protocol.TList _list18 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.current = new java.util.ArrayList(_list18.size); + @org.apache.thrift.annotation.Nullable TFatePartition _elem19; + for (int _i20 = 0; _i20 < _list18.size; ++_i20) + { + _elem19 = new TFatePartition(); + _elem19.read(iprot); + struct.current.add(_elem19); + } + } + struct.setCurrentIsSet(true); + } + if (incoming.get(3)) { + { + org.apache.thrift.protocol.TList _list21 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.desired = new java.util.ArrayList(_list21.size); + @org.apache.thrift.annotation.Nullable TFatePartition _elem22; + for (int _i23 = 0; _i23 < _list21.size; ++_i23) + { + _elem22 = new TFatePartition(); + _elem22.read(iprot); + struct.desired.add(_elem22); + } + } + struct.setDesiredIsSet(true); + } + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? 
STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + } + + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) + public static class setPartitions_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setPartitions_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); + private static final org.apache.thrift.protocol.TField SEC_FIELD_DESC = new org.apache.thrift.protocol.TField("sec", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new setPartitions_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new setPartitions_resultTupleSchemeFactory(); + + public boolean success; // required + public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException sec; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + SEC((short)1, "sec"); + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // SEC + return SEC; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + @Override + public short getThriftFieldId() { + return _thriftId; + } + + @Override + public java.lang.String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __SUCCESS_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.SEC, new org.apache.thrift.meta_data.FieldMetaData("sec", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, 
org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException.class))); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setPartitions_result.class, metaDataMap); + } + + public setPartitions_result() { + } + + public setPartitions_result( + boolean success, + org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException sec) + { + this(); + this.success = success; + setSuccessIsSet(true); + this.sec = sec; + } + + /** + * Performs a deep copy on other. + */ + public setPartitions_result(setPartitions_result other) { + __isset_bitfield = other.__isset_bitfield; + this.success = other.success; + if (other.isSetSec()) { + this.sec = new org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException(other.sec); + } + } + + @Override + public setPartitions_result deepCopy() { + return new setPartitions_result(this); + } + + @Override + public void clear() { + setSuccessIsSet(false); + this.success = false; + this.sec = null; + } + + public boolean isSuccess() { + return this.success; + } + + public setPartitions_result setSuccess(boolean success) { + this.success = success; + setSuccessIsSet(true); + return this; + } + + public void unsetSuccess() { + __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + public void setSuccessIsSet(boolean value) { + __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); + } + + @org.apache.thrift.annotation.Nullable + public org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException getSec() { + return this.sec; + } + + public setPartitions_result setSec(@org.apache.thrift.annotation.Nullable 
org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException sec) { + this.sec = sec; + return this; + } + + public void unsetSec() { + this.sec = null; + } + + /** Returns true if field sec is set (has been assigned a value) and false otherwise */ + public boolean isSetSec() { + return this.sec != null; + } + + public void setSecIsSet(boolean value) { + if (!value) { + this.sec = null; + } + } + + @Override + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((java.lang.Boolean)value); + } + break; + + case SEC: + if (value == null) { + unsetSec(); + } else { + setSec((org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException)value); + } + break; + + } + } + + @org.apache.thrift.annotation.Nullable + @Override + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return isSuccess(); + + case SEC: + return getSec(); + + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + @Override + public boolean isSet(_Fields field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case SEC: + return isSetSec(); + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that instanceof setPartitions_result) + return this.equals((setPartitions_result)that); + return false; + } + + public boolean equals(setPartitions_result that) { + if (that == null) + return false; + if (this == that) + return true; + + boolean this_present_success = true; + boolean that_present_success = true; + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if 
(this.success != that.success) + return false; + } + + boolean this_present_sec = true && this.isSetSec(); + boolean that_present_sec = true && that.isSetSec(); + if (this_present_sec || that_present_sec) { + if (!(this_present_sec && that_present_sec)) + return false; + if (!this.sec.equals(that.sec)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + int hashCode = 1; + + hashCode = hashCode * 8191 + ((success) ? 131071 : 524287); + + hashCode = hashCode * 8191 + ((isSetSec()) ? 131071 : 524287); + if (isSetSec()) + hashCode = hashCode * 8191 + sec.hashCode(); + + return hashCode; + } + + @Override + public int compareTo(setPartitions_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = java.lang.Boolean.compare(isSetSuccess(), other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = java.lang.Boolean.compare(isSetSec(), other.isSetSec()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSec()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sec, other.sec); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + @org.apache.thrift.annotation.Nullable + @Override + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() { + java.lang.StringBuilder sb = 
new java.lang.StringBuilder("setPartitions_result("); + boolean first = true; + + sb.append("success:"); + sb.append(this.success); + first = false; + if (!first) sb.append(", "); + sb.append("sec:"); + if (this.sec == null) { + sb.append("null"); + } else { + sb.append(this.sec); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class setPartitions_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public setPartitions_resultStandardScheme getScheme() { + return new setPartitions_resultStandardScheme(); + } + } + + private static class setPartitions_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot, setPartitions_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // SEC + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.sec = new org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException(); + struct.sec.read(iprot); + struct.setSecIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot, setPartitions_result struct) throws org.apache.thrift.TException { + struct.validate(); + + 
oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeBool(struct.success); + oprot.writeFieldEnd(); + } + if (struct.sec != null) { + oprot.writeFieldBegin(SEC_FIELD_DESC); + struct.sec.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class setPartitions_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public setPartitions_resultTupleScheme getScheme() { + return new setPartitions_resultTupleScheme(); + } + } + + private static class setPartitions_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, setPartitions_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetSec()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + oprot.writeBool(struct.success); + } + if (struct.isSetSec()) { + struct.sec.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, setPartitions_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.sec = new org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException(); + struct.sec.read(iprot); + struct.setSecIsSet(true); + } + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return 
(org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + } + + private static void unusedMethod() {} +} diff --git a/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/TFatePartition.java b/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/TFatePartition.java new file mode 100644 index 00000000000..c88ad14f85c --- /dev/null +++ b/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/TFatePartition.java @@ -0,0 +1,511 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +/** + * Autogenerated by Thrift Compiler (0.17.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.accumulo.core.fate.thrift; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) +public class TFatePartition implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TFatePartition"); + + private static final org.apache.thrift.protocol.TField START_FIELD_DESC = new org.apache.thrift.protocol.TField("start", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField STOP_FIELD_DESC = new org.apache.thrift.protocol.TField("stop", org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TFatePartitionStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TFatePartitionTupleSchemeFactory(); + + public @org.apache.thrift.annotation.Nullable java.lang.String start; // required + public @org.apache.thrift.annotation.Nullable java.lang.String stop; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + START((short)1, "start"), + STOP((short)2, "stop"); + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // START + return START; + case 2: // STOP + return STOP; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + @Override + public short getThriftFieldId() { + return _thriftId; + } + + @Override + public java.lang.String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.START, new org.apache.thrift.meta_data.FieldMetaData("start", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.STOP, new org.apache.thrift.meta_data.FieldMetaData("stop", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TFatePartition.class, metaDataMap); + } + + public TFatePartition() { + } + + public TFatePartition( + java.lang.String start, + java.lang.String stop) + { + this(); + this.start = start; + this.stop = stop; + } + + /** + * Performs a deep copy on other. + */ + public TFatePartition(TFatePartition other) { + if (other.isSetStart()) { + this.start = other.start; + } + if (other.isSetStop()) { + this.stop = other.stop; + } + } + + @Override + public TFatePartition deepCopy() { + return new TFatePartition(this); + } + + @Override + public void clear() { + this.start = null; + this.stop = null; + } + + @org.apache.thrift.annotation.Nullable + public java.lang.String getStart() { + return this.start; + } + + public TFatePartition setStart(@org.apache.thrift.annotation.Nullable java.lang.String start) { + this.start = start; + return this; + } + + public void unsetStart() { + this.start = null; + } + + /** Returns true if field start is set (has been assigned a value) and false otherwise */ + public boolean isSetStart() { + return this.start != null; + } + + public void setStartIsSet(boolean value) { + if (!value) { + this.start = null; + } + } + + @org.apache.thrift.annotation.Nullable + public java.lang.String getStop() { + return this.stop; + } + + public TFatePartition setStop(@org.apache.thrift.annotation.Nullable java.lang.String stop) { + this.stop = stop; + return this; + } + + public void unsetStop() { + this.stop = null; + } + + /** Returns true if field stop is set (has been assigned a value) and false otherwise */ + public boolean isSetStop() { + return this.stop != null; + } + + public void setStopIsSet(boolean value) { + if (!value) { + this.stop = null; + } + } + + @Override + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + case START: + if (value == null) { + unsetStart(); + } else { + 
setStart((java.lang.String)value); + } + break; + + case STOP: + if (value == null) { + unsetStop(); + } else { + setStop((java.lang.String)value); + } + break; + + } + } + + @org.apache.thrift.annotation.Nullable + @Override + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + case START: + return getStart(); + + case STOP: + return getStop(); + + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + @Override + public boolean isSet(_Fields field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + case START: + return isSetStart(); + case STOP: + return isSetStop(); + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that instanceof TFatePartition) + return this.equals((TFatePartition)that); + return false; + } + + public boolean equals(TFatePartition that) { + if (that == null) + return false; + if (this == that) + return true; + + boolean this_present_start = true && this.isSetStart(); + boolean that_present_start = true && that.isSetStart(); + if (this_present_start || that_present_start) { + if (!(this_present_start && that_present_start)) + return false; + if (!this.start.equals(that.start)) + return false; + } + + boolean this_present_stop = true && this.isSetStop(); + boolean that_present_stop = true && that.isSetStop(); + if (this_present_stop || that_present_stop) { + if (!(this_present_stop && that_present_stop)) + return false; + if (!this.stop.equals(that.stop)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + int hashCode = 1; + + hashCode = hashCode * 8191 + ((isSetStart()) ? 131071 : 524287); + if (isSetStart()) + hashCode = hashCode * 8191 + start.hashCode(); + + hashCode = hashCode * 8191 + ((isSetStop()) ? 
131071 : 524287); + if (isSetStop()) + hashCode = hashCode * 8191 + stop.hashCode(); + + return hashCode; + } + + @Override + public int compareTo(TFatePartition other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = java.lang.Boolean.compare(isSetStart(), other.isSetStart()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetStart()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.start, other.start); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = java.lang.Boolean.compare(isSetStop(), other.isSetStop()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetStop()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.stop, other.stop); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + @org.apache.thrift.annotation.Nullable + @Override + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() { + java.lang.StringBuilder sb = new java.lang.StringBuilder("TFatePartition("); + boolean first = true; + + sb.append("start:"); + if (this.start == null) { + sb.append("null"); + } else { + sb.append(this.start); + } + first = false; + if (!first) sb.append(", "); + sb.append("stop:"); + if (this.stop == null) { + sb.append("null"); + } else { + sb.append(this.stop); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for 
sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TFatePartitionStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public TFatePartitionStandardScheme getScheme() { + return new TFatePartitionStandardScheme(); + } + } + + private static class TFatePartitionStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot, TFatePartition struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // START + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.start = iprot.readString(); + struct.setStartIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // STOP + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.stop = iprot.readString(); + struct.setStopIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + 
iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot, TFatePartition struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.start != null) { + oprot.writeFieldBegin(START_FIELD_DESC); + oprot.writeString(struct.start); + oprot.writeFieldEnd(); + } + if (struct.stop != null) { + oprot.writeFieldBegin(STOP_FIELD_DESC); + oprot.writeString(struct.stop); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TFatePartitionTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public TFatePartitionTupleScheme getScheme() { + return new TFatePartitionTupleScheme(); + } + } + + private static class TFatePartitionTupleScheme extends org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TFatePartition struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetStart()) { + optionals.set(0); + } + if (struct.isSetStop()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetStart()) { + oprot.writeString(struct.start); + } + if (struct.isSetStop()) { + oprot.writeString(struct.stop); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TFatePartition struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.start = iprot.readString(); + struct.setStartIsSet(true); + } + if (incoming.get(1)) { + struct.stop = 
iprot.readString(); + struct.setStopIsSet(true); + } + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + private static void unusedMethod() {} +} + diff --git a/core/src/main/thrift/fate-worker.thrift b/core/src/main/thrift/fate-worker.thrift index 5ed19f4d429..e60fc317120 100644 --- a/core/src/main/thrift/fate-worker.thrift +++ b/core/src/main/thrift/fate-worker.thrift @@ -1,14 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ namespace java org.apache.accumulo.core.fate.thrift namespace cpp org.apache.accumulo.core.fate.thrift -struct FatePartition { +include "client.thrift" +include "security.thrift" + +struct TFatePartition { 1:string start - 2:string end + 2:string stop } -service FateWorker { +service FateWorkerService { - list getPartitions( + list getPartitions( 1:client.TInfo tinfo, 2:security.TCredentials credentials ) throws ( @@ -18,8 +39,8 @@ service FateWorker { bool setPartitions( 1:client.TInfo tinfo, 2:security.TCredentials credentials, - 3:list current, - 4:list desired + 3:list current, + 4:list desired ) throws ( 1:client.ThriftSecurityException sec ) diff --git a/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java b/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java index 0a56b4e32fe..c1e602e1293 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java +++ b/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java @@ -21,6 +21,7 @@ import org.apache.accumulo.core.clientImpl.thrift.ClientService; import org.apache.accumulo.core.compaction.thrift.CompactionCoordinatorService; import org.apache.accumulo.core.compaction.thrift.CompactorService; +import org.apache.accumulo.core.fate.thrift.FateWorkerService; import org.apache.accumulo.core.gc.thrift.GCMonitorService; import org.apache.accumulo.core.manager.thrift.FateService; import org.apache.accumulo.core.manager.thrift.ManagerClientService; @@ -82,6 +83,9 @@ public > TProcessor getTProcessor( private static final ThriftProcessorTypes MANAGER = new ThriftProcessorTypes<>(ThriftClientTypes.MANAGER); + private static final ThriftProcessorTypes FATE_WORKER = + new ThriftProcessorTypes<>(ThriftClientTypes.FATE_WORKER); + @VisibleForTesting public static final ThriftProcessorTypes TABLET_SERVER = new ThriftProcessorTypes<>(ThriftClientTypes.TABLET_SERVER); @@ -127,7 +131,8 @@ public static 
TMultiplexedProcessor getGcTProcessor(ServerProcessService.Iface p public static TMultiplexedProcessor getManagerTProcessor( ServerProcessService.Iface processHandler, FateService.Iface fateServiceHandler, CompactionCoordinatorService.Iface coordinatorServiceHandler, - ManagerClientService.Iface managerServiceHandler, ServerContext context) { + ManagerClientService.Iface managerServiceHandler, + FateWorkerService.Iface fateWorkerServiceHandler, ServerContext context) { TMultiplexedProcessor muxProcessor = new TMultiplexedProcessor(); muxProcessor.registerProcessor(SERVER_PROCESS.getServiceName(), SERVER_PROCESS.getTProcessor(ServerProcessService.Processor.class, @@ -140,6 +145,9 @@ public static TMultiplexedProcessor getManagerTProcessor( muxProcessor.registerProcessor(MANAGER.getServiceName(), MANAGER.getTProcessor(ManagerClientService.Processor.class, ManagerClientService.Iface.class, managerServiceHandler, context)); + muxProcessor.registerProcessor(FATE_WORKER.getServiceName(), + FATE_WORKER.getTProcessor(FateWorkerService.Processor.class, FateWorkerService.Iface.class, + fateWorkerServiceHandler, context)); return muxProcessor; } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index 99c9feee577..196e617d5a9 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -71,6 +71,7 @@ import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.FateStore; +import org.apache.accumulo.core.fate.thrift.FateWorkerService; import org.apache.accumulo.core.fate.user.UserFateStore; import org.apache.accumulo.core.fate.zookeeper.MetaFateStore; import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter; @@ -108,6 +109,7 @@ import org.apache.accumulo.core.util.time.SteadyTime; import 
org.apache.accumulo.core.zookeeper.ZcStat; import org.apache.accumulo.manager.compaction.coordinator.CompactionCoordinator; +import org.apache.accumulo.manager.fate.FateWorker; import org.apache.accumulo.manager.merge.FindMergeableRangeTask; import org.apache.accumulo.manager.metrics.ManagerMetrics; import org.apache.accumulo.manager.recovery.RecoveryManager; @@ -915,9 +917,11 @@ public void run() { fateServiceHandler = new FateServiceHandler(this); managerClientHandler = new ManagerClientServiceHandler(this); compactionCoordinator = new CompactionCoordinator(this, fateRefs); + FateWorkerService.Iface fateWorkerHandler = new FateWorker(context); var processor = ThriftProcessorTypes.getManagerTProcessor(this, fateServiceHandler, - compactionCoordinator.getThriftService(), managerClientHandler, getContext()); + compactionCoordinator.getThriftService(), managerClientHandler, fateWorkerHandler, + getContext()); try { updateThriftServer(() -> { return TServerUtils.createThriftServer(context, getBindAddress(), diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 07e60e1c9e8..53dbc8ea949 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -1,143 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.apache.accumulo.manager.fate; -import com.google.common.net.HostAndPort; -import org.apache.accumulo.core.fate.FateId; -import org.apache.hadoop.util.Sets; - -import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Set; +import org.apache.accumulo.core.fate.FateId; +import org.apache.accumulo.core.fate.thrift.TFatePartition; +import org.apache.hadoop.util.Sets; + +import com.google.common.net.HostAndPort; + public class FateManager { - record FatePartition(FateId start, FateId end) { + record FatePartition(FateId start, FateId end) { + public TFatePartition toThrift() { + return new TFatePartition(start.canonical(), end.canonical()); } - public void managerWorkers() throws InterruptedException { - while(true) { - // TODO make configurable - Thread.sleep(10_000); - - // TODO could support RG... could user ServerId - // This map will contain all current workers even their partitions are empty - Map> currentAssignments = getCurrentAssignments(); - Set desiredParititions = getDesiredPartitions(); - - // TODO handle duplicate current assignments - - Map> desired = computeDesiredAssignments(currentAssignments, desiredParititions); - - // are there any workers with extra partitions? If so need to unload those first. 
- boolean haveExtra = desired.entrySet().stream().anyMatch(e->{ - HostAndPort worker = e.getKey(); - var curr = currentAssignments.getOrDefault(worker, Set.of()); - var extra = Sets.difference(curr, e.getValue()); - return !extra.isEmpty(); - }); - - if(haveExtra) { - // force unload of extra partitions to make them available for other workers - desired.forEach((worker, paritions)->{ - var curr = currentAssignments.getOrDefault(worker, Set.of()); - if(!curr.equals(paritions)) { - var intersection = Sets.intersection(curr, paritions); - setWorkerPartitions(worker,curr, intersection); - currentAssignments.put(worker, intersection); - } - }); - } - - // Load all partitions on all workers.. - desired.forEach((worker, paritions)->{ - var curr = currentAssignments.getOrDefault(worker, Set.of()); - if(!curr.equals(paritions)){ - setWorkerPartitions(worker, curr, paritions); - } - }); + public static FatePartition from(TFatePartition tfp) { + return new FatePartition(FateId.from(tfp.start), FateId.from(tfp.stop)); + } + } + + public void managerWorkers() throws InterruptedException { + while (true) { + // TODO make configurable + Thread.sleep(10_000); + + // TODO could support RG... could user ServerId + // This map will contain all current workers even their partitions are empty + Map> currentAssignments = getCurrentAssignments(); + Set desiredParititions = getDesiredPartitions(); + + // TODO handle duplicate current assignments + + Map> desired = + computeDesiredAssignments(currentAssignments, desiredParititions); + + // are there any workers with extra partitions? If so need to unload those first. 
+ boolean haveExtra = desired.entrySet().stream().anyMatch(e -> { + HostAndPort worker = e.getKey(); + var curr = currentAssignments.getOrDefault(worker, Set.of()); + var extra = Sets.difference(curr, e.getValue()); + return !extra.isEmpty(); + }); + + if (haveExtra) { + // force unload of extra partitions to make them available for other workers + desired.forEach((worker, paritions) -> { + var curr = currentAssignments.getOrDefault(worker, Set.of()); + if (!curr.equals(paritions)) { + var intersection = Sets.intersection(curr, paritions); + setWorkerPartitions(worker, curr, intersection); + currentAssignments.put(worker, intersection); + } + }); + } + + // Load all partitions on all workers.. + desired.forEach((worker, paritions) -> { + var curr = currentAssignments.getOrDefault(worker, Set.of()); + if (!curr.equals(paritions)) { + setWorkerPartitions(worker, curr, paritions); } + }); } - - private void setWorkerPartitions(HostAndPort worker, Set current, Set desired) { - // TODO make a compare and set type RPC that uses the current and desired + } + + private void setWorkerPartitions(HostAndPort worker, Set current, + Set desired) { + // TODO make a compare and set type RPC that uses the current and desired + } + + /** + * Compute the desired distribution of partitions across workers. Favors leaving partitions in + * place if possible. 
+ */ + private Map> computeDesiredAssignments( + Map> currentAssignments, + Set desiredParititions) { + // min number of partitions a single worker must have + int minPerWorker = currentAssignments.size() / desiredParititions.size(); + // max number of partitions a single worker can have + int maxPerWorker = + minPerWorker + Math.min(currentAssignments.size() % desiredParititions.size(), 1); + // number of workers that can have the max partitions + int desiredWorkersWithMax = currentAssignments.size() % desiredParititions.size(); + + Map> desiredAssignments = new HashMap<>(); + Set availablePartitions = new HashSet<>(desiredParititions); + + // remove everything that is assigned + currentAssignments.values().forEach(p -> p.forEach(availablePartitions::remove)); + + // Find workers that currently have too many partitions assigned and place their excess in the + // available set. Let workers keep what they have when its under the limit. + int numWorkersWithMax = 0; + for (var worker : currentAssignments.keySet()) { + var assignments = new HashSet(); + var curr = currentAssignments.getOrDefault(worker, Set.of()); + // The number of partitions this worker can have, anything in excess should be added to + // available + int canHave = numWorkersWithMax < desiredWorkersWithMax ? maxPerWorker : minPerWorker; + + var iter = curr.iterator(); + for (int i = 0; i < canHave && iter.hasNext(); i++) { + assignments.add(iter.next()); + } + iter.forEachRemaining(availablePartitions::add); + + desiredAssignments.put(worker, assignments); + if (curr.size() >= maxPerWorker) { + numWorkersWithMax++; + } } - /** - * Compute the desired distribution of partitions across workers. Favors leaving partitions in place if possible. 
- */ - private Map> computeDesiredAssignments(Map> currentAssignments, Set desiredParititions) { - // min number of partitions a single worker must have - int minPerWorker = currentAssignments.size() / desiredParititions.size(); - // max number of partitions a single worker can have - int maxPerWorker = minPerWorker + Math.min(currentAssignments.size() % desiredParititions.size(), 1); - // number of workers that can have the max partitions - int desiredWorkersWithMax = currentAssignments.size() % desiredParititions.size(); - - Map> desiredAssignments = new HashMap<>(); - Set availablePartitions = new HashSet<>(desiredParititions); - - // remove everything that is assigned - currentAssignments.values().forEach(p->p.forEach(availablePartitions::remove)); - - // Find workers that currently have too many partitions assigned and place their excess in the available set. Let workers keep what they have when its under the limit. - int numWorkersWithMax = 0; - for(var worker : currentAssignments.keySet()) { - var assignments = new HashSet(); - var curr = currentAssignments.getOrDefault(worker, Set.of()); - // The number of partitions this worker can have, anything in excess should be added to available - int canHave = numWorkersWithMax < desiredWorkersWithMax ? maxPerWorker : minPerWorker; - - var iter = curr.iterator(); - for(int i = 0; i= maxPerWorker) { - numWorkersWithMax++; - } - } - - // Distribute available partitions to workers that do not have the minimum. - var availIter = availablePartitions.iterator(); - for(var worker : currentAssignments.keySet()) { - var assignments = desiredAssignments.get(worker); - while(assignments.size() < minPerWorker) { - // This should always have next if the creation of available partitions was done correctly. - assignments.add(availIter.next()); - } - } - - // Distribute available partitions to workers that do not have the max until no more partitions available. 
- for(var worker : currentAssignments.keySet()) { - var assignments = desiredAssignments.get(worker); - while(assignments.size() < maxPerWorker && availIter.hasNext()){ - assignments.add(availIter.next()); - } - if(!availIter.hasNext()){ - break; - } - } - - return desiredAssignments; + // Distribute available partitions to workers that do not have the minimum. + var availIter = availablePartitions.iterator(); + for (var worker : currentAssignments.keySet()) { + var assignments = desiredAssignments.get(worker); + while (assignments.size() < minPerWorker) { + // This should always have next if the creation of available partitions was done correctly. + assignments.add(availIter.next()); + } } - private Set getDesiredPartitions() { - throw new UnsupportedOperationException(); + // Distribute available partitions to workers that do not have the max until no more partitions + // available. + for (var worker : currentAssignments.keySet()) { + var assignments = desiredAssignments.get(worker); + while (assignments.size() < maxPerWorker && availIter.hasNext()) { + assignments.add(availIter.next()); + } + if (!availIter.hasNext()) { + break; + } } - private Map> getCurrentAssignments() { - throw new UnsupportedOperationException(); - } + return desiredAssignments; + } + private Set getDesiredPartitions() { + throw new UnsupportedOperationException(); + } + private Map> getCurrentAssignments() { + throw new UnsupportedOperationException(); + } - // TODO this will not need a main eventually, will be run by the manager - public static void main(String[] args) { + // TODO this will not need a main eventually, will be run by the manager + public static void main(String[] args) { - } + } } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index 8914435f8d6..a5796164db2 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -1,7 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.apache.accumulo.manager.fate; -public class FateWorker { - public static void main(String[] args) { +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode; +import org.apache.accumulo.core.clientImpl.thrift.TInfo; +import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException; +import org.apache.accumulo.core.fate.thrift.FateWorkerService; +import org.apache.accumulo.core.fate.thrift.TFatePartition; +import org.apache.accumulo.core.securityImpl.thrift.TCredentials; +import org.apache.accumulo.manager.fate.FateManager.FatePartition; +import org.apache.accumulo.server.ServerContext; +import org.apache.accumulo.server.security.AuditedSecurityOperation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class FateWorker implements FateWorkerService.Iface { + + private static final Logger log = 
LoggerFactory.getLogger(FateWorker.class); + private final ServerContext context; + private final AuditedSecurityOperation security; + private final Set currentPartitions; + + public FateWorker(ServerContext ctx) { + this.context = ctx; + this.security = ctx.getSecurityOperation(); + this.currentPartitions = Collections.synchronizedSet(new HashSet<>()); + } + + @Override + public List getPartitions(TInfo tinfo, TCredentials credentials) + throws ThriftSecurityException { + if (!security.canPerformSystemActions(credentials)) { + throw new AccumuloSecurityException(credentials.getPrincipal(), + SecurityErrorCode.PERMISSION_DENIED).asThriftException(); + } + + synchronized (currentPartitions) { + return currentPartitions.stream().map(FatePartition::toThrift).toList(); } + } + + @Override + public boolean setPartitions(TInfo tinfo, TCredentials credentials, List current, + List desired) throws ThriftSecurityException { + if (!security.canPerformSystemActions(credentials)) { + throw new AccumuloSecurityException(credentials.getPrincipal(), + SecurityErrorCode.PERMISSION_DENIED).asThriftException(); + } + + var currentSet = current.stream().map(FatePartition::from).collect(Collectors.toSet()); + synchronized (currentPartitions) { + if (currentPartitions.equals(currentSet)) { + currentPartitions.clear(); + desired.stream().map(FatePartition::from).forEach(currentPartitions::add); + log.info("Changed partitions from {} to {}", currentSet, currentPartitions); + } else { + log.info("Did not change partitions to {} because {} != {}", desired, currentSet, + currentPartitions); + } + } + + return false; + } } From fbc213c6be3bd43bf4b0a8e71875147ddaacfd7c Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Tue, 27 Jan 2026 20:15:41 +0000 Subject: [PATCH 04/38] WIP --- .../accumulo/core/lock/ServiceLockData.java | 1 + .../core/rpc/clients/ThriftClientTypes.java | 2 +- core/src/main/thrift/fate-worker.thrift | 2 +- .../org/apache/accumulo/manager/Manager.java | 2 +- 
.../accumulo/manager/fate/FateManager.java | 65 ++++++++++++++++--- .../accumulo/manager/fate/FateWorker.java | 14 ++-- .../accumulo/test/MultipleManagerIT.java | 35 ++++++++++ 7 files changed, 103 insertions(+), 18 deletions(-) create mode 100644 test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java diff --git a/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockData.java b/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockData.java index c29879f05c7..ec1bfd50b2a 100644 --- a/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockData.java +++ b/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockData.java @@ -46,6 +46,7 @@ public static enum ThriftService { COORDINATOR, COMPACTOR, FATE, + FATE_WORKER, GC, MANAGER, NONE, diff --git a/core/src/main/java/org/apache/accumulo/core/rpc/clients/ThriftClientTypes.java b/core/src/main/java/org/apache/accumulo/core/rpc/clients/ThriftClientTypes.java index e8bcac16e33..7576356f0b9 100644 --- a/core/src/main/java/org/apache/accumulo/core/rpc/clients/ThriftClientTypes.java +++ b/core/src/main/java/org/apache/accumulo/core/rpc/clients/ThriftClientTypes.java @@ -62,7 +62,7 @@ public abstract class ThriftClientTypes { public static final ServerProcessServiceThriftClient SERVER_PROCESS = new ServerProcessServiceThriftClient("process"); - protected static final ThriftClientTypes FATE_WORKER = + public static final ThriftClientTypes FATE_WORKER = new FateWorkerServiceThriftClient("fateworker"); /** diff --git a/core/src/main/thrift/fate-worker.thrift b/core/src/main/thrift/fate-worker.thrift index e60fc317120..b27a254382d 100644 --- a/core/src/main/thrift/fate-worker.thrift +++ b/core/src/main/thrift/fate-worker.thrift @@ -39,7 +39,7 @@ service FateWorkerService { bool setPartitions( 1:client.TInfo tinfo, 2:security.TCredentials credentials, - 3:list current, + 3:list expected, 4:list desired ) throws ( 1:client.ThriftSecurityException sec diff --git 
a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index 196e617d5a9..127c14cd02b 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -1187,7 +1187,7 @@ boolean canSuspendTablets() { UUID uuid = sld.getServerUUID(ThriftService.NONE); ServiceDescriptors descriptors = new ServiceDescriptors(); for (ThriftService svc : new ThriftService[] {ThriftService.MANAGER, ThriftService.COORDINATOR, - ThriftService.FATE}) { + ThriftService.FATE, ThriftService.FATE_WORKER}) { descriptors.addService(new ServiceDescriptor(uuid, svc, getAdvertiseAddress().toString(), this.getResourceGroup())); } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 53dbc8ea949..fe4f4d084a0 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -18,20 +18,35 @@ */ package org.apache.accumulo.manager.fate; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; +import java.util.stream.Collectors; import org.apache.accumulo.core.fate.FateId; +import org.apache.accumulo.core.fate.FateInstanceType; +import org.apache.accumulo.core.fate.thrift.FateWorkerService; import org.apache.accumulo.core.fate.thrift.TFatePartition; +import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter; +import org.apache.accumulo.core.lock.ServiceLock; +import org.apache.accumulo.core.lock.ServiceLockData; +import org.apache.accumulo.core.rpc.ThriftUtil; +import org.apache.accumulo.core.rpc.clients.ThriftClientTypes; +import 
org.apache.accumulo.core.trace.TraceUtil; +import org.apache.accumulo.server.ServerContext; import org.apache.hadoop.util.Sets; import com.google.common.net.HostAndPort; +import org.apache.thrift.TException; +import org.apache.zookeeper.KeeperException; public class FateManager { - record FatePartition(FateId start, FateId end) { + record FatePartition(FateId start, FateId end) { public TFatePartition toThrift() { return new TFatePartition(start.canonical(), end.canonical()); @@ -42,7 +57,13 @@ public static FatePartition from(TFatePartition tfp) { } } - public void managerWorkers() throws InterruptedException { + private final ServerContext context; + + public FateManager(ServerContext context) { + this.context = context; + } + + public void managerWorkers() throws Exception { while (true) { // TODO make configurable Thread.sleep(10_000); @@ -161,15 +182,43 @@ private Map> computeDesiredAssignments( } private Set getDesiredPartitions() { - throw new UnsupportedOperationException(); - } - private Map> getCurrentAssignments() { - throw new UnsupportedOperationException(); + HashSet desired = new HashSet<>(); + // TODO created based on the number of available servers + for(long i = 0; i<=15; i++){ + UUID start = new UUID((i<<60) , -0); + UUID stop = new UUID((i<<60) | (-1L>>>4), -1); + desired.add(new FatePartition(FateId.from(FateInstanceType.USER, start), FateId.from(FateInstanceType.USER, stop))); + } + + return desired; } - // TODO this will not need a main eventually, will be run by the manager - public static void main(String[] args) { + private Map> getCurrentAssignments() throws InterruptedException, KeeperException, TException { + ZooReaderWriter zk = context.getZooSession().asReaderWriter(); + var managerPath = context.getServerPaths().createManagerPath(); + + var children = ServiceLock.validateAndSort(managerPath, zk.getChildren(managerPath.toString())); + + List locksData = new ArrayList<>(children.size()); + + for(var child : children){ + 
ServiceLockData.parse(zk.getData(managerPath +"/"+child)).ifPresent(locksData::add); + } + + Map> currentAssignments = new HashMap<>(); + + for(var lockData : locksData) { + var address = lockData.getAddress(ServiceLockData.ThriftService.FATE_WORKER); + + FateWorkerService.Client client = + ThriftUtil.getClient(ThriftClientTypes.FATE_WORKER, address, context); + + var tparitions = client.getPartitions(TraceUtil.traceInfo(), context.rpcCreds()); + var partitions = tparitions.stream().map(FatePartition::from).collect(Collectors.toSet()); + currentAssignments.put(address, partitions); + } + return currentAssignments; } } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index a5796164db2..d83981ac720 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -64,25 +64,25 @@ public List getPartitions(TInfo tinfo, TCredentials credentials) } @Override - public boolean setPartitions(TInfo tinfo, TCredentials credentials, List current, + public boolean setPartitions(TInfo tinfo, TCredentials credentials, List expected, List desired) throws ThriftSecurityException { if (!security.canPerformSystemActions(credentials)) { throw new AccumuloSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED).asThriftException(); } - var currentSet = current.stream().map(FatePartition::from).collect(Collectors.toSet()); + var expectedSet = expected.stream().map(FatePartition::from).collect(Collectors.toSet()); synchronized (currentPartitions) { - if (currentPartitions.equals(currentSet)) { + if (currentPartitions.equals(expectedSet)) { currentPartitions.clear(); desired.stream().map(FatePartition::from).forEach(currentPartitions::add); - log.info("Changed partitions from {} to {}", currentSet, currentPartitions); + 
log.info("Changed partitions from {} to {}", expectedSet, currentPartitions); + return true; } else { - log.info("Did not change partitions to {} because {} != {}", desired, currentSet, + log.info("Did not change partitions to {} because {} != {}", desired, expectedSet, currentPartitions); + return false; } } - - return false; } } diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java new file mode 100644 index 00000000000..b6af8b48f1c --- /dev/null +++ b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java @@ -0,0 +1,35 @@ +package org.apache.accumulo.test; + +import org.apache.accumulo.harness.AccumuloClusterHarness; +import org.apache.accumulo.manager.Manager; +import org.apache.accumulo.manager.fate.FateManager; +import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl; +import org.apache.accumulo.test.functional.ConfigurableMacBase; +import org.apache.hadoop.conf.Configuration; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; + +public class MultipleManagerIT extends ConfigurableMacBase { + @Override + protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { + // TODO add a way to start multiple managers to mini + super.configure(cfg, hadoopCoreSite); + } + + @Test + public void test() throws Exception { + + List managers = new ArrayList<>(); + for(int i = 0; i<5;i++){ + managers.add(exec(Manager.class)); + } + + var fateMgr = new FateManager(getServerContext()); + fateMgr.managerWorkers(); + + // TODO kill processes + } +} From 08f221e3433770db1678a070645ea45e9686b99b Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 29 Jan 2026 20:53:08 +0000 Subject: [PATCH 05/38] WIP --- .../org/apache/accumulo/core/Constants.java | 1 + .../core/client/admin/servers/ServerId.java | 2 +- .../accumulo/core/lock/ServiceLockPaths.java | 8 +- 
.../server/rpc/ThriftProcessorTypes.java | 19 ++- .../org/apache/accumulo/manager/Manager.java | 3 +- .../accumulo/manager/ManagerWorker.java | 139 ++++++++++++++++++ .../accumulo/tserver/tablet/Scanner.java | 1 + 7 files changed, 163 insertions(+), 10 deletions(-) create mode 100644 server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java diff --git a/core/src/main/java/org/apache/accumulo/core/Constants.java b/core/src/main/java/org/apache/accumulo/core/Constants.java index ea06bc4ad79..ea44de8676b 100644 --- a/core/src/main/java/org/apache/accumulo/core/Constants.java +++ b/core/src/main/java/org/apache/accumulo/core/Constants.java @@ -49,6 +49,7 @@ public class Constants { public static final String ZMANAGERS = "/managers"; public static final String ZMANAGER_LOCK = ZMANAGERS + "/lock"; + public static final String ZMANAGER_WORKER_LOCK = ZMANAGERS + "/workers"; public static final String ZMANAGER_GOAL_STATE = ZMANAGERS + "/goal_state"; public static final String ZMANAGER_TICK = ZMANAGERS + "/tick"; diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/servers/ServerId.java b/core/src/main/java/org/apache/accumulo/core/client/admin/servers/ServerId.java index 19182bc7c92..9125e188d25 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/admin/servers/ServerId.java +++ b/core/src/main/java/org/apache/accumulo/core/client/admin/servers/ServerId.java @@ -38,7 +38,7 @@ public final class ServerId implements Comparable { * @since 4.0.0 */ public enum Type { - MANAGER, MONITOR, GARBAGE_COLLECTOR, COMPACTOR, SCAN_SERVER, TABLET_SERVER; + MANAGER, MONITOR, GARBAGE_COLLECTOR, COMPACTOR, SCAN_SERVER, TABLET_SERVER, MANAGER_WORKER; } private final Type type; diff --git a/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java b/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java index 68c29c485f0..1d99011e283 100644 --- a/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java +++ 
b/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java @@ -77,7 +77,7 @@ private ServiceLockPath(String type) { Preconditions.checkArgument(this.type.equals(Constants.ZGC_LOCK) || this.type.equals(Constants.ZMANAGER_LOCK) || this.type.equals(Constants.ZMONITOR_LOCK) || this.type.equals(Constants.ZTABLE_LOCKS) || this.type.equals(Constants.ZADMIN_LOCK) - || this.type.equals(Constants.ZTEST_LOCK), "Unsupported type: " + type); + || this.type.equals(Constants.ZTEST_LOCK) || this.type.equals(Constants.ZMANAGER_WORKER_LOCK), "Unsupported type: " + type); // These server types support only one active instance, so they use a lock at // a known path, not the server's address. this.resourceGroup = null; @@ -170,6 +170,8 @@ private static String determineServerType(final String path) { return Constants.ZGC_LOCK; } else if (pathStartsWith(path, Constants.ZMANAGER_LOCK)) { return Constants.ZMANAGER_LOCK; + } else if (pathStartsWith(path, Constants.ZMANAGER_WORKER_LOCK)) { + return Constants.ZMANAGER_WORKER_LOCK; } else if (pathStartsWith(path, Constants.ZMONITOR_LOCK)) { return Constants.ZMONITOR_LOCK; } else if (pathStartsWith(path, Constants.ZMINI_LOCK)) { @@ -219,7 +221,7 @@ public static ServiceLockPath parse(Optional serverType, String path) { return switch (type) { case Constants.ZMINI_LOCK -> new ServiceLockPath(type, server); case Constants.ZCOMPACTORS, Constants.ZSSERVERS, Constants.ZTSERVERS, - Constants.ZDEADTSERVERS -> + Constants.ZDEADTSERVERS,Constants.ZMANAGER_WORKER_LOCK -> new ServiceLockPath(type, ResourceGroupId.of(resourceGroup), HostAndPort.fromString(server)); default -> @@ -431,7 +433,7 @@ private Set get(final String serverType, } } } else if (serverType.equals(Constants.ZCOMPACTORS) || serverType.equals(Constants.ZSSERVERS) - || serverType.equals(Constants.ZTSERVERS) || serverType.equals(Constants.ZDEADTSERVERS)) { + || serverType.equals(Constants.ZTSERVERS) || serverType.equals(Constants.ZDEADTSERVERS) || 
serverType.equals(Constants.ZMANAGER_WORKER_LOCK)) { final List resourceGroups = zooCache.getChildren(typePath); for (final String group : resourceGroups) { if (resourceGroupPredicate.test(ResourceGroupId.of(group))) { diff --git a/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java b/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java index c1e602e1293..1293aba5992 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java +++ b/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java @@ -131,8 +131,7 @@ public static TMultiplexedProcessor getGcTProcessor(ServerProcessService.Iface p public static TMultiplexedProcessor getManagerTProcessor( ServerProcessService.Iface processHandler, FateService.Iface fateServiceHandler, CompactionCoordinatorService.Iface coordinatorServiceHandler, - ManagerClientService.Iface managerServiceHandler, - FateWorkerService.Iface fateWorkerServiceHandler, ServerContext context) { + ManagerClientService.Iface managerServiceHandler, ServerContext context) { TMultiplexedProcessor muxProcessor = new TMultiplexedProcessor(); muxProcessor.registerProcessor(SERVER_PROCESS.getServiceName(), SERVER_PROCESS.getTProcessor(ServerProcessService.Processor.class, @@ -145,9 +144,21 @@ public static TMultiplexedProcessor getManagerTProcessor( muxProcessor.registerProcessor(MANAGER.getServiceName(), MANAGER.getTProcessor(ManagerClientService.Processor.class, ManagerClientService.Iface.class, managerServiceHandler, context)); + return muxProcessor; + } + + public static TMultiplexedProcessor getManagerWorkerTProcessor( + ServerProcessService.Iface processHandler, ClientServiceHandler clientHandler, + FateWorkerService.Iface fateWorkerHandler, ServerContext context) { + TMultiplexedProcessor muxProcessor = new TMultiplexedProcessor(); + muxProcessor.registerProcessor(CLIENT.getServiceName(), CLIENT.getTProcessor( + 
ClientService.Processor.class, ClientService.Iface.class, clientHandler, context)); + muxProcessor.registerProcessor(SERVER_PROCESS.getServiceName(), + SERVER_PROCESS.getTProcessor(ServerProcessService.Processor.class, + ServerProcessService.Iface.class, processHandler, context)); muxProcessor.registerProcessor(FATE_WORKER.getServiceName(), - FATE_WORKER.getTProcessor(FateWorkerService.Processor.class, FateWorkerService.Iface.class, - fateWorkerServiceHandler, context)); + FATE_WORKER.getTProcessor(FateWorkerService.Processor.class, + FateWorkerService.Iface.class, fateWorkerHandler, context)); return muxProcessor; } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index 127c14cd02b..051e4247780 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -917,10 +917,9 @@ public void run() { fateServiceHandler = new FateServiceHandler(this); managerClientHandler = new ManagerClientServiceHandler(this); compactionCoordinator = new CompactionCoordinator(this, fateRefs); - FateWorkerService.Iface fateWorkerHandler = new FateWorker(context); var processor = ThriftProcessorTypes.getManagerTProcessor(this, fateServiceHandler, - compactionCoordinator.getThriftService(), managerClientHandler, fateWorkerHandler, + compactionCoordinator.getThriftService(), managerClientHandler, getContext()); try { updateThriftServer(() -> { diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java new file mode 100644 index 00000000000..72514a5e3a1 --- /dev/null +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java @@ -0,0 +1,139 @@ +package org.apache.accumulo.manager; + +import org.apache.accumulo.core.cli.ConfigOpts; +import 
org.apache.accumulo.core.client.admin.servers.ServerId; +import org.apache.accumulo.core.conf.Property; +import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter; +import org.apache.accumulo.core.fate.zookeeper.ZooUtil; +import org.apache.accumulo.core.lock.ServiceLock; +import org.apache.accumulo.core.lock.ServiceLockData; +import org.apache.accumulo.core.lock.ServiceLockPaths; +import org.apache.accumulo.core.lock.ServiceLockSupport; +import org.apache.accumulo.manager.fate.FateWorker; +import org.apache.accumulo.server.AbstractServer; +import org.apache.accumulo.server.ServerContext; +import org.apache.accumulo.server.client.ClientServiceHandler; +import org.apache.accumulo.server.rpc.TServerUtils; +import org.apache.accumulo.server.rpc.ThriftProcessorTypes; +import org.apache.accumulo.server.security.SecurityUtil; +import org.apache.thrift.TProcessor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.UnknownHostException; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly; + +public class ManagerWorker extends AbstractServer { + + private static final Logger LOG = LoggerFactory.getLogger(ManagerWorker.class); + private ServiceLock scanServerLock; + + protected ManagerWorker(ConfigOpts opts, String[] args) { + super(ServerId.Type.MANAGER_WORKER, opts, ServerContext::new, args); + } + + protected void startScanServerClientService() throws UnknownHostException { + + var fateWorker = new FateWorker(getContext()); + + // This class implements TabletClientService.Iface and then delegates calls. Be sure + // to set up the ThriftProcessor using this class, not the delegate. 
+ ClientServiceHandler clientHandler = new ClientServiceHandler(getContext()); + TProcessor processor = + ThriftProcessorTypes.getManagerWorkerTProcessor(this, clientHandler, fateWorker, getContext()); + + // TODO using scan server props + updateThriftServer(() -> { + return TServerUtils.createThriftServer(getContext(), getBindAddress(), + Property.SSERV_CLIENTPORT, processor, this.getClass().getSimpleName(), + Property.SSERV_PORTSEARCH, Property.SSERV_MINTHREADS, Property.SSERV_MINTHREADS_TIMEOUT, + Property.SSERV_THREADCHECK); + }, true); + } + + private ServiceLock announceExistence() { + final ZooReaderWriter zoo = getContext().getZooSession().asReaderWriter(); + try { + + final ServiceLockPaths.ServiceLockPath zLockPath = + getContext().getServerPaths().createScanServerPath(getResourceGroup(), getAdvertiseAddress()); + ServiceLockSupport.createNonHaServiceLockPath(ServerId.Type.MANAGER_WORKER, zoo, zLockPath); + var serverLockUUID = UUID.randomUUID(); + scanServerLock = new ServiceLock(getContext().getZooSession(), zLockPath, serverLockUUID); + ServiceLock.LockWatcher lw = new ServiceLockSupport.ServiceLockWatcher(ServerId.Type.MANAGER_WORKER, () -> getShutdownComplete().get(), + (type) -> getContext().getLowMemoryDetector().logGCInfo(getConfiguration())); + + for (int i = 0; i < 120 / 5; i++) { + zoo.putPersistentData(zLockPath.toString(), new byte[0], ZooUtil.NodeExistsPolicy.SKIP); + + ServiceLockData.ServiceDescriptors descriptors = new ServiceLockData.ServiceDescriptors(); + for (ServiceLockData.ThriftService svc : new ServiceLockData.ThriftService[] {ServiceLockData.ThriftService.CLIENT, + ServiceLockData.ThriftService.FATE_WORKER}) { + descriptors.addService(new ServiceLockData.ServiceDescriptor(serverLockUUID, svc, + getAdvertiseAddress().toString(), this.getResourceGroup())); + } + + if (scanServerLock.tryLock(lw, new ServiceLockData(descriptors))) { + LOG.debug("Obtained scan server lock {}", scanServerLock.getLockPath()); + return scanServerLock; + 
} + LOG.info("Waiting for scan server lock"); + sleepUninterruptibly(5, TimeUnit.SECONDS); + } + String msg = "Too many retries, exiting."; + LOG.info(msg); + throw new RuntimeException(msg); + } catch (Exception e) { + LOG.info("Could not obtain scan server lock, exiting.", e); + throw new RuntimeException(e); + } + } + + @Override + public ServiceLock getLock() { + return scanServerLock; + } + + @Override + public void run() { + try { + waitForUpgrade(); + } catch (InterruptedException e) { + LOG.error("Interrupted while waiting for upgrade to complete, exiting..."); + System.exit(1); + } + + SecurityUtil.serverLogin(getConfiguration()); + + // TODO metrics + + try { + startScanServerClientService(); + } catch (UnknownHostException e1) { + throw new RuntimeException("Failed to start the scan server client service", e1); + } + + ServiceLock lock = announceExistence(); + this.getContext().setServiceLock(lock); + + while (!isShutdownRequested()) { + if (Thread.currentThread().isInterrupted()) { + LOG.info("Server process thread has been interrupted, shutting down"); + break; + } + try { + Thread.sleep(1000); + // TODO update idle status + } catch (InterruptedException e) { + LOG.info("Interrupt Exception received, shutting down"); + gracefulShutdown(getContext().rpcCreds()); + } + } + + LOG.debug("Stopping Thrift Servers"); + getThriftServer().stop(); + } +} diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Scanner.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Scanner.java index 3500b9c645f..e121a9fa1e9 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Scanner.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Scanner.java @@ -34,6 +34,7 @@ import org.apache.accumulo.core.trace.TraceUtil; import org.apache.accumulo.core.util.Pair; import org.apache.accumulo.core.util.ShutdownUtil; +import org.apache.accumulo.tserver.ScanServer; import 
org.apache.accumulo.tserver.scan.NextBatchTask; import org.apache.accumulo.tserver.scan.ScanParameters; import org.slf4j.Logger; From b58def9e7009c7d8c2c6850f08ff8fbc6cd3df3b Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 29 Jan 2026 23:24:37 +0000 Subject: [PATCH 06/38] WIP --- .../core/clientImpl/ClientContext.java | 2 +- .../accumulo/core/lock/ServiceLockPaths.java | 21 +- .../accumulo/server/AbstractServer.java | 4 + .../server/init/ZooKeeperInitializer.java | 3 + .../server/rpc/ThriftProcessorTypes.java | 14 +- .../org/apache/accumulo/manager/Manager.java | 5 +- .../accumulo/manager/ManagerWorker.java | 231 ++++++++++-------- .../accumulo/manager/fate/FateManager.java | 109 +++++---- .../accumulo/manager/fate/FateWorker.java | 2 + .../accumulo/tserver/tablet/Scanner.java | 1 - .../accumulo/test/MultipleManagerIT.java | 69 ++++-- 11 files changed, 277 insertions(+), 184 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientContext.java b/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientContext.java index 7f1cfed481b..cb3fb5d283e 100644 --- a/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientContext.java +++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientContext.java @@ -1292,7 +1292,7 @@ private static Set createPersistentWatcherPaths() { Constants.ZMANAGER_LOCK, Constants.ZMINI_LOCK, Constants.ZMONITOR_LOCK, Constants.ZNAMESPACES, Constants.ZRECOVERY, Constants.ZSSERVERS, Constants.ZTABLES, Constants.ZTSERVERS, Constants.ZUSERS, RootTable.ZROOT_TABLET, Constants.ZTEST_LOCK, - Constants.ZRESOURCEGROUPS)) { + Constants.ZMANAGER_WORKER_LOCK, Constants.ZRESOURCEGROUPS)) { pathsToWatch.add(path); } return pathsToWatch; diff --git a/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java b/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java index 1d99011e283..cbf826ae43c 100644 --- a/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java 
+++ b/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java @@ -77,7 +77,8 @@ private ServiceLockPath(String type) { Preconditions.checkArgument(this.type.equals(Constants.ZGC_LOCK) || this.type.equals(Constants.ZMANAGER_LOCK) || this.type.equals(Constants.ZMONITOR_LOCK) || this.type.equals(Constants.ZTABLE_LOCKS) || this.type.equals(Constants.ZADMIN_LOCK) - || this.type.equals(Constants.ZTEST_LOCK) || this.type.equals(Constants.ZMANAGER_WORKER_LOCK), "Unsupported type: " + type); + || this.type.equals(Constants.ZTEST_LOCK) + || this.type.equals(Constants.ZMANAGER_WORKER_LOCK), "Unsupported type: " + type); // These server types support only one active instance, so they use a lock at // a known path, not the server's address. this.resourceGroup = null; @@ -105,7 +106,8 @@ private ServiceLockPath(String type, ResourceGroupId resourceGroup, HostAndPort this.type = requireNonNull(type); Preconditions.checkArgument( this.type.equals(Constants.ZCOMPACTORS) || this.type.equals(Constants.ZSSERVERS) - || this.type.equals(Constants.ZTSERVERS) || this.type.equals(Constants.ZDEADTSERVERS), + || this.type.equals(Constants.ZTSERVERS) || this.type.equals(Constants.ZDEADTSERVERS) + || this.type.equals(Constants.ZMANAGER_WORKER_LOCK), "Unsupported type: " + type); this.resourceGroup = requireNonNull(resourceGroup); this.server = requireNonNull(server).toString(); @@ -221,7 +223,7 @@ public static ServiceLockPath parse(Optional serverType, String path) { return switch (type) { case Constants.ZMINI_LOCK -> new ServiceLockPath(type, server); case Constants.ZCOMPACTORS, Constants.ZSSERVERS, Constants.ZTSERVERS, - Constants.ZDEADTSERVERS,Constants.ZMANAGER_WORKER_LOCK -> + Constants.ZDEADTSERVERS, Constants.ZMANAGER_WORKER_LOCK -> new ServiceLockPath(type, ResourceGroupId.of(resourceGroup), HostAndPort.fromString(server)); default -> @@ -240,6 +242,11 @@ public ServiceLockPath createManagerPath() { return new ServiceLockPath(Constants.ZMANAGER_LOCK); } + public 
ServiceLockPath createManagerWorkerPath(ResourceGroupId resourceGroup, + HostAndPort advertiseAddress) { + return new ServiceLockPath(Constants.ZMANAGER_WORKER_LOCK, resourceGroup, advertiseAddress); + } + public ServiceLockPath createMiniPath(String miniUUID) { return new ServiceLockPath(Constants.ZMINI_LOCK, miniUUID); } @@ -289,6 +296,11 @@ public Set getCompactor(ResourceGroupPredicate resourceGroupPre return get(Constants.ZCOMPACTORS, resourceGroupPredicate, address, withLock); } + public Set getManagerWorker(ResourceGroupPredicate resourceGroupPredicate, + AddressSelector address, boolean withLock) { + return get(Constants.ZMANAGER_WORKER_LOCK, resourceGroupPredicate, address, withLock); + } + /** * Note that the ServiceLockPath object returned by this method does not populate the server * attribute. To get the location of the GarbageCollector you will need to parse the lock data at @@ -433,7 +445,8 @@ private Set get(final String serverType, } } } else if (serverType.equals(Constants.ZCOMPACTORS) || serverType.equals(Constants.ZSSERVERS) - || serverType.equals(Constants.ZTSERVERS) || serverType.equals(Constants.ZDEADTSERVERS) || serverType.equals(Constants.ZMANAGER_WORKER_LOCK)) { + || serverType.equals(Constants.ZTSERVERS) || serverType.equals(Constants.ZDEADTSERVERS) + || serverType.equals(Constants.ZMANAGER_WORKER_LOCK)) { final List resourceGroups = zooCache.getChildren(typePath); for (final String group : resourceGroups) { if (resourceGroupPredicate.test(ResourceGroupId.of(group))) { diff --git a/server/base/src/main/java/org/apache/accumulo/server/AbstractServer.java b/server/base/src/main/java/org/apache/accumulo/server/AbstractServer.java index 157698997fd..2b65dc6b0ac 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/AbstractServer.java +++ b/server/base/src/main/java/org/apache/accumulo/server/AbstractServer.java @@ -174,6 +174,10 @@ protected AbstractServer(ServerId.Type serverType, ConfigOpts opts, case TABLET_SERVER: metricSource 
= MetricSource.TABLET_SERVER; break; + case MANAGER_WORKER: + // TODO create a new source? + metricSource = MetricSource.MANAGER; + break; default: throw new IllegalArgumentException("Unhandled server type: " + serverType); } diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java b/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java index 3dde58446c3..2477b65b13a 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java +++ b/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java @@ -176,6 +176,9 @@ void initialize(final ServerContext context, final String rootTabletDirName, ZooUtil.NodeExistsPolicy.FAIL); zrwChroot.putPersistentData(Constants.ZCOMPACTIONS, EMPTY_BYTE_ARRAY, ZooUtil.NodeExistsPolicy.FAIL); + // TODO would need to create in upgrade + zrwChroot.putPersistentData(Constants.ZMANAGER_WORKER_LOCK, EMPTY_BYTE_ARRAY, + ZooUtil.NodeExistsPolicy.FAIL); } /** diff --git a/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java b/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java index 1293aba5992..86a5a77dc96 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java +++ b/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java @@ -148,17 +148,17 @@ public static TMultiplexedProcessor getManagerTProcessor( } public static TMultiplexedProcessor getManagerWorkerTProcessor( - ServerProcessService.Iface processHandler, ClientServiceHandler clientHandler, - FateWorkerService.Iface fateWorkerHandler, ServerContext context) { + ServerProcessService.Iface processHandler, ClientServiceHandler clientHandler, + FateWorkerService.Iface fateWorkerHandler, ServerContext context) { TMultiplexedProcessor muxProcessor = new TMultiplexedProcessor(); muxProcessor.registerProcessor(CLIENT.getServiceName(), 
CLIENT.getTProcessor( - ClientService.Processor.class, ClientService.Iface.class, clientHandler, context)); + ClientService.Processor.class, ClientService.Iface.class, clientHandler, context)); muxProcessor.registerProcessor(SERVER_PROCESS.getServiceName(), - SERVER_PROCESS.getTProcessor(ServerProcessService.Processor.class, - ServerProcessService.Iface.class, processHandler, context)); + SERVER_PROCESS.getTProcessor(ServerProcessService.Processor.class, + ServerProcessService.Iface.class, processHandler, context)); muxProcessor.registerProcessor(FATE_WORKER.getServiceName(), - FATE_WORKER.getTProcessor(FateWorkerService.Processor.class, - FateWorkerService.Iface.class, fateWorkerHandler, context)); + FATE_WORKER.getTProcessor(FateWorkerService.Processor.class, FateWorkerService.Iface.class, + fateWorkerHandler, context)); return muxProcessor; } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index 051e4247780..08989d41900 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -71,7 +71,6 @@ import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.FateStore; -import org.apache.accumulo.core.fate.thrift.FateWorkerService; import org.apache.accumulo.core.fate.user.UserFateStore; import org.apache.accumulo.core.fate.zookeeper.MetaFateStore; import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter; @@ -109,7 +108,6 @@ import org.apache.accumulo.core.util.time.SteadyTime; import org.apache.accumulo.core.zookeeper.ZcStat; import org.apache.accumulo.manager.compaction.coordinator.CompactionCoordinator; -import org.apache.accumulo.manager.fate.FateWorker; import org.apache.accumulo.manager.merge.FindMergeableRangeTask; import org.apache.accumulo.manager.metrics.ManagerMetrics; 
import org.apache.accumulo.manager.recovery.RecoveryManager; @@ -919,8 +917,7 @@ public void run() { compactionCoordinator = new CompactionCoordinator(this, fateRefs); var processor = ThriftProcessorTypes.getManagerTProcessor(this, fateServiceHandler, - compactionCoordinator.getThriftService(), managerClientHandler, - getContext()); + compactionCoordinator.getThriftService(), managerClientHandler, getContext()); try { updateThriftServer(() -> { return TServerUtils.createThriftServer(context, getBindAddress(), diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java index 72514a5e3a1..9541b82d0bd 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java @@ -1,5 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ package org.apache.accumulo.manager; +import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly; + +import java.net.UnknownHostException; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + import org.apache.accumulo.core.cli.ConfigOpts; import org.apache.accumulo.core.client.admin.servers.ServerId; import org.apache.accumulo.core.conf.Property; @@ -20,120 +44,119 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.net.UnknownHostException; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly; - public class ManagerWorker extends AbstractServer { - private static final Logger LOG = LoggerFactory.getLogger(ManagerWorker.class); - private ServiceLock scanServerLock; - - protected ManagerWorker(ConfigOpts opts, String[] args) { - super(ServerId.Type.MANAGER_WORKER, opts, ServerContext::new, args); - } - - protected void startScanServerClientService() throws UnknownHostException { - - var fateWorker = new FateWorker(getContext()); - - // This class implements TabletClientService.Iface and then delegates calls. Be sure - // to set up the ThriftProcessor using this class, not the delegate. 
- ClientServiceHandler clientHandler = new ClientServiceHandler(getContext()); - TProcessor processor = - ThriftProcessorTypes.getManagerWorkerTProcessor(this, clientHandler, fateWorker, getContext()); - - // TODO using scan server props - updateThriftServer(() -> { - return TServerUtils.createThriftServer(getContext(), getBindAddress(), - Property.SSERV_CLIENTPORT, processor, this.getClass().getSimpleName(), - Property.SSERV_PORTSEARCH, Property.SSERV_MINTHREADS, Property.SSERV_MINTHREADS_TIMEOUT, - Property.SSERV_THREADCHECK); - }, true); - } + private static final Logger log = LoggerFactory.getLogger(ManagerWorker.class); + private ServiceLock scanServerLock; + + protected ManagerWorker(ConfigOpts opts, String[] args) { + super(ServerId.Type.MANAGER_WORKER, opts, ServerContext::new, args); + } + + protected void startClientService() throws UnknownHostException { + + var fateWorker = new FateWorker(getContext()); + + // This class implements TabletClientService.Iface and then delegates calls. Be sure + // to set up the ThriftProcessor using this class, not the delegate. 
+ ClientServiceHandler clientHandler = new ClientServiceHandler(getContext()); + TProcessor processor = ThriftProcessorTypes.getManagerWorkerTProcessor(this, clientHandler, + fateWorker, getContext()); + + // TODO using scan server props + updateThriftServer(() -> { + return TServerUtils.createThriftServer(getContext(), getBindAddress(), + Property.SSERV_CLIENTPORT, processor, this.getClass().getSimpleName(), + Property.SSERV_PORTSEARCH, Property.SSERV_MINTHREADS, Property.SSERV_MINTHREADS_TIMEOUT, + Property.SSERV_THREADCHECK); + }, true); + } + + private ServiceLock announceExistence() { + final ZooReaderWriter zoo = getContext().getZooSession().asReaderWriter(); + try { + + final ServiceLockPaths.ServiceLockPath zLockPath = getContext().getServerPaths() + .createManagerWorkerPath(getResourceGroup(), getAdvertiseAddress()); + ServiceLockSupport.createNonHaServiceLockPath(ServerId.Type.MANAGER_WORKER, zoo, zLockPath); + var serverLockUUID = UUID.randomUUID(); + scanServerLock = new ServiceLock(getContext().getZooSession(), zLockPath, serverLockUUID); + ServiceLock.LockWatcher lw = new ServiceLockSupport.ServiceLockWatcher( + ServerId.Type.MANAGER_WORKER, () -> getShutdownComplete().get(), + (type) -> getContext().getLowMemoryDetector().logGCInfo(getConfiguration())); + + for (int i = 0; i < 120 / 5; i++) { + zoo.putPersistentData(zLockPath.toString(), new byte[0], ZooUtil.NodeExistsPolicy.SKIP); + + ServiceLockData.ServiceDescriptors descriptors = new ServiceLockData.ServiceDescriptors(); + for (ServiceLockData.ThriftService svc : new ServiceLockData.ThriftService[] { + ServiceLockData.ThriftService.CLIENT, ServiceLockData.ThriftService.FATE_WORKER}) { + descriptors.addService(new ServiceLockData.ServiceDescriptor(serverLockUUID, svc, + getAdvertiseAddress().toString(), this.getResourceGroup())); + } - private ServiceLock announceExistence() { - final ZooReaderWriter zoo = getContext().getZooSession().asReaderWriter(); - try { - - final 
ServiceLockPaths.ServiceLockPath zLockPath = - getContext().getServerPaths().createScanServerPath(getResourceGroup(), getAdvertiseAddress()); - ServiceLockSupport.createNonHaServiceLockPath(ServerId.Type.MANAGER_WORKER, zoo, zLockPath); - var serverLockUUID = UUID.randomUUID(); - scanServerLock = new ServiceLock(getContext().getZooSession(), zLockPath, serverLockUUID); - ServiceLock.LockWatcher lw = new ServiceLockSupport.ServiceLockWatcher(ServerId.Type.MANAGER_WORKER, () -> getShutdownComplete().get(), - (type) -> getContext().getLowMemoryDetector().logGCInfo(getConfiguration())); - - for (int i = 0; i < 120 / 5; i++) { - zoo.putPersistentData(zLockPath.toString(), new byte[0], ZooUtil.NodeExistsPolicy.SKIP); - - ServiceLockData.ServiceDescriptors descriptors = new ServiceLockData.ServiceDescriptors(); - for (ServiceLockData.ThriftService svc : new ServiceLockData.ThriftService[] {ServiceLockData.ThriftService.CLIENT, - ServiceLockData.ThriftService.FATE_WORKER}) { - descriptors.addService(new ServiceLockData.ServiceDescriptor(serverLockUUID, svc, - getAdvertiseAddress().toString(), this.getResourceGroup())); - } - - if (scanServerLock.tryLock(lw, new ServiceLockData(descriptors))) { - LOG.debug("Obtained scan server lock {}", scanServerLock.getLockPath()); - return scanServerLock; - } - LOG.info("Waiting for scan server lock"); - sleepUninterruptibly(5, TimeUnit.SECONDS); - } - String msg = "Too many retries, exiting."; - LOG.info(msg); - throw new RuntimeException(msg); - } catch (Exception e) { - LOG.info("Could not obtain scan server lock, exiting.", e); - throw new RuntimeException(e); + if (scanServerLock.tryLock(lw, new ServiceLockData(descriptors))) { + log.debug("Obtained scan server lock {}", scanServerLock.getLockPath()); + return scanServerLock; } + log.info("Waiting for scan server lock"); + sleepUninterruptibly(5, TimeUnit.SECONDS); + } + String msg = "Too many retries, exiting."; + log.info(msg); + throw new RuntimeException(msg); + } catch 
(Exception e) { + log.info("Could not obtain scan server lock, exiting.", e); + throw new RuntimeException(e); } - - @Override - public ServiceLock getLock() { - return scanServerLock; + } + + @Override + public ServiceLock getLock() { + return scanServerLock; + } + + @Override + public void run() { + try { + waitForUpgrade(); + } catch (InterruptedException e) { + log.error("Interrupted while waiting for upgrade to complete, exiting..."); + System.exit(1); } - @Override - public void run() { - try { - waitForUpgrade(); - } catch (InterruptedException e) { - LOG.error("Interrupted while waiting for upgrade to complete, exiting..."); - System.exit(1); - } + SecurityUtil.serverLogin(getConfiguration()); - SecurityUtil.serverLogin(getConfiguration()); + // TODO metrics - // TODO metrics + try { + startClientService(); + } catch (UnknownHostException e1) { + throw new RuntimeException("Failed to start the scan server client service", e1); + } - try { - startScanServerClientService(); - } catch (UnknownHostException e1) { - throw new RuntimeException("Failed to start the scan server client service", e1); - } + ServiceLock lock = announceExistence(); + this.getContext().setServiceLock(lock); + + while (!isShutdownRequested()) { + if (Thread.currentThread().isInterrupted()) { + log.info("Server process thread has been interrupted, shutting down"); + break; + } + try { + Thread.sleep(1000); + // TODO update idle status + } catch (InterruptedException e) { + log.info("Interrupt Exception received, shutting down"); + gracefulShutdown(getContext().rpcCreds()); + } + } - ServiceLock lock = announceExistence(); - this.getContext().setServiceLock(lock); - - while (!isShutdownRequested()) { - if (Thread.currentThread().isInterrupted()) { - LOG.info("Server process thread has been interrupted, shutting down"); - break; - } - try { - Thread.sleep(1000); - // TODO update idle status - } catch (InterruptedException e) { - LOG.info("Interrupt Exception received, shutting down"); - 
gracefulShutdown(getContext().rpcCreds()); - } - } + log.debug("Stopping Thrift Servers"); + getThriftServer().stop(); + } - LOG.debug("Stopping Thrift Servers"); - getThriftServer().stop(); - } + public static void main(String[] args) throws Exception { + AbstractServer.startServer(new ManagerWorker(new ConfigOpts(), args), log); + } } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index fe4f4d084a0..0e4be64ffcc 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -18,10 +18,10 @@ */ package org.apache.accumulo.manager.fate; -import java.util.ArrayList; +import static org.apache.accumulo.core.lock.ServiceLockPaths.ResourceGroupPredicate.DEFAULT_RG_ONLY; + import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; @@ -31,22 +31,19 @@ import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.thrift.FateWorkerService; import org.apache.accumulo.core.fate.thrift.TFatePartition; -import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter; -import org.apache.accumulo.core.lock.ServiceLock; -import org.apache.accumulo.core.lock.ServiceLockData; +import org.apache.accumulo.core.lock.ServiceLockPaths.AddressSelector; import org.apache.accumulo.core.rpc.ThriftUtil; import org.apache.accumulo.core.rpc.clients.ThriftClientTypes; import org.apache.accumulo.core.trace.TraceUtil; import org.apache.accumulo.server.ServerContext; import org.apache.hadoop.util.Sets; +import org.apache.thrift.TException; import com.google.common.net.HostAndPort; -import org.apache.thrift.TException; -import org.apache.zookeeper.KeeperException; public class FateManager { - record FatePartition(FateId start, FateId end) { + record 
FatePartition(FateId start, FateId end) { public TFatePartition toThrift() { return new TFatePartition(start.canonical(), end.canonical()); @@ -75,6 +72,9 @@ public void managerWorkers() throws Exception { // TODO handle duplicate current assignments + System.out.println("current : " + currentAssignments); + System.out.println("desired : " + desiredParititions); + Map> desired = computeDesiredAssignments(currentAssignments, desiredParititions); @@ -88,29 +88,42 @@ public void managerWorkers() throws Exception { if (haveExtra) { // force unload of extra partitions to make them available for other workers - desired.forEach((worker, paritions) -> { + for (Map.Entry> entry : desired.entrySet()) { + HostAndPort worker = entry.getKey(); + Set partitions = entry.getValue(); var curr = currentAssignments.getOrDefault(worker, Set.of()); - if (!curr.equals(paritions)) { - var intersection = Sets.intersection(curr, paritions); + if (!curr.equals(partitions)) { + var intersection = Sets.intersection(curr, partitions); setWorkerPartitions(worker, curr, intersection); currentAssignments.put(worker, intersection); } - }); + } } // Load all partitions on all workers.. 
- desired.forEach((worker, paritions) -> { + for (Map.Entry> entry : desired.entrySet()) { + HostAndPort worker = entry.getKey(); + Set partitions = entry.getValue(); var curr = currentAssignments.getOrDefault(worker, Set.of()); - if (!curr.equals(paritions)) { - setWorkerPartitions(worker, curr, paritions); + if (!curr.equals(partitions)) { + setWorkerPartitions(worker, curr, partitions); } - }); + } } } - private void setWorkerPartitions(HostAndPort worker, Set current, - Set desired) { + private void setWorkerPartitions(HostAndPort address, Set current, + Set desired) throws TException { // TODO make a compare and set type RPC that uses the current and desired + FateWorkerService.Client client = + ThriftUtil.getClient(ThriftClientTypes.FATE_WORKER, address, context); + try { + client.setPartitions(TraceUtil.traceInfo(), context.rpcCreds(), + current.stream().map(FatePartition::toThrift).toList(), + desired.stream().map(FatePartition::toThrift).toList()); + } finally { + ThriftUtil.returnClient(client, context); + } } /** @@ -121,12 +134,13 @@ private Map> computeDesiredAssignments( Map> currentAssignments, Set desiredParititions) { // min number of partitions a single worker must have - int minPerWorker = currentAssignments.size() / desiredParititions.size(); + int minPerWorker = desiredParititions.size() / currentAssignments.size(); // max number of partitions a single worker can have int maxPerWorker = - minPerWorker + Math.min(currentAssignments.size() % desiredParititions.size(), 1); + minPerWorker + Math.min(desiredParititions.size() % currentAssignments.size(), 1); // number of workers that can have the max partitions - int desiredWorkersWithMax = currentAssignments.size() % desiredParititions.size(); + int desiredWorkersWithMax = + desiredParititions.size() - minPerWorker * currentAssignments.size(); Map> desiredAssignments = new HashMap<>(); Set availablePartitions = new HashSet<>(desiredParititions); @@ -134,6 +148,13 @@ private Map> 
computeDesiredAssignments( // remove everything that is assigned currentAssignments.values().forEach(p -> p.forEach(availablePartitions::remove)); + System.out.println("currentAssignments.size():" + currentAssignments.size()); + System.out.println("desiredParititions.size():" + desiredParititions.size()); + System.out.println("minPerWorker:" + minPerWorker); + System.out.println("maxPerWorker:" + maxPerWorker); + System.out.println("desiredWorkersWithMax:" + desiredWorkersWithMax); + System.out.println("availablePartitions:" + availablePartitions); + // Find workers that currently have too many partitions assigned and place their excess in the // available set. Let workers keep what they have when its under the limit. int numWorkersWithMax = 0; @@ -178,45 +199,47 @@ private Map> computeDesiredAssignments( } } + desiredAssignments.forEach((hp, parts) -> { + System.out.println(" desired " + hp + " " + parts.size() + " " + parts); + }); + return desiredAssignments; } private Set getDesiredPartitions() { - HashSet desired = new HashSet<>(); // TODO created based on the number of available servers - for(long i = 0; i<=15; i++){ - UUID start = new UUID((i<<60) , -0); - UUID stop = new UUID((i<<60) | (-1L>>>4), -1); - desired.add(new FatePartition(FateId.from(FateInstanceType.USER, start), FateId.from(FateInstanceType.USER, stop))); + for (long i = 0; i <= 15; i++) { + UUID start = new UUID((i << 60), -0); + UUID stop = new UUID((i << 60) | (-1L >>> 4), -1); + desired.add(new FatePartition(FateId.from(FateInstanceType.USER, start), + FateId.from(FateInstanceType.USER, stop))); } return desired; } - private Map> getCurrentAssignments() throws InterruptedException, KeeperException, TException { - ZooReaderWriter zk = context.getZooSession().asReaderWriter(); - var managerPath = context.getServerPaths().createManagerPath(); - - var children = ServiceLock.validateAndSort(managerPath, zk.getChildren(managerPath.toString())); + private Map> getCurrentAssignments() throws 
TException { + var workers = + context.getServerPaths().getManagerWorker(DEFAULT_RG_ONLY, AddressSelector.all(), true); - List locksData = new ArrayList<>(children.size()); - - for(var child : children){ - ServiceLockData.parse(zk.getData(managerPath +"/"+child)).ifPresent(locksData::add); - } + System.out.println("workers : " + workers); Map> currentAssignments = new HashMap<>(); - for(var lockData : locksData) { - var address = lockData.getAddress(ServiceLockData.ThriftService.FATE_WORKER); + for (var worker : workers) { + var address = HostAndPort.fromString(worker.getServer()); FateWorkerService.Client client = - ThriftUtil.getClient(ThriftClientTypes.FATE_WORKER, address, context); - - var tparitions = client.getPartitions(TraceUtil.traceInfo(), context.rpcCreds()); - var partitions = tparitions.stream().map(FatePartition::from).collect(Collectors.toSet()); - currentAssignments.put(address, partitions); + ThriftUtil.getClient(ThriftClientTypes.FATE_WORKER, address, context); + try { + + var tparitions = client.getPartitions(TraceUtil.traceInfo(), context.rpcCreds()); + var partitions = tparitions.stream().map(FatePartition::from).collect(Collectors.toSet()); + currentAssignments.put(address, partitions); + } finally { + ThriftUtil.returnClient(client, context); + } } return currentAssignments; diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index d83981ac720..e2277488eaf 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -74,8 +74,10 @@ public boolean setPartitions(TInfo tinfo, TCredentials credentials, List log.info("old partition {}", p)); currentPartitions.clear(); desired.stream().map(FatePartition::from).forEach(currentPartitions::add); + desired.stream().map(FatePartition::from).forEach(p -> log.info("new 
partition {}", p)); log.info("Changed partitions from {} to {}", expectedSet, currentPartitions); return true; } else { diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Scanner.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Scanner.java index e121a9fa1e9..3500b9c645f 100644 --- a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Scanner.java +++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Scanner.java @@ -34,7 +34,6 @@ import org.apache.accumulo.core.trace.TraceUtil; import org.apache.accumulo.core.util.Pair; import org.apache.accumulo.core.util.ShutdownUtil; -import org.apache.accumulo.tserver.ScanServer; import org.apache.accumulo.tserver.scan.NextBatchTask; import org.apache.accumulo.tserver.scan.ScanParameters; import org.slf4j.Logger; diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java index b6af8b48f1c..d02a48ce75c 100644 --- a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java +++ b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java @@ -1,35 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.apache.accumulo.test; -import org.apache.accumulo.harness.AccumuloClusterHarness; -import org.apache.accumulo.manager.Manager; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Executors; + +import org.apache.accumulo.manager.ManagerWorker; import org.apache.accumulo.manager.fate.FateManager; import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl; import org.apache.accumulo.test.functional.ConfigurableMacBase; import org.apache.hadoop.conf.Configuration; import org.junit.jupiter.api.Test; -import java.util.ArrayList; -import java.util.BitSet; -import java.util.List; - public class MultipleManagerIT extends ConfigurableMacBase { - @Override - protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { - // TODO add a way to start multiple managers to mini - super.configure(cfg, hadoopCoreSite); - } + @Override + protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { + // TODO add a way to start multiple managers to mini + super.configure(cfg, hadoopCoreSite); + } - @Test - public void test() throws Exception { + @Test + public void test() throws Exception { - List managers = new ArrayList<>(); - for(int i = 0; i<5;i++){ - managers.add(exec(Manager.class)); - } + List managerWorkers = new ArrayList<>(); + for (int i = 0; i < 2; i++) { + managerWorkers.add(exec(ManagerWorker.class)); + } - var fateMgr = new FateManager(getServerContext()); - fateMgr.managerWorkers(); + var executor = Executors.newCachedThreadPool(); - // TODO kill processes + var fateMgr = new FateManager(getServerContext()); + var future = executor.submit(() -> { + fateMgr.managerWorkers(); + return null; + }); + + Thread.sleep(30_000); + for (int i = 0; i < 3; i++) { + managerWorkers.add(exec(ManagerWorker.class)); } + + Thread.sleep(30_000); + System.out.println("DONE"); + // TODO 
kill processes + } } From af4a3ccfccc21bc0855d01c7583fe0b8c4a31741 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 5 Feb 2026 22:26:44 +0000 Subject: [PATCH 07/38] WIP --- .../accumulo/core/fate/AbstractFateStore.java | 14 +- .../org/apache/accumulo/core/fate/Fate.java | 7 + .../accumulo/core/fate/FateExecutor.java | 11 +- .../accumulo/core/fate/FatePartition.java | 52 +++++++ .../accumulo/core/fate/ReadOnlyFateStore.java | 4 +- .../core/fate/user/UserFateStore.java | 15 +- .../core/fate/zookeeper/MetaFateStore.java | 13 ++ .../accumulo/core/logging/FateLogger.java | 7 +- .../apache/accumulo/core/fate/TestStore.java | 3 +- .../org/apache/accumulo/manager/Manager.java | 5 + .../accumulo/manager/ManagerWorker.java | 22 +-- .../accumulo/manager/fate/FateManager.java | 13 +- .../accumulo/manager/fate/FateWorker.java | 30 +++- .../accumulo/manager/fate/FateWorkerEnv.java | 141 ++++++++++++++++++ .../accumulo/test/MultipleManagerIT.java | 8 + .../accumulo/test/fate/FateStoreITBase.java | 10 +- 16 files changed, 316 insertions(+), 39 deletions(-) create mode 100644 core/src/main/java/org/apache/accumulo/core/fate/FatePartition.java create mode 100644 server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java diff --git a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java index 751e3d42a4b..38d1ecba3cd 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java @@ -161,7 +161,12 @@ public FateTxStore reserve(FateId fateId) { EnumSet.of(TStatus.SUBMITTED, TStatus.FAILED_IN_PROGRESS); @Override - public void runnable(AtomicBoolean keepWaiting, Consumer idConsumer) { + public void runnable(Set partitions, AtomicBoolean keepWaiting, + Consumer idConsumer) { + + if(partitions.isEmpty()){ + return; + } AtomicLong seen = new AtomicLong(0); @@ -169,8 +174,8 
@@ public void runnable(AtomicBoolean keepWaiting, Consumer idConsume final long beforeCount = unreservedRunnableCount.getCount(); final boolean beforeDeferredOverflow = deferredOverflow.get(); - try (Stream inProgress = getTransactions(IN_PROGRESS_SET); - Stream other = getTransactions(OTHER_RUNNABLE_SET)) { + try (Stream inProgress = getTransactions(partitions, IN_PROGRESS_SET); + Stream other = getTransactions(partitions, OTHER_RUNNABLE_SET)) { // read the in progress transaction first and then everything else in order to process those // first var transactions = Stream.concat(inProgress, other); @@ -289,6 +294,9 @@ protected void verifyLock(ZooUtil.LockID lockID, FateId fateId) { protected abstract Stream getTransactions(EnumSet statuses); + protected abstract Stream getTransactions(Set partitions, + EnumSet statuses); + protected abstract TStatus _getStatus(FateId fateId); protected abstract Optional getKey(FateId fateId); diff --git a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java index eebe1147853..b952009d1cd 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java @@ -572,6 +572,13 @@ public void close() { store.close(); } + public void setPartitions(Set partitions){ + synchronized (fateExecutors) { + // TODO would need to set these when executors change... 
+ fateExecutors.forEach(fe -> fe.setPartitions(partitions)); + } + } + private boolean anyFateExecutorIsAlive() { synchronized (fateExecutors) { return fateExecutors.stream().anyMatch(FateExecutor::isAlive); diff --git a/core/src/main/java/org/apache/accumulo/core/fate/FateExecutor.java b/core/src/main/java/org/apache/accumulo/core/fate/FateExecutor.java index bbf0bcb81ef..95a89706a62 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/FateExecutor.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/FateExecutor.java @@ -33,6 +33,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentLinkedQueue; @@ -43,6 +44,7 @@ import java.util.concurrent.TransferQueue; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException; import org.apache.accumulo.core.conf.Property; @@ -81,6 +83,7 @@ public class FateExecutor { private final Set fateOps; private final ConcurrentLinkedQueue idleCountHistory = new ConcurrentLinkedQueue<>(); private final FateExecutorMetrics fateExecutorMetrics; + private final AtomicReference> partitions = new AtomicReference<>(Set.of()); public FateExecutor(Fate fate, T environment, Set fateOps, int poolSize, String name) { @@ -298,6 +301,11 @@ protected ConcurrentLinkedQueue getIdleCountHistory() { return idleCountHistory; } + public void setPartitions(Set partitions) { + Objects.requireNonNull(partitions); + this.partitions.set(Set.copyOf(partitions)); + } + /** * A single thread that finds transactions to work on and queues them up. Do not want each worker * thread going to the store and looking for work as it would place more load on the store. 
@@ -308,7 +316,8 @@ private class WorkFinder implements Runnable { public void run() { while (fate.getKeepRunning().get() && !isShutdown()) { try { - fate.getStore().runnable(fate.getKeepRunning(), fateIdStatus -> { + // TODO + fate.getStore().runnable(partitions.get(), fate.getKeepRunning(), fateIdStatus -> { // The FateId with the fate operation 'fateOp' is workable by this FateExecutor if // 1) This FateExecutor is assigned to work on 'fateOp' ('fateOp' is in 'fateOps') // 2) The transaction was cancelled while NEW. This is an edge case that needs to be diff --git a/core/src/main/java/org/apache/accumulo/core/fate/FatePartition.java b/core/src/main/java/org/apache/accumulo/core/fate/FatePartition.java new file mode 100644 index 00000000000..290dbeabaf3 --- /dev/null +++ b/core/src/main/java/org/apache/accumulo/core/fate/FatePartition.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.accumulo.core.fate; + +import java.util.UUID; + +import org.apache.accumulo.core.fate.thrift.TFatePartition; + +public record FatePartition(FateId start, FateId end) { + + public TFatePartition toThrift() { + return new TFatePartition(start.canonical(), end.canonical()); + } + + public static FatePartition from(TFatePartition tfp) { + return new FatePartition(FateId.from(tfp.start), FateId.from(tfp.stop)); + } + + private static final FatePartition ALL_USER = + new FatePartition(FateId.from(FateInstanceType.USER, new UUID(0, 0)), + FateId.from(FateInstanceType.USER, new UUID(-1, -1))); + private static final FatePartition ALL_META = + new FatePartition(FateId.from(FateInstanceType.META, new UUID(0, 0)), + FateId.from(FateInstanceType.META, new UUID(-1, -1))); + + public static FatePartition all(FateInstanceType type) { + return switch (type) { + case META -> ALL_META; + case USER -> ALL_USER; + }; + } + + public boolean contains(FateId fateId) { + return start.compareTo(fateId) <= 0 && end.compareTo(fateId) >= 0; + } +} diff --git a/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyFateStore.java index 263a9b090b9..3608f9b1c29 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyFateStore.java @@ -23,6 +23,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.stream.Stream; @@ -163,7 +164,8 @@ interface FateIdStatus { * is found or until the keepWaiting parameter is false. It will return once all runnable ids * found were passed to the consumer. 
*/ - void runnable(AtomicBoolean keepWaiting, Consumer idConsumer); + void runnable(Set partitions, AtomicBoolean keepWaiting, + Consumer idConsumer); /** * Returns true if the deferred map was cleared and if deferred executions are currently disabled diff --git a/core/src/main/java/org/apache/accumulo/core/fate/user/UserFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/user/UserFateStore.java index 55300470d7b..eaca40473df 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/user/UserFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/user/UserFateStore.java @@ -28,6 +28,7 @@ import java.util.Map.Entry; import java.util.Objects; import java.util.Optional; +import java.util.Set; import java.util.SortedMap; import java.util.UUID; import java.util.concurrent.CompletableFuture; @@ -56,6 +57,7 @@ import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.FateKey; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.ReadOnlyRepo; import org.apache.accumulo.core.fate.Repo; import org.apache.accumulo.core.fate.StackOverflowException; @@ -281,9 +283,20 @@ public void deleteDeadReservations() { @Override protected Stream getTransactions(EnumSet statuses) { + return getTransactions(FatePartition.all(FateInstanceType.USER), statuses); + } + + @Override + protected Stream getTransactions(Set partitions, + EnumSet statuses) { + return partitions.stream().flatMap(p -> getTransactions(p, statuses)); + } + + private Stream getTransactions(FatePartition partition, EnumSet statuses) { try { Scanner scanner = context.createScanner(tableName, Authorizations.EMPTY); - scanner.setRange(new Range()); + scanner + .setRange(new Range(getRowId(partition.start()), true, getRowId(partition.end()), true)); RowFateStatusFilter.configureScanner(scanner, statuses); // columns fetched here must be in/added to TxAdminColumnFamily for locality 
 group benefits TxAdminColumnFamily.STATUS_COLUMN.fetch(scanner); diff --git a/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/MetaFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/MetaFateStore.java index f6f67dfef22..7d7f9bd8852 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/MetaFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/MetaFateStore.java @@ -55,6 +55,7 @@ import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.FateKey; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.ReadOnlyRepo; import org.apache.accumulo.core.fate.Repo; import org.apache.accumulo.core.fate.StackOverflowException; @@ -616,6 +617,18 @@ public Optional getFateOperation() { } } + @Override + protected Stream getTransactions(Set partitions, + EnumSet statuses) { + return getTransactions(statuses).filter(fis -> { + // TODO this could be inefficient + for (var p : partitions) { + if (p.contains(fis.getFateId())) { return true; } + } + return false; + }); + } + @Override + public Stream list(FateKey.FateKeyType type) { return getTransactions(EnumSet.allOf(TStatus.class)) diff --git a/core/src/main/java/org/apache/accumulo/core/logging/FateLogger.java b/core/src/main/java/org/apache/accumulo/core/logging/FateLogger.java index 331401fc6bb..38e8976f515 100644 --- a/core/src/main/java/org/apache/accumulo/core/logging/FateLogger.java +++ b/core/src/main/java/org/apache/accumulo/core/logging/FateLogger.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; @@ -34,6 +35,7 @@ import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; import 
org.apache.accumulo.core.fate.FateKey; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.FateStore; import org.apache.accumulo.core.fate.FateStore.FateTxStore; import org.apache.accumulo.core.fate.FateStore.Seeder; @@ -140,8 +142,9 @@ public Stream list(FateKey.FateKeyType type) { } @Override - public void runnable(AtomicBoolean keepWaiting, Consumer idConsumer) { - store.runnable(keepWaiting, idConsumer); + public void runnable(Set partitions, AtomicBoolean keepWaiting, + Consumer idConsumer) { + store.runnable(partitions, keepWaiting, idConsumer); } @Override diff --git a/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java b/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java index e4d057fc108..da50b715d1a 100644 --- a/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java +++ b/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java @@ -273,7 +273,8 @@ public Stream list(FateKey.FateKeyType type) { } @Override - public void runnable(AtomicBoolean keepWaiting, Consumer idConsumer) { + public void runnable(Set partitions, AtomicBoolean keepWaiting, + Consumer idConsumer) { throw new UnsupportedOperationException(); } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index 5391ddbd0aa..fad0a516931 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -70,6 +70,7 @@ import org.apache.accumulo.core.fate.FateCleaner; import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.FateStore; import org.apache.accumulo.core.fate.user.UserFateStore; import org.apache.accumulo.core.fate.zookeeper.MetaFateStore; @@ -1276,6 +1277,10 @@ protected Fate 
initializeFateInstance(ServerContext context, FateStore< ThreadPools.watchCriticalScheduledTask(context.getScheduledExecutor() .scheduleWithFixedDelay(fateCleaner::ageOff, 10, 4 * 60, MINUTES)); + if(store.type() == FateInstanceType.META){ + fateInstance.setPartitions(Set.of(FatePartition.all(FateInstanceType.META))); + }// else do not run user transactions for now in the manager... it will have an empty set of partitions + return fateInstance; } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java index 9541b82d0bd..9218a498529 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java @@ -47,7 +47,8 @@ public class ManagerWorker extends AbstractServer { private static final Logger log = LoggerFactory.getLogger(ManagerWorker.class); - private ServiceLock scanServerLock; + private volatile ServiceLock managerWorkerLock; + private FateWorker fateWorker; protected ManagerWorker(ConfigOpts opts, String[] args) { super(ServerId.Type.MANAGER_WORKER, opts, ServerContext::new, args); @@ -55,7 +56,7 @@ protected ManagerWorker(ConfigOpts opts, String[] args) { protected void startClientService() throws UnknownHostException { - var fateWorker = new FateWorker(getContext()); + fateWorker = new FateWorker(getContext(), this::getLock); // This class implements TabletClientService.Iface and then delegates calls. Be sure // to set up the ThriftProcessor using this class, not the delegate. 
@@ -80,7 +81,7 @@ private ServiceLock announceExistence() { .createManagerWorkerPath(getResourceGroup(), getAdvertiseAddress()); ServiceLockSupport.createNonHaServiceLockPath(ServerId.Type.MANAGER_WORKER, zoo, zLockPath); var serverLockUUID = UUID.randomUUID(); - scanServerLock = new ServiceLock(getContext().getZooSession(), zLockPath, serverLockUUID); + managerWorkerLock = new ServiceLock(getContext().getZooSession(), zLockPath, serverLockUUID); ServiceLock.LockWatcher lw = new ServiceLockSupport.ServiceLockWatcher( ServerId.Type.MANAGER_WORKER, () -> getShutdownComplete().get(), (type) -> getContext().getLowMemoryDetector().logGCInfo(getConfiguration())); @@ -95,25 +96,25 @@ private ServiceLock announceExistence() { getAdvertiseAddress().toString(), this.getResourceGroup())); } - if (scanServerLock.tryLock(lw, new ServiceLockData(descriptors))) { - log.debug("Obtained scan server lock {}", scanServerLock.getLockPath()); - return scanServerLock; + if (managerWorkerLock.tryLock(lw, new ServiceLockData(descriptors))) { + log.debug("Obtained scan server lock {}", managerWorkerLock.getLockPath()); + return managerWorkerLock; } - log.info("Waiting for scan server lock"); + log.info("Waiting for manager worker lock"); sleepUninterruptibly(5, TimeUnit.SECONDS); } String msg = "Too many retries, exiting."; log.info(msg); throw new RuntimeException(msg); } catch (Exception e) { - log.info("Could not obtain scan server lock, exiting.", e); + log.info("Could not obtain manager worker lock, exiting.", e); throw new RuntimeException(e); } } @Override public ServiceLock getLock() { - return scanServerLock; + return managerWorkerLock; } @Override @@ -132,11 +133,12 @@ public void run() { try { startClientService(); } catch (UnknownHostException e1) { - throw new RuntimeException("Failed to start the scan server client service", e1); + throw new RuntimeException("Failed to start the manager worker client service", e1); } ServiceLock lock = announceExistence(); 
this.getContext().setServiceLock(lock); + fateWorker.setLock(lock); while (!isShutdownRequested()) { if (Thread.currentThread().isInterrupted()) { diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 0e4be64ffcc..5f3a4749c93 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -29,8 +29,8 @@ import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.thrift.FateWorkerService; -import org.apache.accumulo.core.fate.thrift.TFatePartition; import org.apache.accumulo.core.lock.ServiceLockPaths.AddressSelector; import org.apache.accumulo.core.rpc.ThriftUtil; import org.apache.accumulo.core.rpc.clients.ThriftClientTypes; @@ -43,17 +43,6 @@ public class FateManager { - record FatePartition(FateId start, FateId end) { - - public TFatePartition toThrift() { - return new TFatePartition(start.canonical(), end.canonical()); - } - - public static FatePartition from(TFatePartition tfp) { - return new FatePartition(FateId.from(tfp.start), FateId.from(tfp.stop)); - } - } - private final ServerContext context; public FateManager(ServerContext context) { diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index e2277488eaf..c62e07ee5a2 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -22,16 +22,26 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.function.Predicate; +import java.util.function.Supplier; import 
java.util.stream.Collectors; import org.apache.accumulo.core.client.AccumuloSecurityException; import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode; import org.apache.accumulo.core.clientImpl.thrift.TInfo; import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException; +import org.apache.accumulo.core.fate.Fate; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.thrift.FateWorkerService; import org.apache.accumulo.core.fate.thrift.TFatePartition; +import org.apache.accumulo.core.fate.user.UserFateStore; +import org.apache.accumulo.core.fate.zookeeper.MetaFateStore; +import org.apache.accumulo.core.fate.zookeeper.ZooUtil; +import org.apache.accumulo.core.lock.ServiceLock; +import org.apache.accumulo.core.metadata.SystemTables; import org.apache.accumulo.core.securityImpl.thrift.TCredentials; -import org.apache.accumulo.manager.fate.FateManager.FatePartition; +import org.apache.accumulo.manager.tableOps.FateEnv; +import org.apache.accumulo.manager.tableOps.TraceRepo; import org.apache.accumulo.server.ServerContext; import org.apache.accumulo.server.security.AuditedSecurityOperation; import org.slf4j.Logger; @@ -43,11 +53,24 @@ public class FateWorker implements FateWorkerService.Iface { private final ServerContext context; private final AuditedSecurityOperation security; private final Set currentPartitions; + private volatile Fate fate; - public FateWorker(ServerContext ctx) { + + public FateWorker(ServerContext ctx, Supplier serviceLockSupplier) { this.context = ctx; this.security = ctx.getSecurityOperation(); this.currentPartitions = Collections.synchronizedSet(new HashSet<>()); + this.fate = null; + } + + public void setLock(ServiceLock lock){ + FateEnv env = new FateWorkerEnv(context, lock); + Predicate isLockHeld = + l -> ServiceLock.isLockHeld(context.getZooCache(), l); + UserFateStore store = new UserFateStore<>(context, + SystemTables.FATE.tableName(), lock.getLockID(), isLockHeld); + this.fate 
= new Fate<>(env, store, true, TraceRepo::toLogString, + context.getConfiguration(), context.getScheduledExecutor()); } @Override @@ -73,12 +96,13 @@ public boolean setPartitions(TInfo tinfo, TCredentials credentials, List log.info("old partition {}", p)); currentPartitions.clear(); desired.stream().map(FatePartition::from).forEach(currentPartitions::add); desired.stream().map(FatePartition::from).forEach(p -> log.info("new partition {}", p)); log.info("Changed partitions from {} to {}", expectedSet, currentPartitions); + fate.setPartitions(Set.copyOf(currentPartitions)); return true; } else { log.info("Did not change partitions to {} because {} != {}", desired, expectedSet, diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java new file mode 100644 index 00000000000..dd2ccb3399b --- /dev/null +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java @@ -0,0 +1,141 @@ +package org.apache.accumulo.manager.fate; + +import org.apache.accumulo.core.client.AccumuloException; +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.data.TableId; +import org.apache.accumulo.core.dataImpl.KeyExtent; +import org.apache.accumulo.core.lock.ServiceLock; +import org.apache.accumulo.core.manager.thrift.BulkImportState; +import org.apache.accumulo.core.metadata.TServerInstance; +import org.apache.accumulo.core.metadata.schema.Ample; +import org.apache.accumulo.core.metadata.schema.ExternalCompactionId; +import org.apache.accumulo.core.util.time.SteadyTime; +import org.apache.accumulo.manager.EventPublisher; +import org.apache.accumulo.manager.split.Splitter; +import org.apache.accumulo.manager.tableOps.FateEnv; +import org.apache.accumulo.server.ServerContext; +import org.apache.accumulo.server.fs.VolumeManager; +import org.apache.accumulo.server.manager.LiveTServerSet; +import 
org.apache.accumulo.server.tables.TableManager; + +import java.util.Collection; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.function.Supplier; + +public class FateWorkerEnv implements FateEnv { + private final ServerContext ctx; + private final ExecutorService refreshPool; + private final ExecutorService renamePool; + private final ServiceLock serviceLock; + private final LiveTServerSet tservers; + + FateWorkerEnv(ServerContext ctx, ServiceLock lock){ + this.ctx = ctx; + // TODO create the proper way + this.refreshPool = Executors.newFixedThreadPool(2); + this.renamePool = Executors.newFixedThreadPool(2); + this.serviceLock = lock; + this.tservers = new LiveTServerSet(ctx); + } + + @Override + public ServerContext getContext() { + return ctx; + } + + @Override + public EventPublisher getEventPublisher() { + // TODO do something w/ the events + return new EventPublisher() { + @Override + public void event(String msg, Object... args) { + + } + + @Override + public void event(Ample.DataLevel level, String msg, Object... args) { + + } + + @Override + public void event(TableId tableId, String msg, Object... args) { + + } + + @Override + public void event(KeyExtent extent, String msg, Object... args) { + + } + + @Override + public void event(Collection extents, String msg, Object... 
args) { + + } + }; + } + + @Override + public void recordCompactionCompletion(ExternalCompactionId ecid) { + // TODO do something w/ this + } + + @Override + public Set onlineTabletServers() { + return tservers.getSnapshot().getTservers(); + } + + @Override + public TableManager getTableManager() { + return ctx.getTableManager(); + } + + @Override + public VolumeManager getVolumeManager() { + return ctx.getVolumeManager(); + } + + @Override + public void updateBulkImportStatus(String string, BulkImportState bulkImportState) { + //TODO + } + + @Override + public void removeBulkImportStatus(String sourceDir) { + //TODO + } + + @Override + public ServiceLock getServiceLock() { + return serviceLock; + } + + @Override + public SteadyTime getSteadyTime() { + try { + return SteadyTime.from(ctx.instanceOperations().getManagerTime()); + } catch (AccumuloException e) { + // TODO exceptions, add to to method signature or use a diff type?? + throw new RuntimeException(e); + } catch (AccumuloSecurityException e) { + throw new RuntimeException(e); + } + // return ctx.get + } + + @Override + public ExecutorService getTabletRefreshThreadPool() { + return refreshPool; + } + + @Override + public Splitter getSplitter() { + throw new UnsupportedOperationException(); + } + + @Override + public ExecutorService getRenamePool() { + return renamePool; + } +} diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java index d02a48ce75c..240a14b3c6c 100644 --- a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java +++ b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.concurrent.Executors; +import org.apache.accumulo.core.client.Accumulo; import org.apache.accumulo.manager.ManagerWorker; import org.apache.accumulo.manager.fate.FateManager; import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl; @@ -57,6 
+58,13 @@ public void test() throws Exception { managerWorkers.add(exec(ManagerWorker.class)); } + try(var client = Accumulo.newClient().from(getClientProperties()).build()){ + for(int i =0; i<30;i++){ + client.tableOperations().create("t"+i); + log.info("Created table t{}", i); + } + } + Thread.sleep(30_000); System.out.println("DONE"); // TODO kill processes diff --git a/test/src/main/java/org/apache/accumulo/test/fate/FateStoreITBase.java b/test/src/main/java/org/apache/accumulo/test/fate/FateStoreITBase.java index 2198b737675..67b9ac67f2e 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/FateStoreITBase.java +++ b/test/src/main/java/org/apache/accumulo/test/fate/FateStoreITBase.java @@ -55,6 +55,7 @@ import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.FateKey; import org.apache.accumulo.core.fate.FateKey.FateKeyType; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.FateStore; import org.apache.accumulo.core.fate.FateStore.FateTxStore; import org.apache.accumulo.core.fate.ReadOnlyFateStore.FateIdStatus; @@ -199,8 +200,8 @@ protected void testDeferredOverflow(FateStore store, ServerContext sctx try { // Run and verify all 10 transactions still exist and were not // run because of the deferral time of all the transactions - future = executor.submit(() -> store.runnable(keepRunning, - fateIdStatus -> transactions.remove(fateIdStatus.getFateId()))); + future = executor.submit(() -> store.runnable(Set.of(FatePartition.all(store.type())), + keepRunning, fateIdStatus -> transactions.remove(fateIdStatus.getFateId()))); Thread.sleep(2000); assertEquals(10, transactions.size()); // Setting this flag to false should terminate the task if sleeping @@ -225,8 +226,8 @@ protected void testDeferredOverflow(FateStore store, ServerContext sctx // Run and verify all 11 transactions were processed // and removed from the store keepRunning.set(true); - future = executor.submit(() -> 
store.runnable(keepRunning, - fateIdStatus -> transactions.remove(fateIdStatus.getFateId()))); + future = executor.submit(() -> store.runnable(Set.of(FatePartition.all(store.type())), + keepRunning, fateIdStatus -> transactions.remove(fateIdStatus.getFateId()))); Wait.waitFor(transactions::isEmpty); // Setting this flag to false should terminate the task if sleeping keepRunning.set(false); @@ -769,5 +770,4 @@ public TestOperation2() { super("testOperation2"); } } - } From 1bc611e0d887e674a7bb539e897f40cec3c2934a Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 5 Feb 2026 22:40:14 +0000 Subject: [PATCH 08/38] WIP --- .../java/org/apache/accumulo/manager/fate/FateWorker.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index c62e07ee5a2..bc26b1eb590 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -69,8 +69,10 @@ public void setLock(ServiceLock lock){ l -> ServiceLock.isLockHeld(context.getZooCache(), l); UserFateStore store = new UserFateStore<>(context, SystemTables.FATE.tableName(), lock.getLockID(), isLockHeld); - this.fate = new Fate<>(env, store, true, TraceRepo::toLogString, + this.fate = new Fate<>(env, store, false, TraceRepo::toLogString, context.getConfiguration(), context.getScheduledExecutor()); + // TODO where will the 2 fate cleanup task run? + } @Override From 9e78b23d4409a927b348a7d41679ef4831b50c3b Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 5 Feb 2026 23:26:37 +0000 Subject: [PATCH 09/38] Refactor manager split code used by fate Separated out functionality in the manager Splitter class that was only used by the fate split operation. 
This avoids having to expose code used by TGW to Fate, making it easier to execute fate operations outside the manager. --- .../org/apache/accumulo/manager/Manager.java | 9 +- .../manager/split/SplitFileCache.java | 143 ++++++++++++++++++ .../accumulo/manager/split/Splitter.java | 109 ------------- .../accumulo/manager/tableOps/FateEnv.java | 4 +- .../manager/tableOps/split/UpdateTablets.java | 2 +- .../tableOps/split/UpdateTabletsTest.java | 26 ++-- 6 files changed, 169 insertions(+), 124 deletions(-) create mode 100644 server/manager/src/main/java/org/apache/accumulo/manager/split/SplitFileCache.java diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index 72a9c0e687d..1cb9a97188a 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -111,6 +111,7 @@ import org.apache.accumulo.manager.merge.FindMergeableRangeTask; import org.apache.accumulo.manager.metrics.ManagerMetrics; import org.apache.accumulo.manager.recovery.RecoveryManager; +import org.apache.accumulo.manager.split.SplitFileCache; import org.apache.accumulo.manager.split.Splitter; import org.apache.accumulo.manager.state.TableCounts; import org.apache.accumulo.manager.tableOps.FateEnv; @@ -557,12 +558,17 @@ ManagerGoalState getManagerGoalState() { } private Splitter splitter; + private SplitFileCache splitFileCache; - @Override public Splitter getSplitter() { return splitter; } + @Override + public SplitFileCache getSplitFileCache() { + return splitFileCache; + } + public UpgradeCoordinator.UpgradeStatus getUpgradeStatus() { return upgradeCoordinator.getStatus(); } @@ -1118,6 +1124,7 @@ boolean canSuspendTablets() { this.splitter = new Splitter(this); this.splitter.start(); + this.splitFileCache = new SplitFileCache(context); try { Predicate isLockHeld = diff --git 
a/server/manager/src/main/java/org/apache/accumulo/manager/split/SplitFileCache.java b/server/manager/src/main/java/org/apache/accumulo/manager/split/SplitFileCache.java new file mode 100644 index 00000000000..4455cef553d --- /dev/null +++ b/server/manager/src/main/java/org/apache/accumulo/manager/split/SplitFileCache.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.accumulo.manager.split; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import org.apache.accumulo.core.data.TableId; +import org.apache.accumulo.core.file.FileOperations; +import org.apache.accumulo.core.file.FileSKVIterator; +import org.apache.accumulo.core.metadata.TabletFile; +import org.apache.accumulo.core.util.cache.Caches; +import org.apache.accumulo.server.ServerContext; +import org.apache.accumulo.server.conf.TableConfiguration; +import org.apache.hadoop.fs.FileSystem; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.github.benmanes.caffeine.cache.CacheLoader; +import com.github.benmanes.caffeine.cache.LoadingCache; +import com.github.benmanes.caffeine.cache.Weigher; + +public class SplitFileCache { + + private static final Logger LOG = LoggerFactory.getLogger(SplitFileCache.class); + + private static class CacheKey { + + final TableId tableId; + final TabletFile tabletFile; + + public CacheKey(TableId tableId, TabletFile tabletFile) { + this.tableId = tableId; + this.tabletFile = tabletFile; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CacheKey cacheKey = (CacheKey) o; + return Objects.equals(tableId, cacheKey.tableId) + && Objects.equals(tabletFile, cacheKey.tabletFile); + } + + @Override + public int hashCode() { + return Objects.hash(tableId, tabletFile); + } + + } + + public static Map tryToGetFirstAndLastRows( + ServerContext context, TableConfiguration tableConf, Set dataFiles) { + + HashMap dataFilesInfo = new HashMap<>(); + + long t1 = System.currentTimeMillis(); + + for (T dataFile : dataFiles) { + + FileSKVIterator reader = null; + FileSystem ns = context.getVolumeManager().getFileSystemByPath(dataFile.getPath()); + try { + reader = 
FileOperations.getInstance().newReaderBuilder() + .forFile(dataFile, ns, ns.getConf(), tableConf.getCryptoService()) + .withTableConfiguration(tableConf).build(); + + dataFilesInfo.put(dataFile, reader.getFileRange()); + } catch (IOException ioe) { + LOG.warn("Failed to read data file to determine first and last key : " + dataFile, ioe); + } finally { + if (reader != null) { + try { + reader.close(); + } catch (IOException ioe) { + LOG.warn("failed to close " + dataFile, ioe); + } + } + } + + } + + long t2 = System.currentTimeMillis(); + + String message = String.format("Found first and last keys for %d data files in %6.2f secs", + dataFiles.size(), (t2 - t1) / 1000.0); + if (t2 - t1 > 500) { + LOG.debug(message); + } else { + LOG.trace(message); + } + + return dataFilesInfo; + } + + final LoadingCache splitFileCache; + + public SplitFileCache(ServerContext context) { + Weigher weigher = (key, frange) -> key.tableId.canonical() + .length() + key.tabletFile.getPath().toString().length() + + (frange.empty ? 
0 + : frange.rowRange.getStartKey().getLength() + frange.rowRange.getEndKey().getLength()); + + CacheLoader loader = key -> { + TableConfiguration tableConf = context.getTableConfiguration(key.tableId); + return tryToGetFirstAndLastRows(context, tableConf, Set.of(key.tabletFile)) + .get(key.tabletFile); + }; + + splitFileCache = context.getCaches().createNewBuilder(Caches.CacheName.SPLITTER_FILES, true) + .expireAfterAccess(10, TimeUnit.MINUTES).maximumWeight(10_000_000L).weigher(weigher) + .build(loader); + } + + public FileSKVIterator.FileRange getCachedFileInfo(TableId tableId, TabletFile tabletFile) { + return splitFileCache.get(new CacheKey(tableId, tabletFile)); + } +} diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/split/Splitter.java b/server/manager/src/main/java/org/apache/accumulo/manager/split/Splitter.java index 58d02a4f81f..1f21fde170e 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/split/Splitter.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/split/Splitter.java @@ -20,37 +20,23 @@ import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly; -import java.io.IOException; import java.util.HashMap; import java.util.Map; -import java.util.Objects; -import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import org.apache.accumulo.core.data.TableId; import org.apache.accumulo.core.dataImpl.KeyExtent; import org.apache.accumulo.core.fate.Fate; import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.FateKey; -import org.apache.accumulo.core.file.FileOperations; -import org.apache.accumulo.core.file.FileSKVIterator; -import org.apache.accumulo.core.metadata.TabletFile; -import org.apache.accumulo.core.util.cache.Caches.CacheName; import org.apache.accumulo.manager.Manager; import org.apache.accumulo.manager.tableOps.split.FindSplits; 
import org.apache.accumulo.server.ServerContext; -import org.apache.accumulo.server.conf.TableConfiguration; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.io.Text; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.github.benmanes.caffeine.cache.CacheLoader; -import com.github.benmanes.caffeine.cache.LoadingCache; -import com.github.benmanes.caffeine.cache.Weigher; - public class Splitter { private static final Logger LOG = LoggerFactory.getLogger(Splitter.class); @@ -115,82 +101,6 @@ private void seedSplits(FateInstanceType instanceType, Map split } } - public static Map tryToGetFirstAndLastRows( - ServerContext context, TableConfiguration tableConf, Set dataFiles) { - - HashMap dataFilesInfo = new HashMap<>(); - - long t1 = System.currentTimeMillis(); - - for (T dataFile : dataFiles) { - - FileSKVIterator reader = null; - FileSystem ns = context.getVolumeManager().getFileSystemByPath(dataFile.getPath()); - try { - reader = FileOperations.getInstance().newReaderBuilder() - .forFile(dataFile, ns, ns.getConf(), tableConf.getCryptoService()) - .withTableConfiguration(tableConf).build(); - - dataFilesInfo.put(dataFile, reader.getFileRange()); - } catch (IOException ioe) { - LOG.warn("Failed to read data file to determine first and last key : " + dataFile, ioe); - } finally { - if (reader != null) { - try { - reader.close(); - } catch (IOException ioe) { - LOG.warn("failed to close " + dataFile, ioe); - } - } - } - - } - - long t2 = System.currentTimeMillis(); - - String message = String.format("Found first and last keys for %d data files in %6.2f secs", - dataFiles.size(), (t2 - t1) / 1000.0); - if (t2 - t1 > 500) { - LOG.debug(message); - } else { - LOG.trace(message); - } - - return dataFilesInfo; - } - - private static class CacheKey { - - final TableId tableId; - final TabletFile tabletFile; - - public CacheKey(TableId tableId, TabletFile tabletFile) { - this.tableId = tableId; - this.tabletFile = tabletFile; - } - - @Override 
- public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - CacheKey cacheKey = (CacheKey) o; - return Objects.equals(tableId, cacheKey.tableId) - && Objects.equals(tabletFile, cacheKey.tabletFile); - } - - @Override - public int hashCode() { - return Objects.hash(tableId, tabletFile); - } - - } - - final LoadingCache splitFileCache; - public Splitter(Manager manager) { this.manager = manager; ServerContext context = manager.getContext(); @@ -198,21 +108,6 @@ public Splitter(Manager manager) { this.splitExecutor = context.threadPools().getPoolBuilder("split_seeder").numCoreThreads(1) .numMaxThreads(1).withTimeOut(0L, TimeUnit.MILLISECONDS).enableThreadPoolMetrics().build(); - Weigher weigher = (key, frange) -> key.tableId.canonical() - .length() + key.tabletFile.getPath().toString().length() - + (frange.empty ? 0 - : frange.rowRange.getStartKey().getLength() + frange.rowRange.getEndKey().getLength()); - - CacheLoader loader = key -> { - TableConfiguration tableConf = context.getTableConfiguration(key.tableId); - return tryToGetFirstAndLastRows(context, tableConf, Set.of(key.tabletFile)) - .get(key.tabletFile); - }; - - splitFileCache = context.getCaches().createNewBuilder(CacheName.SPLITTER_FILES, true) - .expireAfterAccess(10, TimeUnit.MINUTES).maximumWeight(10_000_000L).weigher(weigher) - .build(loader); - } public synchronized void start() { @@ -223,10 +118,6 @@ public synchronized void stop() { splitExecutor.shutdownNow(); } - public FileSKVIterator.FileRange getCachedFileInfo(TableId tableId, TabletFile tabletFile) { - return splitFileCache.get(new CacheKey(tableId, tabletFile)); - } - public void initiateSplit(KeyExtent extent) { // Want to avoid queuing the same tablet multiple times, it would not cause bugs but would waste // work. 
Use the metadata row to identify a tablet because the KeyExtent also includes the prev diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/FateEnv.java b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/FateEnv.java index 6b78e503e4c..e76a613f37b 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/FateEnv.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/FateEnv.java @@ -27,7 +27,7 @@ import org.apache.accumulo.core.metadata.schema.ExternalCompactionId; import org.apache.accumulo.core.util.time.SteadyTime; import org.apache.accumulo.manager.EventPublisher; -import org.apache.accumulo.manager.split.Splitter; +import org.apache.accumulo.manager.split.SplitFileCache; import org.apache.accumulo.server.ServerContext; import org.apache.accumulo.server.fs.VolumeManager; import org.apache.accumulo.server.tables.TableManager; @@ -55,7 +55,7 @@ public interface FateEnv { ExecutorService getTabletRefreshThreadPool(); - Splitter getSplitter(); + SplitFileCache getSplitFileCache(); ExecutorService getRenamePool(); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/UpdateTablets.java b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/UpdateTablets.java index f3147165b16..4c9b1f11c47 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/UpdateTablets.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/UpdateTablets.java @@ -111,7 +111,7 @@ public Repo call(FateId fateId, FateEnv env) throws Exception { var newTablets = splitInfo.getTablets(); var newTabletsFiles = getNewTabletFiles(fateId, newTablets, tabletMetadata, - file -> env.getSplitter().getCachedFileInfo(splitInfo.getOriginal().tableId(), file)); + file -> env.getSplitFileCache().getCachedFileInfo(splitInfo.getOriginal().tableId(), file)); addNewTablets(fateId, env, tabletMetadata, opid, newTablets, 
newTabletsFiles); diff --git a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java index 0e242f75e04..eae62009f72 100644 --- a/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java +++ b/server/manager/src/test/java/org/apache/accumulo/manager/tableOps/split/UpdateTabletsTest.java @@ -66,7 +66,7 @@ import org.apache.accumulo.core.tabletserver.log.LogEntry; import org.apache.accumulo.core.util.time.SteadyTime; import org.apache.accumulo.manager.Manager; -import org.apache.accumulo.manager.split.Splitter; +import org.apache.accumulo.manager.split.SplitFileCache; import org.apache.accumulo.server.ServerContext; import org.apache.accumulo.server.metadata.ConditionalTabletMutatorImpl; import org.apache.hadoop.fs.Path; @@ -237,12 +237,16 @@ public void testManyColumns() throws Exception { EasyMock.expect(manager.getContext()).andReturn(context).atLeastOnce(); Ample ample = EasyMock.mock(Ample.class); EasyMock.expect(context.getAmple()).andReturn(ample).atLeastOnce(); - Splitter splitter = EasyMock.mock(Splitter.class); - EasyMock.expect(splitter.getCachedFileInfo(tableId, file1)).andReturn(newFileInfo("a", "z")); - EasyMock.expect(splitter.getCachedFileInfo(tableId, file2)).andReturn(newFileInfo("a", "b")); - EasyMock.expect(splitter.getCachedFileInfo(tableId, file3)).andReturn(newFileInfo("d", "f")); - EasyMock.expect(splitter.getCachedFileInfo(tableId, file4)).andReturn(newFileInfo("d", "j")); - EasyMock.expect(manager.getSplitter()).andReturn(splitter).atLeastOnce(); + SplitFileCache splitFileCache = EasyMock.mock(SplitFileCache.class); + EasyMock.expect(splitFileCache.getCachedFileInfo(tableId, file1)) + .andReturn(newFileInfo("a", "z")); + EasyMock.expect(splitFileCache.getCachedFileInfo(tableId, file2)) + .andReturn(newFileInfo("a", "b")); + 
EasyMock.expect(splitFileCache.getCachedFileInfo(tableId, file3)) + .andReturn(newFileInfo("d", "f")); + EasyMock.expect(splitFileCache.getCachedFileInfo(tableId, file4)) + .andReturn(newFileInfo("d", "j")); + EasyMock.expect(manager.getSplitFileCache()).andReturn(splitFileCache).atLeastOnce(); EasyMock.expect(manager.getSteadyTime()).andReturn(SteadyTime.from(100_000, TimeUnit.SECONDS)) .atLeastOnce(); @@ -389,8 +393,8 @@ public void testManyColumns() throws Exception { tabletsMutator.close(); EasyMock.expectLastCall().anyTimes(); - EasyMock.replay(manager, context, ample, tabletMeta, splitter, tabletsMutator, tablet1Mutator, - tablet2Mutator, tablet3Mutator, cr, compactions); + EasyMock.replay(manager, context, ample, tabletMeta, splitFileCache, tabletsMutator, + tablet1Mutator, tablet2Mutator, tablet3Mutator, cr, compactions); // Now we can actually test the split code that writes the new tablets with a bunch columns in // the original tablet SortedSet splits = new TreeSet<>(List.of(newExtent1.endRow(), newExtent2.endRow())); @@ -399,8 +403,8 @@ public void testManyColumns() throws Exception { List.of(dir1, dir2)); updateTablets.call(fateId, manager); - EasyMock.verify(manager, context, ample, tabletMeta, splitter, tabletsMutator, tablet1Mutator, - tablet2Mutator, tablet3Mutator, cr, compactions); + EasyMock.verify(manager, context, ample, tabletMeta, splitFileCache, tabletsMutator, + tablet1Mutator, tablet2Mutator, tablet3Mutator, cr, compactions); } @Test From d7e16c693bd9ed9ba8b8a16fd062703ae6fe3810 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Fri, 6 Feb 2026 18:06:31 +0000 Subject: [PATCH 10/38] WIP --- .../accumulo/core/fate/AbstractFateStore.java | 2 +- .../org/apache/accumulo/core/fate/Fate.java | 2 +- .../org/apache/accumulo/manager/Manager.java | 5 +- .../accumulo/manager/ManagerWorker.java | 3 + .../accumulo/manager/fate/FateManager.java | 45 ++-- .../accumulo/manager/fate/FateWorker.java | 15 +- .../accumulo/manager/fate/FateWorkerEnv.java | 253 
++++++++++-------- .../accumulo/test/MultipleManagerIT.java | 58 +++- 8 files changed, 232 insertions(+), 151 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java index 38d1ecba3cd..39bcbe258fd 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java @@ -164,7 +164,7 @@ public FateTxStore reserve(FateId fateId) { public void runnable(Set partitions, AtomicBoolean keepWaiting, Consumer idConsumer) { - if(partitions.isEmpty()){ + if (partitions.isEmpty()) { return; } diff --git a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java index b952009d1cd..dfcdb424e9f 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java @@ -572,7 +572,7 @@ public void close() { store.close(); } - public void setPartitions(Set partitions){ + public void setPartitions(Set partitions) { synchronized (fateExecutors) { // TODO would need to set these when executors change... 
fateExecutors.forEach(fe -> fe.setPartitions(partitions)); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index fad0a516931..ea7648e33b2 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -1277,9 +1277,10 @@ protected Fate initializeFateInstance(ServerContext context, FateStore< ThreadPools.watchCriticalScheduledTask(context.getScheduledExecutor() .scheduleWithFixedDelay(fateCleaner::ageOff, 10, 4 * 60, MINUTES)); - if(store.type() == FateInstanceType.META){ + if (store.type() == FateInstanceType.META) { fateInstance.setPartitions(Set.of(FatePartition.all(FateInstanceType.META))); - }// else do not run user transactions for now in the manager... it will have an empty set of partitions + } // else do not run user transactions for now in the manager... it will have an empty set of + // partitions return fateInstance; } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java index 9218a498529..84ab1f397d3 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java @@ -44,6 +44,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +/** + * An assistant to the manager + */ public class ManagerWorker extends AbstractServer { private static final Logger log = LoggerFactory.getLogger(ManagerWorker.class); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 5f3a4749c93..3091640ea62 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -40,9 +40,16 @@ import org.apache.thrift.TException; import com.google.common.net.HostAndPort; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +/** + * Partitions fate across manager assistant processes. This is done by assigning ranges of the fate uuid key space to different processes. + */ public class FateManager { + private static final Logger log = LoggerFactory.getLogger(FateManager.class); + private final ServerContext context; public FateManager(ServerContext context) { @@ -50,7 +57,7 @@ public FateManager(ServerContext context) { } public void managerWorkers() throws Exception { - while (true) { + outer : while (true) { // TODO make configurable Thread.sleep(10_000); @@ -61,8 +68,8 @@ public void managerWorkers() throws Exception { // TODO handle duplicate current assignments - System.out.println("current : " + currentAssignments); - System.out.println("desired : " + desiredParititions); + log.info("current : {}", currentAssignments); + log.info("desired : {}", desiredParititions); Map> desired = computeDesiredAssignments(currentAssignments, desiredParititions); @@ -83,7 +90,11 @@ public void managerWorkers() throws Exception { var curr = currentAssignments.getOrDefault(worker, Set.of()); if (!curr.equals(partitions)) { var intersection = Sets.intersection(curr, partitions); - setWorkerPartitions(worker, curr, intersection); + if(!setWorkerPartitions(worker, curr, intersection)){ + log.debug("Failed to set partitions for {} to {}", worker, intersection); + // could not set, so start completely over + continue outer; + } currentAssignments.put(worker, intersection); } } @@ -95,19 +106,23 @@ public void managerWorkers() throws Exception { Set partitions = entry.getValue(); var curr = currentAssignments.getOrDefault(worker, Set.of()); if (!curr.equals(partitions)) { - setWorkerPartitions(worker, curr, partitions); + if(!setWorkerPartitions(worker, curr, 
partitions)){ + log.debug("Failed to set partitions for {} to {}", worker, partitions); + // could not set, so start completely over + continue outer; + } } } } } - private void setWorkerPartitions(HostAndPort address, Set current, + private boolean setWorkerPartitions(HostAndPort address, Set current, Set desired) throws TException { // TODO make a compare and set type RPC that uses the current and desired FateWorkerService.Client client = ThriftUtil.getClient(ThriftClientTypes.FATE_WORKER, address, context); try { - client.setPartitions(TraceUtil.traceInfo(), context.rpcCreds(), + return client.setPartitions(TraceUtil.traceInfo(), context.rpcCreds(), current.stream().map(FatePartition::toThrift).toList(), desired.stream().map(FatePartition::toThrift).toList()); } finally { @@ -137,12 +152,12 @@ private Map> computeDesiredAssignments( // remove everything that is assigned currentAssignments.values().forEach(p -> p.forEach(availablePartitions::remove)); - System.out.println("currentAssignments.size():" + currentAssignments.size()); - System.out.println("desiredParititions.size():" + desiredParititions.size()); - System.out.println("minPerWorker:" + minPerWorker); - System.out.println("maxPerWorker:" + maxPerWorker); - System.out.println("desiredWorkersWithMax:" + desiredWorkersWithMax); - System.out.println("availablePartitions:" + availablePartitions); + log.debug("currentAssignments.size():{}", currentAssignments.size()); + log.debug("desiredParititions.size():{}", desiredParititions.size()); + log.debug("minPerWorker:{}", minPerWorker); + log.debug("maxPerWorker:{}", maxPerWorker); + log.debug("desiredWorkersWithMax:{}", desiredWorkersWithMax); + log.debug("availablePartitions:{}", availablePartitions); // Find workers that currently have too many partitions assigned and place their excess in the // available set. Let workers keep what they have when its under the limit. 
@@ -189,7 +204,7 @@ private Map> computeDesiredAssignments( } desiredAssignments.forEach((hp, parts) -> { - System.out.println(" desired " + hp + " " + parts.size() + " " + parts); + log.debug(" desired " + hp + " " + parts.size() + " " + parts); }); return desiredAssignments; @@ -212,7 +227,7 @@ private Map> getCurrentAssignments() throws TExce var workers = context.getServerPaths().getManagerWorker(DEFAULT_RG_ONLY, AddressSelector.all(), true); - System.out.println("workers : " + workers); + log.debug("workers : " + workers); Map> currentAssignments = new HashMap<>(); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index bc26b1eb590..56c5f0dee32 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -35,7 +35,6 @@ import org.apache.accumulo.core.fate.thrift.FateWorkerService; import org.apache.accumulo.core.fate.thrift.TFatePartition; import org.apache.accumulo.core.fate.user.UserFateStore; -import org.apache.accumulo.core.fate.zookeeper.MetaFateStore; import org.apache.accumulo.core.fate.zookeeper.ZooUtil; import org.apache.accumulo.core.lock.ServiceLock; import org.apache.accumulo.core.metadata.SystemTables; @@ -55,7 +54,6 @@ public class FateWorker implements FateWorkerService.Iface { private final Set currentPartitions; private volatile Fate fate; - public FateWorker(ServerContext ctx, Supplier serviceLockSupplier) { this.context = ctx; this.security = ctx.getSecurityOperation(); @@ -63,14 +61,13 @@ public FateWorker(ServerContext ctx, Supplier serviceLockSupplier) this.fate = null; } - public void setLock(ServiceLock lock){ + public void setLock(ServiceLock lock) { FateEnv env = new FateWorkerEnv(context, lock); - Predicate isLockHeld = - l -> ServiceLock.isLockHeld(context.getZooCache(), l); - UserFateStore store = 
new UserFateStore<>(context, - SystemTables.FATE.tableName(), lock.getLockID(), isLockHeld); - this.fate = new Fate<>(env, store, false, TraceRepo::toLogString, - context.getConfiguration(), context.getScheduledExecutor()); + Predicate isLockHeld = l -> ServiceLock.isLockHeld(context.getZooCache(), l); + UserFateStore store = + new UserFateStore<>(context, SystemTables.FATE.tableName(), lock.getLockID(), isLockHeld); + this.fate = new Fate<>(env, store, false, TraceRepo::toLogString, context.getConfiguration(), + context.getScheduledExecutor()); // TODO where will the 2 fate cleanup task run? } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java index dd2ccb3399b..0b38196ae0d 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java @@ -1,5 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ package org.apache.accumulo.manager.fate; +import java.util.Collection; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + import org.apache.accumulo.core.client.AccumuloException; import org.apache.accumulo.core.client.AccumuloSecurityException; import org.apache.accumulo.core.data.TableId; @@ -18,124 +41,118 @@ import org.apache.accumulo.server.manager.LiveTServerSet; import org.apache.accumulo.server.tables.TableManager; -import java.util.Collection; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.function.Supplier; - public class FateWorkerEnv implements FateEnv { - private final ServerContext ctx; - private final ExecutorService refreshPool; - private final ExecutorService renamePool; - private final ServiceLock serviceLock; - private final LiveTServerSet tservers; - - FateWorkerEnv(ServerContext ctx, ServiceLock lock){ - this.ctx = ctx; - // TODO create the proper way - this.refreshPool = Executors.newFixedThreadPool(2); - this.renamePool = Executors.newFixedThreadPool(2); - this.serviceLock = lock; - this.tservers = new LiveTServerSet(ctx); - } - - @Override - public ServerContext getContext() { - return ctx; - } - - @Override - public EventPublisher getEventPublisher() { - // TODO do something w/ the events - return new EventPublisher() { - @Override - public void event(String msg, Object... args) { - - } - - @Override - public void event(Ample.DataLevel level, String msg, Object... args) { - - } - - @Override - public void event(TableId tableId, String msg, Object... args) { - - } - - @Override - public void event(KeyExtent extent, String msg, Object... args) { - - } - - @Override - public void event(Collection extents, String msg, Object... 
args) { - - } - }; - } - - @Override - public void recordCompactionCompletion(ExternalCompactionId ecid) { - // TODO do something w/ this - } - - @Override - public Set onlineTabletServers() { - return tservers.getSnapshot().getTservers(); - } - - @Override - public TableManager getTableManager() { - return ctx.getTableManager(); - } - - @Override - public VolumeManager getVolumeManager() { - return ctx.getVolumeManager(); - } - - @Override - public void updateBulkImportStatus(String string, BulkImportState bulkImportState) { - //TODO - } - - @Override - public void removeBulkImportStatus(String sourceDir) { - //TODO - } - - @Override - public ServiceLock getServiceLock() { - return serviceLock; - } - - @Override - public SteadyTime getSteadyTime() { - try { - return SteadyTime.from(ctx.instanceOperations().getManagerTime()); - } catch (AccumuloException e) { - // TODO exceptions, add to to method signature or use a diff type?? - throw new RuntimeException(e); - } catch (AccumuloSecurityException e) { - throw new RuntimeException(e); - } - // return ctx.get - } - - @Override - public ExecutorService getTabletRefreshThreadPool() { - return refreshPool; - } - - @Override - public Splitter getSplitter() { - throw new UnsupportedOperationException(); - } - - @Override - public ExecutorService getRenamePool() { - return renamePool; + private final ServerContext ctx; + private final ExecutorService refreshPool; + private final ExecutorService renamePool; + private final ServiceLock serviceLock; + private final LiveTServerSet tservers; + + FateWorkerEnv(ServerContext ctx, ServiceLock lock) { + this.ctx = ctx; + // TODO create the proper way + this.refreshPool = Executors.newFixedThreadPool(2); + this.renamePool = Executors.newFixedThreadPool(2); + this.serviceLock = lock; + this.tservers = new LiveTServerSet(ctx); + } + + @Override + public ServerContext getContext() { + return ctx; + } + + @Override + public EventPublisher getEventPublisher() { + // TODO do something w/ 
the events + return new EventPublisher() { + @Override + public void event(String msg, Object... args) { + + } + + @Override + public void event(Ample.DataLevel level, String msg, Object... args) { + + } + + @Override + public void event(TableId tableId, String msg, Object... args) { + + } + + @Override + public void event(KeyExtent extent, String msg, Object... args) { + + } + + @Override + public void event(Collection extents, String msg, Object... args) { + + } + }; + } + + @Override + public void recordCompactionCompletion(ExternalCompactionId ecid) { + // TODO do something w/ this + } + + @Override + public Set onlineTabletServers() { + return tservers.getSnapshot().getTservers(); + } + + @Override + public TableManager getTableManager() { + return ctx.getTableManager(); + } + + @Override + public VolumeManager getVolumeManager() { + return ctx.getVolumeManager(); + } + + @Override + public void updateBulkImportStatus(String string, BulkImportState bulkImportState) { + // TODO + } + + @Override + public void removeBulkImportStatus(String sourceDir) { + // TODO + } + + @Override + public ServiceLock getServiceLock() { + return serviceLock; + } + + @Override + public SteadyTime getSteadyTime() { + try { + return SteadyTime.from(ctx.instanceOperations().getManagerTime()); + } catch (AccumuloException e) { + // TODO exceptions, add to to method signature or use a diff type?? 
+ throw new RuntimeException(e); + } catch (AccumuloSecurityException e) { + throw new RuntimeException(e); } + // return ctx.get + } + + @Override + public ExecutorService getTabletRefreshThreadPool() { + return refreshPool; + } + + @Override + public Splitter getSplitter() { + throw new UnsupportedOperationException(); + } + + @Override + public ExecutorService getRenamePool() { + return renamePool; + } } diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java index 240a14b3c6c..ed97ecf9f99 100644 --- a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java +++ b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java @@ -18,16 +18,26 @@ */ package org.apache.accumulo.test; +import static org.junit.jupiter.api.Assertions.assertEquals; + import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.TreeSet; import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import org.apache.accumulo.core.client.Accumulo; +import org.apache.accumulo.core.client.admin.CompactionConfig; +import org.apache.accumulo.core.data.Mutation; import org.apache.accumulo.manager.ManagerWorker; import org.apache.accumulo.manager.fate.FateManager; import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl; import org.apache.accumulo.test.functional.ConfigurableMacBase; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Text; import org.junit.jupiter.api.Test; public class MultipleManagerIT extends ConfigurableMacBase { @@ -58,13 +68,51 @@ public void test() throws Exception { managerWorkers.add(exec(ManagerWorker.class)); } - try(var client = Accumulo.newClient().from(getClientProperties()).build()){ - for(int i =0; i<30;i++){ - client.tableOperations().create("t"+i); - log.info("Created table t{}", i); - } + try (var 
client = Accumulo.newClient().from(getClientProperties()).build()) { + var splits = IntStream.range(1, 10).mapToObj(i -> String.format("%03d", i)).map(Text::new) + .collect(Collectors.toCollection(TreeSet::new)); + var tableOpFutures = new ArrayList>(); + for (int i = 0; i < 30; i++) { + var table = "t" + i; + // TODO seeing in the logs that fate operations for the same table are running on different processes, however there is a 5 second delay because there is no notification mechanism + + // TODO its hard to find everything related to a table id in the logs, especially when the table id is like "b" + var tableOpsFuture = executor.submit(() -> { + client.tableOperations().create(table); + log.info("Created table {}", table); + var expectedRows = new HashSet(); + try (var writer = client.createBatchWriter(table)) { + for (int r = 0; r < 10; r++) { + var row = String.format("%03d", r); + expectedRows.add(row); + Mutation m = new Mutation(row); + m.put("f", "q", "v"); + writer.addMutation(m); + } + } + log.info("Wrote data to table {}", table); + client.tableOperations().addSplits(table, splits); + log.info("Split table {}", table); // TODO split operation does not log table id and fate opid anywhere + client.tableOperations().compact(table, new CompactionConfig().setWait(true)); + log.info("Compacted table {}", table); + client.tableOperations().merge(table, null, null); + log.info("Merged table {}", table); + try (var scanner = client.createScanner(table)) { + var rowsSeen = scanner.stream().map(e -> e.getKey().getRowData().toString()) + .collect(Collectors.toSet()); + assertEquals(expectedRows, rowsSeen); + log.info("verified table {}", table); + } + return null; + }); + tableOpFutures.add(tableOpsFuture); } + for(var tof : tableOpFutures){ + tof.get(); + } + } + Thread.sleep(30_000); System.out.println("DONE"); // TODO kill processes From e1597f75493fdeb4a91832cd43e24e29fe9f339c Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Fri, 6 Feb 2026 21:20:35 +0000 
Subject: [PATCH 11/38] WIP --- .../org/apache/accumulo/core/fate/Fate.java | 12 +- .../accumulo/core/fate/FatePartition.java | 13 +- .../core/fate/user/UserFateStore.java | 5 +- .../accumulo/manager/fate/FateManager.java | 167 ++++++++---------- .../accumulo/manager/fate/FateWorkerEnv.java | 8 +- .../accumulo/test/MultipleManagerIT.java | 18 +- 6 files changed, 110 insertions(+), 113 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java index dfcdb424e9f..e5945166793 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java @@ -36,6 +36,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.TreeSet; @@ -90,6 +91,7 @@ public class Fate { private final AtomicBoolean keepRunning = new AtomicBoolean(true); // Visible for FlakyFate test object protected final Set> fateExecutors = new HashSet<>(); + private Set currentPartitions = Set.of(); public enum TxInfo { FATE_OP, AUTO_CLEAN, EXCEPTION, TX_AGEOFF, RETURN_VALUE @@ -222,8 +224,9 @@ public void run() { fe -> fe.getFateOps().equals(fateOps) && fe.getName().equals(fateExecutorName))) { log.debug("[{}] Adding FateExecutor for {} with {} threads", store.type(), fateOps, poolSize); - fateExecutors.add( - new FateExecutor<>(Fate.this, environment, fateOps, poolSize, fateExecutorName)); + var fateExecutor = new FateExecutor<>(Fate.this, environment, fateOps, poolSize, fateExecutorName); + fateExecutors.add(fateExecutor); + fateExecutor.setPartitions(currentPartitions); } } } @@ -573,9 +576,10 @@ public void close() { } public void setPartitions(Set partitions) { + Objects.requireNonNull(partitions); synchronized (fateExecutors) { - // TODO would need to set these when executors change... 
- fateExecutors.forEach(fe -> fe.setPartitions(partitions)); + currentPartitions = Set.copyOf(partitions); + fateExecutors.forEach(fe -> fe.setPartitions(currentPartitions)); } } diff --git a/core/src/main/java/org/apache/accumulo/core/fate/FatePartition.java b/core/src/main/java/org/apache/accumulo/core/fate/FatePartition.java index 290dbeabaf3..2c1ca11a718 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/FatePartition.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/FatePartition.java @@ -46,7 +46,18 @@ public static FatePartition all(FateInstanceType type) { }; } + private static final UUID LAST_UUID = new UUID(-1, -1); + + public boolean isEndInclusive() { + return end.getTxUUID().equals(LAST_UUID); + } + public boolean contains(FateId fateId) { - return start.compareTo(fateId) >= 0 && end.compareTo(fateId) <= 0; + if (isEndInclusive()) { + return start.compareTo(fateId) >= 0 && end.compareTo(fateId) <= 0; + } else { + return start.compareTo(fateId) >= 0 && end.compareTo(fateId) < 0; + } + } } diff --git a/core/src/main/java/org/apache/accumulo/core/fate/user/UserFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/user/UserFateStore.java index eaca40473df..7b20caad884 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/user/UserFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/user/UserFateStore.java @@ -295,8 +295,9 @@ protected Stream getTransactions(Set partitions, private Stream getTransactions(FatePartition partition, EnumSet statuses) { try { Scanner scanner = context.createScanner(tableName, Authorizations.EMPTY); - scanner - .setRange(new Range(getRowId(partition.start()), true, getRowId(partition.end()), true)); + var range = new Range(getRowId(partition.start()), true, getRowId(partition.end()), + partition.isEndInclusive()); + scanner.setRange(range); RowFateStatusFilter.configureScanner(scanner, statuses); // columns fetched here must be in/added to TxAdminColumnFamily for locality 
group benefits TxAdminColumnFamily.STATUS_COLUMN.fetch(scanner); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 3091640ea62..54244df7d4c 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -25,6 +25,7 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import org.apache.accumulo.core.fate.FateId; @@ -36,15 +37,17 @@ import org.apache.accumulo.core.rpc.clients.ThriftClientTypes; import org.apache.accumulo.core.trace.TraceUtil; import org.apache.accumulo.server.ServerContext; -import org.apache.hadoop.util.Sets; import org.apache.thrift.TException; - -import com.google.common.net.HostAndPort; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.base.Preconditions; +import com.google.common.collect.Sets; +import com.google.common.net.HostAndPort; + /** - * Partitions fate across manager assistant processes. This is done by assigning ranges of the fate uuid key space to different processes. + * Partitions fate across manager assistant processes. This is done by assigning ranges of the fate + * uuid key space to different processes. */ public class FateManager { @@ -56,47 +59,38 @@ public FateManager(ServerContext context) { this.context = context; } + // TODO remove, here for testing + public static final AtomicBoolean stop = new AtomicBoolean(false); + public void managerWorkers() throws Exception { - outer : while (true) { + outer: while (!stop.get()) { // TODO make configurable - Thread.sleep(10_000); + Thread.sleep(3_000); // TODO could support RG... 
could user ServerId // This map will contain all current workers even their partitions are empty Map> currentAssignments = getCurrentAssignments(); - Set desiredParititions = getDesiredPartitions(); - - // TODO handle duplicate current assignments - - log.info("current : {}", currentAssignments); - log.info("desired : {}", desiredParititions); + Set desiredParititions = getDesiredPartitions(currentAssignments.size()); Map> desired = computeDesiredAssignments(currentAssignments, desiredParititions); // are there any workers with extra partitions? If so need to unload those first. - boolean haveExtra = desired.entrySet().stream().anyMatch(e -> { - HostAndPort worker = e.getKey(); + for (Map.Entry> entry : desired.entrySet()) { + HostAndPort worker = entry.getKey(); + Set partitions = entry.getValue(); var curr = currentAssignments.getOrDefault(worker, Set.of()); - var extra = Sets.difference(curr, e.getValue()); - return !extra.isEmpty(); - }); - - if (haveExtra) { - // force unload of extra partitions to make them available for other workers - for (Map.Entry> entry : desired.entrySet()) { - HostAndPort worker = entry.getKey(); - Set partitions = entry.getValue(); - var curr = currentAssignments.getOrDefault(worker, Set.of()); - if (!curr.equals(partitions)) { - var intersection = Sets.intersection(curr, partitions); - if(!setWorkerPartitions(worker, curr, intersection)){ - log.debug("Failed to set partitions for {} to {}", worker, intersection); - // could not set, so start completely over - continue outer; - } - currentAssignments.put(worker, intersection); + if (!Sets.difference(curr, partitions).isEmpty()) { + // This worker has extra partitions that are not desired, unload those + var intersection = Sets.intersection(curr, partitions); + if (!setWorkerPartitions(worker, curr, intersection)) { + log.debug("Failed to set partitions for {} to {}", worker, intersection); + // could not set, so start completely over + continue outer; + } else { + log.debug("Set 
partitions for {} to {} from {}", worker, intersection, curr); } + currentAssignments.put(worker, intersection); } } @@ -106,10 +100,12 @@ public void managerWorkers() throws Exception { Set partitions = entry.getValue(); var curr = currentAssignments.getOrDefault(worker, Set.of()); if (!curr.equals(partitions)) { - if(!setWorkerPartitions(worker, curr, partitions)){ + if (!setWorkerPartitions(worker, curr, partitions)) { log.debug("Failed to set partitions for {} to {}", worker, partitions); // could not set, so start completely over continue outer; + } else { + log.debug("Set partitions for {} to {} from {}", worker, partitions, curr); } } } @@ -137,71 +133,28 @@ private boolean setWorkerPartitions(HostAndPort address, Set curr private Map> computeDesiredAssignments( Map> currentAssignments, Set desiredParititions) { - // min number of partitions a single worker must have - int minPerWorker = desiredParititions.size() / currentAssignments.size(); - // max number of partitions a single worker can have - int maxPerWorker = - minPerWorker + Math.min(desiredParititions.size() % currentAssignments.size(), 1); - // number of workers that can have the max partitions - int desiredWorkersWithMax = - desiredParititions.size() - minPerWorker * currentAssignments.size(); + Preconditions.checkArgument(currentAssignments.size() == desiredParititions.size()); Map> desiredAssignments = new HashMap<>(); - Set availablePartitions = new HashSet<>(desiredParititions); - - // remove everything that is assigned - currentAssignments.values().forEach(p -> p.forEach(availablePartitions::remove)); - - log.debug("currentAssignments.size():{}", currentAssignments.size()); - log.debug("desiredParititions.size():{}", desiredParititions.size()); - log.debug("minPerWorker:{}", minPerWorker); - log.debug("maxPerWorker:{}", maxPerWorker); - log.debug("desiredWorkersWithMax:{}", desiredWorkersWithMax); - log.debug("availablePartitions:{}", availablePartitions); - - // Find workers that currently 
have too many partitions assigned and place their excess in the - // available set. Let workers keep what they have when its under the limit. - int numWorkersWithMax = 0; - for (var worker : currentAssignments.keySet()) { - var assignments = new HashSet(); - var curr = currentAssignments.getOrDefault(worker, Set.of()); - // The number of partitions this worker can have, anything in excess should be added to - // available - int canHave = numWorkersWithMax < desiredWorkersWithMax ? maxPerWorker : minPerWorker; - - var iter = curr.iterator(); - for (int i = 0; i < canHave && iter.hasNext(); i++) { - assignments.add(iter.next()); - } - iter.forEachRemaining(availablePartitions::add); - desiredAssignments.put(worker, assignments); - if (curr.size() >= maxPerWorker) { - numWorkersWithMax++; - } - } + var copy = new HashSet<>(desiredParititions); - // Distribute available partitions to workers that do not have the minimum. - var availIter = availablePartitions.iterator(); - for (var worker : currentAssignments.keySet()) { - var assignments = desiredAssignments.get(worker); - while (assignments.size() < minPerWorker) { - // This should always have next if the creation of available partitions was done correctly. - assignments.add(availIter.next()); + currentAssignments.forEach((hp, partitions) -> { + if (!partitions.isEmpty()) { + var firstPart = partitions.iterator().next(); + if (copy.contains(firstPart)) { + desiredAssignments.put(hp, Set.of(firstPart)); + copy.remove(firstPart); + } } - } + }); - // Distribute available partitions to workers that do not have the max until no more partitions - // available. 
- for (var worker : currentAssignments.keySet()) { - var assignments = desiredAssignments.get(worker); - while (assignments.size() < maxPerWorker && availIter.hasNext()) { - assignments.add(availIter.next()); + var iter = copy.iterator(); + currentAssignments.forEach((hp, partitions) -> { + if (!desiredAssignments.containsKey(hp)) { + desiredAssignments.put(hp, Set.of(iter.next())); } - if (!availIter.hasNext()) { - break; - } - } + }); desiredAssignments.forEach((hp, parts) -> { log.debug(" desired " + hp + " " + parts.size() + " " + parts); @@ -210,16 +163,34 @@ private Map> computeDesiredAssignments( return desiredAssignments; } - private Set getDesiredPartitions() { + private Set getDesiredPartitions(int numWorkers) { + Preconditions.checkArgument(numWorkers >= 0); + + if (numWorkers == 0) { + return Set.of(); + } + + // create a single partition per worker that equally divides the space HashSet desired = new HashSet<>(); - // TODO created based on the number of available servers - for (long i = 0; i <= 15; i++) { - UUID start = new UUID((i << 60), -0); - UUID stop = new UUID((i << 60) | (-1L >>> 4), -1); - desired.add(new FatePartition(FateId.from(FateInstanceType.USER, start), - FateId.from(FateInstanceType.USER, stop))); + long jump = ((1L << 60)) / numWorkers; + for (int i = 0; i < numWorkers - 1; i++) { + long start = (i * jump) << 4; + long end = ((i + 1) * jump) << 4; + + UUID startUuid = new UUID(start, 0); + UUID endUuid = new UUID(end, 0); + + desired.add(new FatePartition(FateId.from(FateInstanceType.USER, startUuid), + FateId.from(FateInstanceType.USER, endUuid))); } + // last one is + long start = ((numWorkers - 1) * jump) << 4; + UUID startUuid = new UUID(start, 0); + UUID endUuid = new UUID(-1, -1); + desired.add(new FatePartition(FateId.from(FateInstanceType.USER, startUuid), + FateId.from(FateInstanceType.USER, endUuid))); + return desired; } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java 
b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java index 0b38196ae0d..b553a021d1b 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java @@ -34,7 +34,7 @@ import org.apache.accumulo.core.metadata.schema.ExternalCompactionId; import org.apache.accumulo.core.util.time.SteadyTime; import org.apache.accumulo.manager.EventPublisher; -import org.apache.accumulo.manager.split.Splitter; +import org.apache.accumulo.manager.split.SplitFileCache; import org.apache.accumulo.manager.tableOps.FateEnv; import org.apache.accumulo.server.ServerContext; import org.apache.accumulo.server.fs.VolumeManager; @@ -47,6 +47,7 @@ public class FateWorkerEnv implements FateEnv { private final ExecutorService renamePool; private final ServiceLock serviceLock; private final LiveTServerSet tservers; + private final SplitFileCache splitCache; FateWorkerEnv(ServerContext ctx, ServiceLock lock) { this.ctx = ctx; @@ -55,6 +56,7 @@ public class FateWorkerEnv implements FateEnv { this.renamePool = Executors.newFixedThreadPool(2); this.serviceLock = lock; this.tservers = new LiveTServerSet(ctx); + this.splitCache = new SplitFileCache(ctx); } @Override @@ -147,8 +149,8 @@ public ExecutorService getTabletRefreshThreadPool() { } @Override - public Splitter getSplitter() { - throw new UnsupportedOperationException(); + public SplitFileCache getSplitFileCache() { + return splitCache; } @Override diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java index ed97ecf9f99..67a918b19a8 100644 --- a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java +++ b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java @@ -74,9 +74,11 @@ public void test() throws Exception { var tableOpFutures = new ArrayList>(); for (int i = 0; i < 30; i++) { var 
table = "t" + i; - // TODO seeing in the logs that fate operations for the same table are running on different processes, however there is a 5 second delay because there is no notification mechanism + // TODO seeing in the logs that fate operations for the same table are running on different + // processes, however there is a 5 second delay because there is no notification mechanism - // TODO its hard to find everything related to a table id in the logs, especially when the table id is like "b" + // TODO its hard to find everything related to a table id in the logs, especially when the + // table id is like "b" var tableOpsFuture = executor.submit(() -> { client.tableOperations().create(table); log.info("Created table {}", table); @@ -92,7 +94,8 @@ public void test() throws Exception { } log.info("Wrote data to table {}", table); client.tableOperations().addSplits(table, splits); - log.info("Split table {}", table); // TODO split operation does not log table id and fate opid anywhere + log.info("Split table {}", table); // TODO split operation does not log table id and fate + // opid anywhere client.tableOperations().compact(table, new CompactionConfig().setWait(true)); log.info("Compacted table {}", table); client.tableOperations().merge(table, null, null); @@ -108,12 +111,17 @@ public void test() throws Exception { tableOpFutures.add(tableOpsFuture); } - for(var tof : tableOpFutures){ + for (var tof : tableOpFutures) { tof.get(); } } - Thread.sleep(30_000); + FateManager.stop.set(true); + + future.get(); + + executor.shutdown(); + System.out.println("DONE"); // TODO kill processes } From 2b11a54c48aacc397e27197cdc4d82aef95f68ba Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Fri, 6 Feb 2026 21:59:24 +0000 Subject: [PATCH 12/38] WIP --- .../org/apache/accumulo/core/fate/Fate.java | 21 +++++++++++--- .../org/apache/accumulo/manager/Manager.java | 2 +- .../accumulo/manager/fate/FateManager.java | 2 +- .../accumulo/manager/fate/FateWorker.java | 29 
+++++++++---------- 4 files changed, 33 insertions(+), 21 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java index e5945166793..07eb9aa8ad0 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java @@ -224,7 +224,8 @@ public void run() { fe -> fe.getFateOps().equals(fateOps) && fe.getName().equals(fateExecutorName))) { log.debug("[{}] Adding FateExecutor for {} with {} threads", store.type(), fateOps, poolSize); - var fateExecutor = new FateExecutor<>(Fate.this, environment, fateOps, poolSize, fateExecutorName); + var fateExecutor = + new FateExecutor<>(Fate.this, environment, fateOps, poolSize, fateExecutorName); fateExecutors.add(fateExecutor); fateExecutor.setPartitions(currentPartitions); } @@ -575,11 +576,23 @@ public void close() { store.close(); } - public void setPartitions(Set partitions) { + public Set getPartitions() { + synchronized (fateExecutors) { + return currentPartitions; + } + } + + public boolean setPartitions(Set expected, Set partitions) { + Objects.requireNonNull(expected); Objects.requireNonNull(partitions); synchronized (fateExecutors) { - currentPartitions = Set.copyOf(partitions); - fateExecutors.forEach(fe -> fe.setPartitions(currentPartitions)); + if (currentPartitions.equals(expected)) { + currentPartitions = Set.copyOf(partitions); + fateExecutors.forEach(fe -> fe.setPartitions(currentPartitions)); + return true; + } else { + return false; + } } } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index b5f73a2f722..11daea38701 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -1285,7 +1285,7 @@ protected Fate initializeFateInstance(ServerContext 
context, FateStore< .scheduleWithFixedDelay(fateCleaner::ageOff, 10, 4 * 60, MINUTES)); if (store.type() == FateInstanceType.META) { - fateInstance.setPartitions(Set.of(FatePartition.all(FateInstanceType.META))); + fateInstance.setPartitions(Set.of(), Set.of(FatePartition.all(FateInstanceType.META))); } // else do not run user transactions for now in the manager... it will have an empty set of // partitions diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 54244df7d4c..ec2f563062e 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -81,7 +81,7 @@ public void managerWorkers() throws Exception { Set partitions = entry.getValue(); var curr = currentAssignments.getOrDefault(worker, Set.of()); if (!Sets.difference(curr, partitions).isEmpty()) { - // This worker has extra partitions that are not desired, unload those + // This worker has extra partitions that are not desired var intersection = Sets.intersection(curr, partitions); if (!setWorkerPartitions(worker, curr, intersection)) { log.debug("Failed to set partitions for {} to {}", worker, intersection); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index 56c5f0dee32..8817474dfd3 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -80,8 +80,11 @@ public List getPartitions(TInfo tinfo, TCredentials credentials) SecurityErrorCode.PERMISSION_DENIED).asThriftException(); } - synchronized (currentPartitions) { - return currentPartitions.stream().map(FatePartition::toThrift).toList(); + var localFate = fate; + if (localFate 
== null) { + return List.of(); + } else { + return localFate.getPartitions().stream().map(FatePartition::toThrift).toList(); } } @@ -93,21 +96,17 @@ public boolean setPartitions(TInfo tinfo, TCredentials credentials, List log.info("old partition {}", p)); - currentPartitions.clear(); - desired.stream().map(FatePartition::from).forEach(currentPartitions::add); - desired.stream().map(FatePartition::from).forEach(p -> log.info("new partition {}", p)); - log.info("Changed partitions from {} to {}", expectedSet, currentPartitions); - fate.setPartitions(Set.copyOf(currentPartitions)); + var localFate = fate; + if (localFate != null) { + var expectedSet = expected.stream().map(FatePartition::from).collect(Collectors.toSet()); + var desiredSet = desired.stream().map(FatePartition::from).collect(Collectors.toSet()); + if (localFate.setPartitions(expectedSet, desiredSet)) { + log.info("Changed partitions from {} to {}", expectedSet, desiredSet); return true; - } else { - log.info("Did not change partitions to {} because {} != {}", desired, expectedSet, - currentPartitions); - return false; } } + + log.info("Did not change partitions to {}", desired); + return false; } } From 10dcee99ab2b0c20419cab7387d4a7e89113ebdb Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Fri, 6 Feb 2026 22:42:17 +0000 Subject: [PATCH 13/38] WIP --- .../apache/accumulo/core/fate/AbstractFateStore.java | 8 ++++---- .../org/apache/accumulo/core/fate/FateExecutor.java | 11 ++++++++--- .../apache/accumulo/core/fate/ReadOnlyFateStore.java | 4 ++-- .../org/apache/accumulo/core/logging/FateLogger.java | 4 ++-- .../java/org/apache/accumulo/core/fate/TestStore.java | 4 ++-- .../org/apache/accumulo/manager/fate/FateManager.java | 9 +++++++-- .../org/apache/accumulo/test/MultipleManagerIT.java | 7 ++++++- .../apache/accumulo/test/fate/FateStoreITBase.java | 4 ++-- 8 files changed, 33 insertions(+), 18 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java 
b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java index 39bcbe258fd..f0d3ac8500b 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java @@ -40,6 +40,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -161,7 +162,7 @@ public FateTxStore reserve(FateId fateId) { EnumSet.of(TStatus.SUBMITTED, TStatus.FAILED_IN_PROGRESS); @Override - public void runnable(Set partitions, AtomicBoolean keepWaiting, + public void runnable(Set partitions, BooleanSupplier keepWaiting, Consumer idConsumer) { if (partitions.isEmpty()) { @@ -170,7 +171,7 @@ public void runnable(Set partitions, AtomicBoolean keepWaiting, AtomicLong seen = new AtomicLong(0); - while (keepWaiting.get() && seen.get() == 0) { + while (keepWaiting.getAsBoolean() && seen.get() == 0) { final long beforeCount = unreservedRunnableCount.getCount(); final boolean beforeDeferredOverflow = deferredOverflow.get(); @@ -212,8 +213,7 @@ public void runnable(Set partitions, AtomicBoolean keepWaiting, } if (waitTime > 0) { - unreservedRunnableCount.waitFor(count -> count != beforeCount, waitTime, - keepWaiting::get); + unreservedRunnableCount.waitFor(count -> count != beforeCount, waitTime, keepWaiting); } } } diff --git a/core/src/main/java/org/apache/accumulo/core/fate/FateExecutor.java b/core/src/main/java/org/apache/accumulo/core/fate/FateExecutor.java index 95a89706a62..2457095209f 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/FateExecutor.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/FateExecutor.java @@ -45,6 +45,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import 
java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BooleanSupplier; import org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException; import org.apache.accumulo.core.conf.Property; @@ -316,8 +317,12 @@ private class WorkFinder implements Runnable { public void run() { while (fate.getKeepRunning().get() && !isShutdown()) { try { - // TODO - fate.getStore().runnable(partitions.get(), fate.getKeepRunning(), fateIdStatus -> { + var localPartitions = partitions.get(); + // if the set of partitions changes, we should stop looking for work w/ the old set of + // partitions + BooleanSupplier keepRunning = + () -> fate.getKeepRunning().get() && localPartitions == partitions.get(); + fate.getStore().runnable(localPartitions, keepRunning, fateIdStatus -> { // The FateId with the fate operation 'fateOp' is workable by this FateExecutor if // 1) This FateExecutor is assigned to work on 'fateOp' ('fateOp' is in 'fateOps') // 2) The transaction was cancelled while NEW. This is an edge case that needs to be @@ -328,7 +333,7 @@ public void run() { var fateOp = fateIdStatus.getFateOperation().orElse(null); if ((fateOp != null && fateOps.contains(fateOp)) || txCancelledWhileNew(status, fateOp)) { - while (fate.getKeepRunning().get() && !isShutdown()) { + while (keepRunning.getAsBoolean() && !isShutdown()) { try { // The reason for calling transfer instead of queueing is avoid rescanning the // storage layer and adding the same thing over and over. 
For example if all diff --git a/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyFateStore.java index 3608f9b1c29..4776fe3997b 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyFateStore.java @@ -24,7 +24,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.stream.Stream; @@ -164,7 +164,7 @@ interface FateIdStatus { * is found or until the keepWaiting parameter is false. It will return once all runnable ids * found were passed to the consumer. */ - void runnable(Set partitions, AtomicBoolean keepWaiting, + void runnable(Set partitions, BooleanSupplier keepWaiting, Consumer idConsumer); /** diff --git a/core/src/main/java/org/apache/accumulo/core/logging/FateLogger.java b/core/src/main/java/org/apache/accumulo/core/logging/FateLogger.java index 38e8976f515..0d100844947 100644 --- a/core/src/main/java/org/apache/accumulo/core/logging/FateLogger.java +++ b/core/src/main/java/org/apache/accumulo/core/logging/FateLogger.java @@ -25,7 +25,7 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Stream; @@ -142,7 +142,7 @@ public Stream list(FateKey.FateKeyType type) { } @Override - public void runnable(Set partitions, AtomicBoolean keepWaiting, + public void runnable(Set partitions, BooleanSupplier keepWaiting, Consumer idConsumer) { store.runnable(partitions, keepWaiting, idConsumer); } diff --git a/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java 
b/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java index da50b715d1a..7fd4d5157e4 100644 --- a/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java +++ b/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java @@ -30,7 +30,7 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.stream.Stream; @@ -273,7 +273,7 @@ public Stream list(FateKey.FateKeyType type) { } @Override - public void runnable(Set partitions, AtomicBoolean keepWaiting, + public void runnable(Set partitions, BooleanSupplier keepWaiting, Consumer idConsumer) { throw new UnsupportedOperationException(); } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index ec2f563062e..3137272727e 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -47,7 +47,8 @@ /** * Partitions fate across manager assistant processes. This is done by assigning ranges of the fate - * uuid key space to different processes. + * uuid key space to different processes. The partitions are logical and do not correspond to the + * physical partitioning of the fate table. */ public class FateManager { @@ -163,6 +164,10 @@ private Map> computeDesiredAssignments( return desiredAssignments; } + /** + * Computes a single partition for each worker such that the partition cover all possible UUIDs + * and evenly divide the UUIDs. 
+ */ private Set getDesiredPartitions(int numWorkers) { Preconditions.checkArgument(numWorkers >= 0); @@ -184,9 +189,9 @@ private Set getDesiredPartitions(int numWorkers) { FateId.from(FateInstanceType.USER, endUuid))); } - // last one is long start = ((numWorkers - 1) * jump) << 4; UUID startUuid = new UUID(start, 0); + // last partition has a special end uuid that is all f nibbles. UUID endUuid = new UUID(-1, -1); desired.add(new FatePartition(FateId.from(FateInstanceType.USER, startUuid), FateId.from(FateInstanceType.USER, endUuid))); diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java index 67a918b19a8..ba7e68efaaa 100644 --- a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java +++ b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java @@ -51,12 +51,14 @@ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSit public void test() throws Exception { List managerWorkers = new ArrayList<>(); + // start two fate workers initially for (int i = 0; i < 2; i++) { managerWorkers.add(exec(ManagerWorker.class)); } var executor = Executors.newCachedThreadPool(); + // This assigns fate partitions to fate worker processes, run it in a background thread. var fateMgr = new FateManager(getServerContext()); var future = executor.submit(() -> { fateMgr.managerWorkers(); @@ -64,6 +66,7 @@ public void test() throws Exception { }); Thread.sleep(30_000); + // start more fate workers, should see the partitions be shuffled eventually for (int i = 0; i < 3; i++) { managerWorkers.add(exec(ManagerWorker.class)); } @@ -76,9 +79,11 @@ public void test() throws Exception { var table = "t" + i; // TODO seeing in the logs that fate operations for the same table are running on different // processes, however there is a 5 second delay because there is no notification mechanism + // currently. 
// TODO its hard to find everything related to a table id in the logs, especially when the - // table id is like "b" + // table id is like "b". Was tring to follow a single table across multiple manager workers + // processes. var tableOpsFuture = executor.submit(() -> { client.tableOperations().create(table); log.info("Created table {}", table); diff --git a/test/src/main/java/org/apache/accumulo/test/fate/FateStoreITBase.java b/test/src/main/java/org/apache/accumulo/test/fate/FateStoreITBase.java index 67b9ac67f2e..36e0c0f2474 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/FateStoreITBase.java +++ b/test/src/main/java/org/apache/accumulo/test/fate/FateStoreITBase.java @@ -201,7 +201,7 @@ protected void testDeferredOverflow(FateStore store, ServerContext sctx // Run and verify all 10 transactions still exist and were not // run because of the deferral time of all the transactions future = executor.submit(() -> store.runnable(Set.of(FatePartition.all(store.type())), - keepRunning, fateIdStatus -> transactions.remove(fateIdStatus.getFateId()))); + keepRunning::get, fateIdStatus -> transactions.remove(fateIdStatus.getFateId()))); Thread.sleep(2000); assertEquals(10, transactions.size()); // Setting this flag to false should terminate the task if sleeping @@ -227,7 +227,7 @@ protected void testDeferredOverflow(FateStore store, ServerContext sctx // and removed from the store keepRunning.set(true); future = executor.submit(() -> store.runnable(Set.of(FatePartition.all(store.type())), - keepRunning, fateIdStatus -> transactions.remove(fateIdStatus.getFateId()))); + keepRunning::get, fateIdStatus -> transactions.remove(fateIdStatus.getFateId()))); Wait.waitFor(transactions::isEmpty); // Setting this flag to false should terminate the task if sleeping keepRunning.set(false); From 25cfe5b9ac5e486695d63f7fceb744de3e6fb153 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Fri, 6 Feb 2026 23:48:29 +0000 Subject: [PATCH 14/38] WIP --- 
.../org/apache/accumulo/core/fate/Fate.java | 6 ++++++ .../accumulo/manager/fate/FateManager.java | 17 +++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java index 07eb9aa8ad0..169fcb1b5cd 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java @@ -65,6 +65,7 @@ import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import com.google.gson.JsonParser; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; @@ -585,6 +586,11 @@ public Set getPartitions() { public boolean setPartitions(Set expected, Set partitions) { Objects.requireNonNull(expected); Objects.requireNonNull(partitions); + Preconditions.checkArgument( + partitions.stream().allMatch( + fp -> fp.start().getType() == store.type() && fp.end().getType() == store.type()), + "type mismatch type:%s partitions:%s", store.type(), partitions); + synchronized (fateExecutors) { if (currentPartitions.equals(expected)) { currentPartitions = Set.copyOf(partitions); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 3137272727e..a90880863b4 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -113,6 +113,19 @@ public void managerWorkers() throws Exception { } } + /** + * Sets the complete set of partitions a server should work on. It will only succeed if the + * current set we pass in matches the severs actual current set of partitions. 
Passing the current + * set avoids some race conditions w/ previously queued network messages, it's a distributed + * compare and set mechanism that can detect changes. + * + * @param address The server to set partitions on + * @param current What we think the servers current set of fate partitions are. + * @param desired The new set of fate partitions this server should start working. It should only + * work on these and nothing else. + * @return true if the partitions were set false if they were not set. + * @throws TException + */ private boolean setWorkerPartitions(HostAndPort address, Set current, Set desired) throws TException { // TODO make a compare and set type RPC that uses the current and desired @@ -177,6 +190,10 @@ private Set getDesiredPartitions(int numWorkers) { // create a single partition per worker that equally divides the space HashSet desired = new HashSet<>(); + // All the shifting is because java does not have unsigned integers. Want to evenly partition + // [0,2^64) into numWorker ranges, but can not directly do that. Work w/ 60 bit unsigned + // integers to partition the space and then shift over by 4. Used 60 bits instead of 63 so it + // nicely aligns w/ hex in the uuid. 
long jump = ((1L << 60)) / numWorkers; for (int i = 0; i < numWorkers - 1; i++) { long start = (i * jump) << 4; From 905a94d60c9feb2d96af4caee2944116e09f7d6f Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Wed, 11 Feb 2026 22:43:01 +0000 Subject: [PATCH 15/38] WIP --- .../org/apache/accumulo/core/fate/Fate.java | 14 +- .../core/fate/thrift/FateWorkerService.java | 356 ++++------- .../core/fate/thrift/TFatePartitions.java | 561 ++++++++++++++++++ core/src/main/thrift/fate-worker.thrift | 9 +- .../org/apache/accumulo/manager/Manager.java | 2 +- .../accumulo/manager/fate/FateManager.java | 44 +- .../accumulo/manager/fate/FateWorker.java | 47 +- 7 files changed, 758 insertions(+), 275 deletions(-) create mode 100644 core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/TFatePartitions.java diff --git a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java index 169fcb1b5cd..e0815937db2 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java @@ -583,8 +583,7 @@ public Set getPartitions() { } } - public boolean setPartitions(Set expected, Set partitions) { - Objects.requireNonNull(expected); + public Set setPartitions(Set partitions) { Objects.requireNonNull(partitions); Preconditions.checkArgument( partitions.stream().allMatch( @@ -592,13 +591,10 @@ public boolean setPartitions(Set expected, Set par "type mismatch type:%s partitions:%s", store.type(), partitions); synchronized (fateExecutors) { - if (currentPartitions.equals(expected)) { - currentPartitions = Set.copyOf(partitions); - fateExecutors.forEach(fe -> fe.setPartitions(currentPartitions)); - return true; - } else { - return false; - } + var old = currentPartitions; + currentPartitions = Set.copyOf(partitions); + fateExecutors.forEach(fe -> fe.setPartitions(currentPartitions)); + return old; } } diff --git 
a/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java b/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java index eb04b865cbb..2ef0da2c4f3 100644 --- a/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java +++ b/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java @@ -29,17 +29,17 @@ public class FateWorkerService { public interface Iface { - public java.util.List getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException; + public TFatePartitions getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException; - public boolean setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List current, java.util.List desired) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException; + public boolean setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, long updateId, java.util.List desired) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException; } public interface AsyncIface { - public void getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException; + public void 
getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List current, java.util.List desired, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, long updateId, java.util.List desired, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; } @@ -66,7 +66,7 @@ public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.prot } @Override - public java.util.List getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException + public TFatePartitions getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException { send_getPartitions(tinfo, credentials); return recv_getPartitions(); @@ -80,7 +80,7 @@ public void send_getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo sendBase("getPartitions", args); } - public java.util.List recv_getPartitions() throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException + public TFatePartitions recv_getPartitions() throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException { 
getPartitions_result result = new getPartitions_result(); receiveBase(result, "getPartitions"); @@ -94,18 +94,18 @@ public java.util.List recv_getPartitions() throws org.apache.acc } @Override - public boolean setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List current, java.util.List desired) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException + public boolean setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, long updateId, java.util.List desired) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException { - send_setPartitions(tinfo, credentials, current, desired); + send_setPartitions(tinfo, credentials, updateId, desired); return recv_setPartitions(); } - public void send_setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List current, java.util.List desired) throws org.apache.thrift.TException + public void send_setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, long updateId, java.util.List desired) throws org.apache.thrift.TException { setPartitions_args args = new setPartitions_args(); args.setTinfo(tinfo); args.setCredentials(credentials); - args.setCurrent(current); + args.setUpdateId(updateId); args.setDesired(desired); sendBase("setPartitions", args); } @@ -143,17 +143,17 @@ public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, } @Override - public void getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback> resultHandler) 
throws org.apache.thrift.TException { + public void getPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); getPartitions_call method_call = new getPartitions_call(tinfo, credentials, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } - public static class getPartitions_call extends org.apache.thrift.async.TAsyncMethodCall> { + public static class getPartitions_call extends org.apache.thrift.async.TAsyncMethodCall { private org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo; private org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; - public getPartitions_call(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + public getPartitions_call(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.tinfo = tinfo; this.credentials = credentials; @@ -170,7 +170,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } @Override - public java.util.List getResult() throws 
org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException { + public TFatePartitions getResult() throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException { if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { throw new java.lang.IllegalStateException("Method call not finished!"); } @@ -181,9 +181,9 @@ public java.util.List getResult() throws org.apache.accumulo.cor } @Override - public void setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List current, java.util.List desired, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, long updateId, java.util.List desired, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - setPartitions_call method_call = new setPartitions_call(tinfo, credentials, current, desired, resultHandler, this, ___protocolFactory, ___transport); + setPartitions_call method_call = new setPartitions_call(tinfo, credentials, updateId, desired, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @@ -191,13 +191,13 @@ public void setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo public static class setPartitions_call extends org.apache.thrift.async.TAsyncMethodCall { private org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo; private org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; - private java.util.List current; + private long updateId; private java.util.List desired; - public setPartitions_call(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, 
org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List current, java.util.List desired, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + public setPartitions_call(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, long updateId, java.util.List desired, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); this.tinfo = tinfo; this.credentials = credentials; - this.current = current; + this.updateId = updateId; this.desired = desired; } @@ -207,7 +207,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa setPartitions_args args = new setPartitions_args(); args.setTinfo(tinfo); args.setCredentials(credentials); - args.setCurrent(current); + args.setUpdateId(updateId); args.setDesired(desired); args.write(prot); prot.writeMessageEnd(); @@ -298,7 +298,7 @@ protected boolean rethrowUnhandledExceptions() { public setPartitions_result getResult(I iface, setPartitions_args args) throws org.apache.thrift.TException { setPartitions_result result = new setPartitions_result(); try { - result.success = iface.setPartitions(args.tinfo, args.credentials, args.current, args.desired); + result.success = iface.setPartitions(args.tinfo, args.credentials, args.updateId, args.desired); result.setSuccessIsSet(true); } catch (org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException sec) { result.sec = sec; @@ -325,7 +325,7 @@ protected AsyncProcessor(I iface, 
java.util.Map extends org.apache.thrift.AsyncProcessFunction> { + public static class getPartitions extends org.apache.thrift.AsyncProcessFunction { public getPartitions() { super("getPartitions"); } @@ -336,11 +336,11 @@ public getPartitions_args getEmptyArgsInstance() { } @Override - public org.apache.thrift.async.AsyncMethodCallback> getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new org.apache.thrift.async.AsyncMethodCallback>() { + return new org.apache.thrift.async.AsyncMethodCallback() { @Override - public void onComplete(java.util.List o) { + public void onComplete(TFatePartitions o) { getPartitions_result result = new getPartitions_result(); result.success = o; try { @@ -391,7 +391,7 @@ protected boolean isOneway() { } @Override - public void start(I iface, getPartitions_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws org.apache.thrift.TException { + public void start(I iface, getPartitions_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { iface.getPartitions(args.tinfo, args.credentials,resultHandler); } } @@ -464,7 +464,7 @@ protected boolean isOneway() { @Override public void start(I iface, setPartitions_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.setPartitions(args.tinfo, args.credentials, args.current, args.desired,resultHandler); + iface.setPartitions(args.tinfo, args.credentials, args.updateId, args.desired,resultHandler); } } @@ -968,13 +968,13 @@ private static S scheme(org.apache. 
public static class getPartitions_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getPartitions_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField SEC_FIELD_DESC = new org.apache.thrift.protocol.TField("sec", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new getPartitions_resultStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new getPartitions_resultTupleSchemeFactory(); - public @org.apache.thrift.annotation.Nullable java.util.List success; // required + public @org.apache.thrift.annotation.Nullable TFatePartitions success; // required public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException sec; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ @@ -1047,8 +1047,7 @@ public java.lang.String getFieldName() { static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFatePartition.class)))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFatePartitions.class))); tmpMap.put(_Fields.SEC, new org.apache.thrift.meta_data.FieldMetaData("sec", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException.class))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); @@ -1059,7 +1058,7 @@ public getPartitions_result() { } public getPartitions_result( - java.util.List success, + TFatePartitions success, org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException sec) { this(); @@ -1072,11 +1071,7 @@ public getPartitions_result( */ public getPartitions_result(getPartitions_result other) { if (other.isSetSuccess()) { - java.util.List __this__success = new java.util.ArrayList(other.success.size()); - for (TFatePartition other_element : other.success) { - __this__success.add(new TFatePartition(other_element)); - } - this.success = __this__success; + this.success = new TFatePartitions(other.success); } if (other.isSetSec()) { this.sec = new org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException(other.sec); @@ -1094,28 +1089,12 @@ public void clear() { this.sec = null; } - public int getSuccessSize() { - return (this.success == null) ? 
0 : this.success.size(); - } - - @org.apache.thrift.annotation.Nullable - public java.util.Iterator getSuccessIterator() { - return (this.success == null) ? null : this.success.iterator(); - } - - public void addToSuccess(TFatePartition elem) { - if (this.success == null) { - this.success = new java.util.ArrayList(); - } - this.success.add(elem); - } - @org.apache.thrift.annotation.Nullable - public java.util.List getSuccess() { + public TFatePartitions getSuccess() { return this.success; } - public getPartitions_result setSuccess(@org.apache.thrift.annotation.Nullable java.util.List success) { + public getPartitions_result setSuccess(@org.apache.thrift.annotation.Nullable TFatePartitions success) { this.success = success; return this; } @@ -1167,7 +1146,7 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable if (value == null) { unsetSuccess(); } else { - setSuccess((java.util.List)value); + setSuccess((TFatePartitions)value); } break; @@ -1334,6 +1313,9 @@ public java.lang.String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -1373,19 +1355,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getPartitions_resul } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list0 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list0.size); - @org.apache.thrift.annotation.Nullable TFatePartition _elem1; - for (int _i2 = 0; _i2 < _list0.size; ++_i2) - { - _elem1 = new TFatePartition(); - _elem1.read(iprot); - struct.success.add(_elem1); - } - iprot.readListEnd(); - } + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new TFatePartitions(); + 
struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -1418,14 +1390,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getPartitions_resu oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TFatePartition _iter3 : struct.success) - { - _iter3.write(oprot); - } - oprot.writeListEnd(); - } + struct.success.write(oprot); oprot.writeFieldEnd(); } if (struct.sec != null) { @@ -1460,13 +1425,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getPartitions_resul } oprot.writeBitSet(optionals, 2); if (struct.isSetSuccess()) { - { - oprot.writeI32(struct.success.size()); - for (TFatePartition _iter4 : struct.success) - { - _iter4.write(oprot); - } - } + struct.success.write(oprot); } if (struct.isSetSec()) { struct.sec.write(oprot); @@ -1478,17 +1437,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getPartitions_result org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list5 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list5.size); - @org.apache.thrift.annotation.Nullable TFatePartition _elem6; - for (int _i7 = 0; _i7 < _list5.size; ++_i7) - { - _elem6 = new TFatePartition(); - _elem6.read(iprot); - struct.success.add(_elem6); - } - } + struct.success = new TFatePartitions(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { @@ -1510,7 +1460,7 @@ public static class setPartitions_args implements org.apache.thrift.TBase current; // required + public long updateId; // required public 
@org.apache.thrift.annotation.Nullable java.util.List desired; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { TINFO((short)1, "tinfo"), CREDENTIALS((short)2, "credentials"), - CURRENT((short)3, "current"), + UPDATE_ID((short)3, "updateId"), DESIRED((short)4, "desired"); private static final java.util.Map byName = new java.util.HashMap(); @@ -1546,8 +1496,8 @@ public static _Fields findByThriftId(int fieldId) { return TINFO; case 2: // CREDENTIALS return CREDENTIALS; - case 3: // CURRENT - return CURRENT; + case 3: // UPDATE_ID + return UPDATE_ID; case 4: // DESIRED return DESIRED; default: @@ -1593,6 +1543,8 @@ public java.lang.String getFieldName() { } // isset id assignments + private static final int __UPDATEID_ISSET_ID = 0; + private byte __isset_bitfield = 0; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -1600,9 +1552,8 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.clientImpl.thrift.TInfo.class))); tmpMap.put(_Fields.CREDENTIALS, new org.apache.thrift.meta_data.FieldMetaData("credentials", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.securityImpl.thrift.TCredentials.class))); - tmpMap.put(_Fields.CURRENT, new org.apache.thrift.meta_data.FieldMetaData("current", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new 
org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFatePartition.class)))); + tmpMap.put(_Fields.UPDATE_ID, new org.apache.thrift.meta_data.FieldMetaData("updateId", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.DESIRED, new org.apache.thrift.meta_data.FieldMetaData("desired", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFatePartition.class)))); @@ -1616,13 +1567,14 @@ public setPartitions_args() { public setPartitions_args( org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, - java.util.List current, + long updateId, java.util.List desired) { this(); this.tinfo = tinfo; this.credentials = credentials; - this.current = current; + this.updateId = updateId; + setUpdateIdIsSet(true); this.desired = desired; } @@ -1630,19 +1582,14 @@ public setPartitions_args( * Performs a deep copy on other. 
*/ public setPartitions_args(setPartitions_args other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetTinfo()) { this.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(other.tinfo); } if (other.isSetCredentials()) { this.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(other.credentials); } - if (other.isSetCurrent()) { - java.util.List __this__current = new java.util.ArrayList(other.current.size()); - for (TFatePartition other_element : other.current) { - __this__current.add(new TFatePartition(other_element)); - } - this.current = __this__current; - } + this.updateId = other.updateId; if (other.isSetDesired()) { java.util.List __this__desired = new java.util.ArrayList(other.desired.size()); for (TFatePartition other_element : other.desired) { @@ -1661,7 +1608,8 @@ public setPartitions_args deepCopy() { public void clear() { this.tinfo = null; this.credentials = null; - this.current = null; + setUpdateIdIsSet(false); + this.updateId = 0; this.desired = null; } @@ -1715,45 +1663,27 @@ public void setCredentialsIsSet(boolean value) { } } - public int getCurrentSize() { - return (this.current == null) ? 0 : this.current.size(); - } - - @org.apache.thrift.annotation.Nullable - public java.util.Iterator getCurrentIterator() { - return (this.current == null) ? 
null : this.current.iterator(); - } - - public void addToCurrent(TFatePartition elem) { - if (this.current == null) { - this.current = new java.util.ArrayList(); - } - this.current.add(elem); - } - - @org.apache.thrift.annotation.Nullable - public java.util.List getCurrent() { - return this.current; + public long getUpdateId() { + return this.updateId; } - public setPartitions_args setCurrent(@org.apache.thrift.annotation.Nullable java.util.List current) { - this.current = current; + public setPartitions_args setUpdateId(long updateId) { + this.updateId = updateId; + setUpdateIdIsSet(true); return this; } - public void unsetCurrent() { - this.current = null; + public void unsetUpdateId() { + __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __UPDATEID_ISSET_ID); } - /** Returns true if field current is set (has been assigned a value) and false otherwise */ - public boolean isSetCurrent() { - return this.current != null; + /** Returns true if field updateId is set (has been assigned a value) and false otherwise */ + public boolean isSetUpdateId() { + return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __UPDATEID_ISSET_ID); } - public void setCurrentIsSet(boolean value) { - if (!value) { - this.current = null; - } + public void setUpdateIdIsSet(boolean value) { + __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __UPDATEID_ISSET_ID, value); } public int getDesiredSize() { @@ -1816,11 +1746,11 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; - case CURRENT: + case UPDATE_ID: if (value == null) { - unsetCurrent(); + unsetUpdateId(); } else { - setCurrent((java.util.List)value); + setUpdateId((java.lang.Long)value); } break; @@ -1845,8 +1775,8 @@ public java.lang.Object getFieldValue(_Fields field) { case CREDENTIALS: return getCredentials(); - case CURRENT: - return getCurrent(); + case UPDATE_ID: + return getUpdateId(); case DESIRED: return getDesired(); @@ 
-1867,8 +1797,8 @@ public boolean isSet(_Fields field) { return isSetTinfo(); case CREDENTIALS: return isSetCredentials(); - case CURRENT: - return isSetCurrent(); + case UPDATE_ID: + return isSetUpdateId(); case DESIRED: return isSetDesired(); } @@ -1906,12 +1836,12 @@ public boolean equals(setPartitions_args that) { return false; } - boolean this_present_current = true && this.isSetCurrent(); - boolean that_present_current = true && that.isSetCurrent(); - if (this_present_current || that_present_current) { - if (!(this_present_current && that_present_current)) + boolean this_present_updateId = true; + boolean that_present_updateId = true; + if (this_present_updateId || that_present_updateId) { + if (!(this_present_updateId && that_present_updateId)) return false; - if (!this.current.equals(that.current)) + if (this.updateId != that.updateId) return false; } @@ -1939,9 +1869,7 @@ public int hashCode() { if (isSetCredentials()) hashCode = hashCode * 8191 + credentials.hashCode(); - hashCode = hashCode * 8191 + ((isSetCurrent()) ? 131071 : 524287); - if (isSetCurrent()) - hashCode = hashCode * 8191 + current.hashCode(); + hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(updateId); hashCode = hashCode * 8191 + ((isSetDesired()) ? 
131071 : 524287); if (isSetDesired()) @@ -1978,12 +1906,12 @@ public int compareTo(setPartitions_args other) { return lastComparison; } } - lastComparison = java.lang.Boolean.compare(isSetCurrent(), other.isSetCurrent()); + lastComparison = java.lang.Boolean.compare(isSetUpdateId(), other.isSetUpdateId()); if (lastComparison != 0) { return lastComparison; } - if (isSetCurrent()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.current, other.current); + if (isSetUpdateId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.updateId, other.updateId); if (lastComparison != 0) { return lastComparison; } @@ -2038,12 +1966,8 @@ public java.lang.String toString() { } first = false; if (!first) sb.append(", "); - sb.append("current:"); - if (this.current == null) { - sb.append("null"); - } else { - sb.append(this.current); - } + sb.append("updateId:"); + sb.append(this.updateId); first = false; if (!first) sb.append(", "); sb.append("desired:"); @@ -2078,6 +2002,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -2122,21 +2048,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, setPartitions_args org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // CURRENT - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list8 = iprot.readListBegin(); - struct.current = new java.util.ArrayList(_list8.size); - @org.apache.thrift.annotation.Nullable TFatePartition _elem9; - for (int _i10 = 0; _i10 < _list8.size; ++_i10) - { - _elem9 = new TFatePartition(); - _elem9.read(iprot); - struct.current.add(_elem9); - } - iprot.readListEnd(); - } - struct.setCurrentIsSet(true); + case 3: // UPDATE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.updateId = iprot.readI64(); + struct.setUpdateIdIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -2144,14 +2059,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, setPartitions_args case 4: // DESIRED if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list11 = iprot.readListBegin(); - struct.desired = new java.util.ArrayList(_list11.size); - @org.apache.thrift.annotation.Nullable TFatePartition _elem12; - for (int _i13 = 0; _i13 < _list11.size; ++_i13) + org.apache.thrift.protocol.TList _list8 = iprot.readListBegin(); + struct.desired = new java.util.ArrayList(_list8.size); + @org.apache.thrift.annotation.Nullable TFatePartition _elem9; + for (int _i10 = 0; _i10 < _list8.size; ++_i10) { - _elem12 = new TFatePartition(); - _elem12.read(iprot); - struct.desired.add(_elem12); + _elem9 = new TFatePartition(); + _elem9.read(iprot); + struct.desired.add(_elem9); } iprot.readListEnd(); } @@ 
-2186,25 +2101,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, setPartitions_args struct.credentials.write(oprot); oprot.writeFieldEnd(); } - if (struct.current != null) { - oprot.writeFieldBegin(CURRENT_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.current.size())); - for (TFatePartition _iter14 : struct.current) - { - _iter14.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } + oprot.writeFieldBegin(UPDATE_ID_FIELD_DESC); + oprot.writeI64(struct.updateId); + oprot.writeFieldEnd(); if (struct.desired != null) { oprot.writeFieldBegin(DESIRED_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.desired.size())); - for (TFatePartition _iter15 : struct.desired) + for (TFatePartition _iter11 : struct.desired) { - _iter15.write(oprot); + _iter11.write(oprot); } oprot.writeListEnd(); } @@ -2235,7 +2141,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, setPartitions_args if (struct.isSetCredentials()) { optionals.set(1); } - if (struct.isSetCurrent()) { + if (struct.isSetUpdateId()) { optionals.set(2); } if (struct.isSetDesired()) { @@ -2248,21 +2154,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, setPartitions_args if (struct.isSetCredentials()) { struct.credentials.write(oprot); } - if (struct.isSetCurrent()) { - { - oprot.writeI32(struct.current.size()); - for (TFatePartition _iter16 : struct.current) - { - _iter16.write(oprot); - } - } + if (struct.isSetUpdateId()) { + oprot.writeI64(struct.updateId); } if (struct.isSetDesired()) { { oprot.writeI32(struct.desired.size()); - for (TFatePartition _iter17 : struct.desired) + for (TFatePartition _iter12 : struct.desired) { - _iter17.write(oprot); + _iter12.write(oprot); } } } @@ -2283,29 +2183,19 @@ public void read(org.apache.thrift.protocol.TProtocol prot, setPartitions_args s 
struct.setCredentialsIsSet(true); } if (incoming.get(2)) { - { - org.apache.thrift.protocol.TList _list18 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.current = new java.util.ArrayList(_list18.size); - @org.apache.thrift.annotation.Nullable TFatePartition _elem19; - for (int _i20 = 0; _i20 < _list18.size; ++_i20) - { - _elem19 = new TFatePartition(); - _elem19.read(iprot); - struct.current.add(_elem19); - } - } - struct.setCurrentIsSet(true); + struct.updateId = iprot.readI64(); + struct.setUpdateIdIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list21 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.desired = new java.util.ArrayList(_list21.size); - @org.apache.thrift.annotation.Nullable TFatePartition _elem22; - for (int _i23 = 0; _i23 < _list21.size; ++_i23) + org.apache.thrift.protocol.TList _list13 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.desired = new java.util.ArrayList(_list13.size); + @org.apache.thrift.annotation.Nullable TFatePartition _elem14; + for (int _i15 = 0; _i15 < _list13.size; ++_i15) { - _elem22 = new TFatePartition(); - _elem22.read(iprot); - struct.desired.add(_elem22); + _elem14 = new TFatePartition(); + _elem14.read(iprot); + struct.desired.add(_elem14); } } struct.setDesiredIsSet(true); diff --git a/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/TFatePartitions.java b/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/TFatePartitions.java new file mode 100644 index 00000000000..7d8a327484b --- /dev/null +++ b/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/TFatePartitions.java @@ -0,0 +1,561 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/** + * Autogenerated by Thrift Compiler (0.17.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.accumulo.core.fate.thrift; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) +public class TFatePartitions implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TFatePartitions"); + + private static final org.apache.thrift.protocol.TField UPDATE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("updateId", org.apache.thrift.protocol.TType.I64, (short)1); + private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)2); + + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TFatePartitionsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TFatePartitionsTupleSchemeFactory(); + + public long updateId; // required + public @org.apache.thrift.annotation.Nullable java.util.List partitions; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + UPDATE_ID((short)1, "updateId"), + PARTITIONS((short)2, "partitions"); + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // UPDATE_ID + return UPDATE_ID; + case 2: // PARTITIONS + return PARTITIONS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + @Override + public short getThriftFieldId() { + return _thriftId; + } + + @Override + public java.lang.String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __UPDATEID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.UPDATE_ID, new org.apache.thrift.meta_data.FieldMetaData("updateId", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFatePartition.class)))); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TFatePartitions.class, metaDataMap); + } + + public TFatePartitions() { + } + + public TFatePartitions( + long updateId, + java.util.List partitions) + { + this(); + this.updateId = updateId; + setUpdateIdIsSet(true); + this.partitions = partitions; + } + + /** + * Performs a deep copy on other. 
+ */ + public TFatePartitions(TFatePartitions other) { + __isset_bitfield = other.__isset_bitfield; + this.updateId = other.updateId; + if (other.isSetPartitions()) { + java.util.List __this__partitions = new java.util.ArrayList(other.partitions.size()); + for (TFatePartition other_element : other.partitions) { + __this__partitions.add(new TFatePartition(other_element)); + } + this.partitions = __this__partitions; + } + } + + @Override + public TFatePartitions deepCopy() { + return new TFatePartitions(this); + } + + @Override + public void clear() { + setUpdateIdIsSet(false); + this.updateId = 0; + this.partitions = null; + } + + public long getUpdateId() { + return this.updateId; + } + + public TFatePartitions setUpdateId(long updateId) { + this.updateId = updateId; + setUpdateIdIsSet(true); + return this; + } + + public void unsetUpdateId() { + __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __UPDATEID_ISSET_ID); + } + + /** Returns true if field updateId is set (has been assigned a value) and false otherwise */ + public boolean isSetUpdateId() { + return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __UPDATEID_ISSET_ID); + } + + public void setUpdateIdIsSet(boolean value) { + __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __UPDATEID_ISSET_ID, value); + } + + public int getPartitionsSize() { + return (this.partitions == null) ? 0 : this.partitions.size(); + } + + @org.apache.thrift.annotation.Nullable + public java.util.Iterator getPartitionsIterator() { + return (this.partitions == null) ? 
null : this.partitions.iterator(); + } + + public void addToPartitions(TFatePartition elem) { + if (this.partitions == null) { + this.partitions = new java.util.ArrayList(); + } + this.partitions.add(elem); + } + + @org.apache.thrift.annotation.Nullable + public java.util.List getPartitions() { + return this.partitions; + } + + public TFatePartitions setPartitions(@org.apache.thrift.annotation.Nullable java.util.List partitions) { + this.partitions = partitions; + return this; + } + + public void unsetPartitions() { + this.partitions = null; + } + + /** Returns true if field partitions is set (has been assigned a value) and false otherwise */ + public boolean isSetPartitions() { + return this.partitions != null; + } + + public void setPartitionsIsSet(boolean value) { + if (!value) { + this.partitions = null; + } + } + + @Override + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + case UPDATE_ID: + if (value == null) { + unsetUpdateId(); + } else { + setUpdateId((java.lang.Long)value); + } + break; + + case PARTITIONS: + if (value == null) { + unsetPartitions(); + } else { + setPartitions((java.util.List)value); + } + break; + + } + } + + @org.apache.thrift.annotation.Nullable + @Override + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + case UPDATE_ID: + return getUpdateId(); + + case PARTITIONS: + return getPartitions(); + + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + @Override + public boolean isSet(_Fields field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + case UPDATE_ID: + return isSetUpdateId(); + case PARTITIONS: + return isSetPartitions(); + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that 
instanceof TFatePartitions) + return this.equals((TFatePartitions)that); + return false; + } + + public boolean equals(TFatePartitions that) { + if (that == null) + return false; + if (this == that) + return true; + + boolean this_present_updateId = true; + boolean that_present_updateId = true; + if (this_present_updateId || that_present_updateId) { + if (!(this_present_updateId && that_present_updateId)) + return false; + if (this.updateId != that.updateId) + return false; + } + + boolean this_present_partitions = true && this.isSetPartitions(); + boolean that_present_partitions = true && that.isSetPartitions(); + if (this_present_partitions || that_present_partitions) { + if (!(this_present_partitions && that_present_partitions)) + return false; + if (!this.partitions.equals(that.partitions)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + int hashCode = 1; + + hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(updateId); + + hashCode = hashCode * 8191 + ((isSetPartitions()) ? 
131071 : 524287); + if (isSetPartitions()) + hashCode = hashCode * 8191 + partitions.hashCode(); + + return hashCode; + } + + @Override + public int compareTo(TFatePartitions other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = java.lang.Boolean.compare(isSetUpdateId(), other.isSetUpdateId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetUpdateId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.updateId, other.updateId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = java.lang.Boolean.compare(isSetPartitions(), other.isSetPartitions()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPartitions()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitions, other.partitions); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + @org.apache.thrift.annotation.Nullable + @Override + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() { + java.lang.StringBuilder sb = new java.lang.StringBuilder("TFatePartitions("); + boolean first = true; + + sb.append("updateId:"); + sb.append(this.updateId); + first = false; + if (!first) sb.append(", "); + sb.append("partitions:"); + if (this.partitions == null) { + sb.append("null"); + } else { + sb.append(this.partitions); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required 
fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TFatePartitionsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public TFatePartitionsStandardScheme getScheme() { + return new TFatePartitionsStandardScheme(); + } + } + + private static class TFatePartitionsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot, TFatePartitions struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // UPDATE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.updateId = iprot.readI64(); + struct.setUpdateIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // PARTITIONS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list0 = iprot.readListBegin(); + struct.partitions = 
new java.util.ArrayList(_list0.size); + @org.apache.thrift.annotation.Nullable TFatePartition _elem1; + for (int _i2 = 0; _i2 < _list0.size; ++_i2) + { + _elem1 = new TFatePartition(); + _elem1.read(iprot); + struct.partitions.add(_elem1); + } + iprot.readListEnd(); + } + struct.setPartitionsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot, TFatePartitions struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(UPDATE_ID_FIELD_DESC); + oprot.writeI64(struct.updateId); + oprot.writeFieldEnd(); + if (struct.partitions != null) { + oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); + for (TFatePartition _iter3 : struct.partitions) + { + _iter3.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TFatePartitionsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public TFatePartitionsTupleScheme getScheme() { + return new TFatePartitionsTupleScheme(); + } + } + + private static class TFatePartitionsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TFatePartitions struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet optionals = new 
java.util.BitSet(); + if (struct.isSetUpdateId()) { + optionals.set(0); + } + if (struct.isSetPartitions()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetUpdateId()) { + oprot.writeI64(struct.updateId); + } + if (struct.isSetPartitions()) { + { + oprot.writeI32(struct.partitions.size()); + for (TFatePartition _iter4 : struct.partitions) + { + _iter4.write(oprot); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TFatePartitions struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.updateId = iprot.readI64(); + struct.setUpdateIdIsSet(true); + } + if (incoming.get(1)) { + { + org.apache.thrift.protocol.TList _list5 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.partitions = new java.util.ArrayList(_list5.size); + @org.apache.thrift.annotation.Nullable TFatePartition _elem6; + for (int _i7 = 0; _i7 < _list5.size; ++_i7) + { + _elem6 = new TFatePartition(); + _elem6.read(iprot); + struct.partitions.add(_elem6); + } + } + struct.setPartitionsIsSet(true); + } + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? 
STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + private static void unusedMethod() {} +} + diff --git a/core/src/main/thrift/fate-worker.thrift b/core/src/main/thrift/fate-worker.thrift index b27a254382d..91c23aeb6b9 100644 --- a/core/src/main/thrift/fate-worker.thrift +++ b/core/src/main/thrift/fate-worker.thrift @@ -22,6 +22,11 @@ namespace cpp org.apache.accumulo.core.fate.thrift include "client.thrift" include "security.thrift" +struct TFatePartitions { + 1:i64 updateId + 2:list partitions +} + struct TFatePartition { 1:string start 2:string stop @@ -29,7 +34,7 @@ struct TFatePartition { service FateWorkerService { - list getPartitions( + TFatePartitions getPartitions( 1:client.TInfo tinfo, 2:security.TCredentials credentials ) throws ( @@ -39,7 +44,7 @@ service FateWorkerService { bool setPartitions( 1:client.TInfo tinfo, 2:security.TCredentials credentials, - 3:list expected, + 3:i64 updateId, 4:list desired ) throws ( 1:client.ThriftSecurityException sec diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index 11daea38701..b5f73a2f722 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -1285,7 +1285,7 @@ protected Fate initializeFateInstance(ServerContext context, FateStore< .scheduleWithFixedDelay(fateCleaner::ageOff, 10, 4 * 60, MINUTES)); if (store.type() == FateInstanceType.META) { - fateInstance.setPartitions(Set.of(), Set.of(FatePartition.all(FateInstanceType.META))); + fateInstance.setPartitions(Set.of(FatePartition.all(FateInstanceType.META))); } // else do not run user transactions for now in the manager... 
it will have an empty set of // partitions diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index a90880863b4..3f5262fc570 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -70,13 +70,16 @@ public void managerWorkers() throws Exception { // TODO could support RG... could user ServerId // This map will contain all current workers even their partitions are empty - Map> currentAssignments = getCurrentAssignments(); + Map currentPartitions = getCurrentAssignments(); + Map> currentAssignments = new HashMap<>(); + currentPartitions.forEach((k, v) -> currentAssignments.put(k, v.partitions())); Set desiredParititions = getDesiredPartitions(currentAssignments.size()); Map> desired = computeDesiredAssignments(currentAssignments, desiredParititions); // are there any workers with extra partitions? If so need to unload those first. 
+ int unloads = 0; for (Map.Entry> entry : desired.entrySet()) { HostAndPort worker = entry.getKey(); Set partitions = entry.getValue(); @@ -84,24 +87,31 @@ public void managerWorkers() throws Exception { if (!Sets.difference(curr, partitions).isEmpty()) { // This worker has extra partitions that are not desired var intersection = Sets.intersection(curr, partitions); - if (!setWorkerPartitions(worker, curr, intersection)) { + if (!setWorkerPartitions(worker, currentPartitions.get(worker).updateId(), + intersection)) { log.debug("Failed to set partitions for {} to {}", worker, intersection); // could not set, so start completely over continue outer; } else { log.debug("Set partitions for {} to {} from {}", worker, intersection, curr); + unloads++; } - currentAssignments.put(worker, intersection); } } + if (unloads > 0) { + // some tablets were unloaded, so start over and get new update ids and the current + // partitions + continue outer; + } + // Load all partitions on all workers.. for (Map.Entry> entry : desired.entrySet()) { HostAndPort worker = entry.getKey(); Set partitions = entry.getValue(); var curr = currentAssignments.getOrDefault(worker, Set.of()); if (!curr.equals(partitions)) { - if (!setWorkerPartitions(worker, curr, partitions)) { + if (!setWorkerPartitions(worker, currentPartitions.get(worker).updateId(), partitions)) { log.debug("Failed to set partitions for {} to {}", worker, partitions); // could not set, so start completely over continue outer; @@ -114,26 +124,24 @@ public void managerWorkers() throws Exception { } /** - * Sets the complete set of partitions a server should work on. It will only succeed if the - * current set we pass in matches the severs actual current set of partitions. Passing the current - * set avoids some race conditions w/ previously queued network messages, it's a distributed - * compare and set mechanism that can detect changes. + * Sets the complete set of partitions a server should work on. 
It will only succeed if the update + * id is valid. The update id avoids race conditions w/ previously queued network messages, it's a + * distributed compare and set mechanism that can detect changes. * * @param address The server to set partitions on - * @param current What we think the servers current set of fate partitions are. + * @param updateId What we think the servers current set of fate partitions are. * @param desired The new set of fate partitions this server should start working. It should only * work on these and nothing else. * @return true if the partitions were set false if they were not set. * @throws TException */ - private boolean setWorkerPartitions(HostAndPort address, Set current, + private boolean setWorkerPartitions(HostAndPort address, long updateId, Set desired) throws TException { // TODO make a compare and set type RPC that uses the current and desired FateWorkerService.Client client = ThriftUtil.getClient(ThriftClientTypes.FATE_WORKER, address, context); try { - return client.setPartitions(TraceUtil.traceInfo(), context.rpcCreds(), - current.stream().map(FatePartition::toThrift).toList(), + return client.setPartitions(TraceUtil.traceInfo(), context.rpcCreds(), updateId, desired.stream().map(FatePartition::toThrift).toList()); } finally { ThriftUtil.returnClient(client, context); @@ -216,13 +224,16 @@ private Set getDesiredPartitions(int numWorkers) { return desired; } - private Map> getCurrentAssignments() throws TException { + record CurrentPartitions(long updateId, Set partitions) { + } + + private Map getCurrentAssignments() throws TException { var workers = context.getServerPaths().getManagerWorker(DEFAULT_RG_ONLY, AddressSelector.all(), true); log.debug("workers : " + workers); - Map> currentAssignments = new HashMap<>(); + Map currentAssignments = new HashMap<>(); for (var worker : workers) { var address = HostAndPort.fromString(worker.getServer()); @@ -232,8 +243,9 @@ private Map> getCurrentAssignments() throws TExce try { var 
tparitions = client.getPartitions(TraceUtil.traceInfo(), context.rpcCreds()); - var partitions = tparitions.stream().map(FatePartition::from).collect(Collectors.toSet()); - currentAssignments.put(address, partitions); + var partitions = + tparitions.partitions.stream().map(FatePartition::from).collect(Collectors.toSet()); + currentAssignments.put(address, new CurrentPartitions(tparitions.updateId, partitions)); } finally { ThriftUtil.returnClient(client, context); } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index 8817474dfd3..a31e9a7bd1c 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -34,11 +34,13 @@ import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.thrift.FateWorkerService; import org.apache.accumulo.core.fate.thrift.TFatePartition; +import org.apache.accumulo.core.fate.thrift.TFatePartitions; import org.apache.accumulo.core.fate.user.UserFateStore; import org.apache.accumulo.core.fate.zookeeper.ZooUtil; import org.apache.accumulo.core.lock.ServiceLock; import org.apache.accumulo.core.metadata.SystemTables; import org.apache.accumulo.core.securityImpl.thrift.TCredentials; +import org.apache.accumulo.core.util.LazySingletons; import org.apache.accumulo.manager.tableOps.FateEnv; import org.apache.accumulo.manager.tableOps.TraceRepo; import org.apache.accumulo.server.ServerContext; @@ -72,8 +74,10 @@ public void setLock(ServiceLock lock) { } + private volatile long expectedUpdateId = 0; + @Override - public List getPartitions(TInfo tinfo, TCredentials credentials) + public TFatePartitions getPartitions(TInfo tinfo, TCredentials credentials) throws ThriftSecurityException { if (!security.canPerformSystemActions(credentials)) { throw new 
AccumuloSecurityException(credentials.getPrincipal(), @@ -81,32 +85,47 @@ public List getPartitions(TInfo tinfo, TCredentials credentials) } var localFate = fate; + + // generate a new one time use update id + long updateId = LazySingletons.RANDOM.get().nextLong(); + while (updateId == 0) { + updateId = LazySingletons.RANDOM.get().nextLong(); + } + + // invalidate any outstanding updates and set the new update id + expectedUpdateId = updateId; + if (localFate == null) { - return List.of(); + return new TFatePartitions(updateId, List.of()); } else { - return localFate.getPartitions().stream().map(FatePartition::toThrift).toList(); + return new TFatePartitions(updateId, + localFate.getPartitions().stream().map(FatePartition::toThrift).toList()); } } @Override - public boolean setPartitions(TInfo tinfo, TCredentials credentials, List expected, + public boolean setPartitions(TInfo tinfo, TCredentials credentials, long updateId, List desired) throws ThriftSecurityException { if (!security.canPerformSystemActions(credentials)) { throw new AccumuloSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED).asThriftException(); } - var localFate = fate; - if (localFate != null) { - var expectedSet = expected.stream().map(FatePartition::from).collect(Collectors.toSet()); - var desiredSet = desired.stream().map(FatePartition::from).collect(Collectors.toSet()); - if (localFate.setPartitions(expectedSet, desiredSet)) { - log.info("Changed partitions from {} to {}", expectedSet, desiredSet); - return true; + synchronized (this) { + if (updateId != 0 && updateId == expectedUpdateId) { + // Zero is not an accepted update id, so set to zero which makes it so that an update id can + // only be used once. 
+ expectedUpdateId = 0; + var localFate = fate; + if (localFate != null) { + var desiredSet = desired.stream().map(FatePartition::from).collect(Collectors.toSet()); + var oldPartitions = localFate.setPartitions(desiredSet); + log.info("Changed partitions from {} to {}", oldPartitions, desiredSet); + return true; + } } + log.info("Did not change partitions to {}", desired); + return false; } - - log.info("Did not change partitions to {}", desired); - return false; } } From 1952a477f6ecc7b0949e58cf718e4b34d2c2b4bf Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Wed, 11 Feb 2026 23:57:37 +0000 Subject: [PATCH 16/38] WIP --- .../apache/accumulo/manager/fate/FateWorker.java | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index a31e9a7bd1c..d89c57eb726 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -74,7 +74,7 @@ public void setLock(ServiceLock lock) { } - private volatile long expectedUpdateId = 0; + private volatile Long expectedUpdateId = null; @Override public TFatePartitions getPartitions(TInfo tinfo, TCredentials credentials) @@ -88,9 +88,6 @@ public TFatePartitions getPartitions(TInfo tinfo, TCredentials credentials) // generate a new one time use update id long updateId = LazySingletons.RANDOM.get().nextLong(); - while (updateId == 0) { - updateId = LazySingletons.RANDOM.get().nextLong(); - } // invalidate any outstanding updates and set the new update id expectedUpdateId = updateId; @@ -112,10 +109,9 @@ public boolean setPartitions(TInfo tinfo, TCredentials credentials, long updateI } synchronized (this) { - if (updateId != 0 && updateId == expectedUpdateId) { - // Zero is not an accepted update id, so set to zero which makes it so that an 
update id can - // only be used once. - expectedUpdateId = 0; + if (expectedUpdateId != null && updateId == expectedUpdateId) { + // Set to null which makes it so that an update id can only be used once. + expectedUpdateId = null; var localFate = fate; if (localFate != null) { var desiredSet = desired.stream().map(FatePartition::from).collect(Collectors.toSet()); @@ -124,7 +120,7 @@ public boolean setPartitions(TInfo tinfo, TCredentials credentials, long updateI return true; } } - log.info("Did not change partitions to {}", desired); + log.debug("Did not change partitions to {} expectedUpdateId:{} updateId:{}", desired, expectedUpdateId, updateId); return false; } } From 765d1e170ade0117a731ca9b2c2325cf3ebb1d6a Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 12 Feb 2026 17:22:21 +0000 Subject: [PATCH 17/38] WIP --- .../accumulo/manager/fate/FateManager.java | 3 +++ .../accumulo/manager/fate/FateWorker.java | 25 ++++++++++++------- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 3f5262fc570..0eeec86eb1d 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -224,6 +224,9 @@ private Set getDesiredPartitions(int numWorkers) { return desired; } + // The updateId accomplishes two things. First it ensures that setting partition RPC can only + // execute once on the server side. Second when a new update id is requested it cancels any + // outstanding RPCs to set partitions that have not executed yet. 
record CurrentPartitions(long updateId, Set partitions) { } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index d89c57eb726..0f4f29fed2a 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -74,7 +74,7 @@ public void setLock(ServiceLock lock) { } - private volatile Long expectedUpdateId = null; + private Long expectedUpdateId = null; @Override public TFatePartitions getPartitions(TInfo tinfo, TCredentials credentials) @@ -89,14 +89,20 @@ public TFatePartitions getPartitions(TInfo tinfo, TCredentials credentials) // generate a new one time use update id long updateId = LazySingletons.RANDOM.get().nextLong(); - // invalidate any outstanding updates and set the new update id - expectedUpdateId = updateId; + // Getting the partitions and setting the new update id must be mutually exclusive with any + // updates of the partitions concurrently executing. This ensures the new update id goes with + // the current partitions returned. 
+ synchronized (this) { + // invalidate any queued partitions update that have not executed yet and set the new update + // id + expectedUpdateId = updateId; - if (localFate == null) { - return new TFatePartitions(updateId, List.of()); - } else { - return new TFatePartitions(updateId, - localFate.getPartitions().stream().map(FatePartition::toThrift).toList()); + if (localFate == null) { + return new TFatePartitions(updateId, List.of()); + } else { + return new TFatePartitions(updateId, + localFate.getPartitions().stream().map(FatePartition::toThrift).toList()); + } } } @@ -120,7 +126,8 @@ public boolean setPartitions(TInfo tinfo, TCredentials credentials, long updateI return true; } } - log.debug("Did not change partitions to {} expectedUpdateId:{} updateId:{}", desired, expectedUpdateId, updateId); + log.debug("Did not change partitions to {} expectedUpdateId:{} updateId:{}", desired, + expectedUpdateId, updateId); return false; } } From 0dd14703eb361c5279058985d2a8d5d1ac5af159 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 12 Feb 2026 18:53:07 +0000 Subject: [PATCH 18/38] WIP --- .../core/client/admin/servers/ServerId.java | 2 +- .../apache/accumulo/core/conf/Property.java | 2 + .../org/apache/accumulo/core/fate/Fate.java | 2 + .../accumulo/server/AbstractServer.java | 2 +- .../server/rpc/ThriftProcessorTypes.java | 10 +- .../accumulo/manager/ManagerWorker.java | 129 ++++++++---------- .../accumulo/manager/fate/FateWorker.java | 21 ++- 7 files changed, 76 insertions(+), 92 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/servers/ServerId.java b/core/src/main/java/org/apache/accumulo/core/client/admin/servers/ServerId.java index 9125e188d25..63c375d2fac 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/admin/servers/ServerId.java +++ b/core/src/main/java/org/apache/accumulo/core/client/admin/servers/ServerId.java @@ -38,7 +38,7 @@ public final class ServerId implements Comparable { * @since 4.0.0 */ public 
enum Type { - MANAGER, MONITOR, GARBAGE_COLLECTOR, COMPACTOR, SCAN_SERVER, TABLET_SERVER, MANAGER_WORKER; + MANAGER, MONITOR, GARBAGE_COLLECTOR, COMPACTOR, SCAN_SERVER, TABLET_SERVER, MANAGER_ASSISTANT; } private final Type type; diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java index dd311d2eda0..4507aa39d60 100644 --- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java +++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java @@ -401,6 +401,8 @@ was changed and it now can accept multiple class names. The metrics spi was intr "Properties in this category affect the behavior of the manager server.", "2.1.0"), MANAGER_CLIENTPORT("manager.port.client", "9999", PropertyType.PORT, "The port used for handling client connections on the manager.", "1.3.5"), + MANAGER_ASSISTANTPORT("manager.port.assistant", "9994", PropertyType.PORT, + "The port used by the primary manager to assign task to all manager processes.", "4.0.0"), MANAGER_TABLET_BALANCER("manager.tablet.balancer", "org.apache.accumulo.core.spi.balancer.TableLoadBalancer", PropertyType.CLASSNAME, "The balancer class that accumulo will use to make tablet assignment and " diff --git a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java index e0815937db2..95ba495beeb 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java @@ -276,6 +276,8 @@ public Fate(T environment, FateStore store, boolean runDeadResCleaner, ScheduledExecutorService deadResCleanerExecutor = null; if (runDeadResCleaner) { + // TODO make this use partitions + // Create a dead reservation cleaner for this store that will periodically clean up // reservations held by dead processes, if they exist. 
deadResCleanerExecutor = ThreadPools.getServerThreadPools().createScheduledExecutorService(1, diff --git a/server/base/src/main/java/org/apache/accumulo/server/AbstractServer.java b/server/base/src/main/java/org/apache/accumulo/server/AbstractServer.java index 2b65dc6b0ac..899ea466695 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/AbstractServer.java +++ b/server/base/src/main/java/org/apache/accumulo/server/AbstractServer.java @@ -174,7 +174,7 @@ protected AbstractServer(ServerId.Type serverType, ConfigOpts opts, case TABLET_SERVER: metricSource = MetricSource.TABLET_SERVER; break; - case MANAGER_WORKER: + case MANAGER_ASSISTANT: // TODO create a new source? metricSource = MetricSource.MANAGER; break; diff --git a/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java b/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java index 86a5a77dc96..975c7ca3fdb 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java +++ b/server/base/src/main/java/org/apache/accumulo/server/rpc/ThriftProcessorTypes.java @@ -147,15 +147,9 @@ public static TMultiplexedProcessor getManagerTProcessor( return muxProcessor; } - public static TMultiplexedProcessor getManagerWorkerTProcessor( - ServerProcessService.Iface processHandler, ClientServiceHandler clientHandler, - FateWorkerService.Iface fateWorkerHandler, ServerContext context) { + public static TMultiplexedProcessor + getManagerWorkerTProcessor(FateWorkerService.Iface fateWorkerHandler, ServerContext context) { TMultiplexedProcessor muxProcessor = new TMultiplexedProcessor(); - muxProcessor.registerProcessor(CLIENT.getServiceName(), CLIENT.getTProcessor( - ClientService.Processor.class, ClientService.Iface.class, clientHandler, context)); - muxProcessor.registerProcessor(SERVER_PROCESS.getServiceName(), - SERVER_PROCESS.getTProcessor(ServerProcessService.Processor.class, - ServerProcessService.Iface.class, 
processHandler, context)); muxProcessor.registerProcessor(FATE_WORKER.getServiceName(), FATE_WORKER.getTProcessor(FateWorkerService.Processor.class, FateWorkerService.Iface.class, fateWorkerHandler, context)); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java index 84ab1f397d3..9fecefa7652 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java @@ -24,9 +24,9 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; -import org.apache.accumulo.core.cli.ConfigOpts; import org.apache.accumulo.core.client.admin.servers.ServerId; import org.apache.accumulo.core.conf.Property; +import org.apache.accumulo.core.data.ResourceGroupId; import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter; import org.apache.accumulo.core.fate.zookeeper.ZooUtil; import org.apache.accumulo.core.lock.ServiceLock; @@ -34,60 +34,77 @@ import org.apache.accumulo.core.lock.ServiceLockPaths; import org.apache.accumulo.core.lock.ServiceLockSupport; import org.apache.accumulo.manager.fate.FateWorker; -import org.apache.accumulo.server.AbstractServer; import org.apache.accumulo.server.ServerContext; -import org.apache.accumulo.server.client.ClientServiceHandler; +import org.apache.accumulo.server.rpc.ServerAddress; import org.apache.accumulo.server.rpc.TServerUtils; import org.apache.accumulo.server.rpc.ThriftProcessorTypes; -import org.apache.accumulo.server.security.SecurityUtil; import org.apache.thrift.TProcessor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.net.HostAndPort; + /** * An assistant to the manager */ -public class ManagerWorker extends AbstractServer { +// TODO because this does not extend abstract server it does not get some of the benefits like +// monitoring of lock +public class ManagerWorker /* 
extends AbstractServer */ { private static final Logger log = LoggerFactory.getLogger(ManagerWorker.class); + private final ServerContext context; + private final String bindAddress; private volatile ServiceLock managerWorkerLock; private FateWorker fateWorker; + private volatile ServerAddress thriftServer; - protected ManagerWorker(ConfigOpts opts, String[] args) { - super(ServerId.Type.MANAGER_WORKER, opts, ServerContext::new, args); + protected ManagerWorker(ServerContext context, String bindAddress) { + this.context = context; + this.bindAddress = bindAddress; } - protected void startClientService() throws UnknownHostException { + public ServerContext getContext() { + return context; + } - fateWorker = new FateWorker(getContext(), this::getLock); + private ResourceGroupId getResourceGroup() { + return ResourceGroupId.DEFAULT; + } + + private HostAndPort startClientService() throws UnknownHostException { + fateWorker = new FateWorker(getContext()); // This class implements TabletClientService.Iface and then delegates calls. Be sure // to set up the ThriftProcessor using this class, not the delegate. - ClientServiceHandler clientHandler = new ClientServiceHandler(getContext()); - TProcessor processor = ThriftProcessorTypes.getManagerWorkerTProcessor(this, clientHandler, - fateWorker, getContext()); - - // TODO using scan server props - updateThriftServer(() -> { - return TServerUtils.createThriftServer(getContext(), getBindAddress(), - Property.SSERV_CLIENTPORT, processor, this.getClass().getSimpleName(), - Property.SSERV_PORTSEARCH, Property.SSERV_MINTHREADS, Property.SSERV_MINTHREADS_TIMEOUT, - Property.SSERV_THREADCHECK); - }, true); + TProcessor processor = + ThriftProcessorTypes.getManagerWorkerTProcessor(fateWorker, getContext()); + + // TODO should the minthreads and timeout have their own props? Probably, do not expect this to + // have lots of RPCs so could be less. 
+ var thriftServer = + TServerUtils.createThriftServer(getContext(), bindAddress, Property.MANAGER_ASSISTANTPORT, + processor, this.getClass().getSimpleName(), null, Property.MANAGER_MINTHREADS, + Property.MANAGER_MINTHREADS_TIMEOUT, Property.MANAGER_THREADCHECK); + thriftServer.startThriftServer("Thrift Manager Assistant Server"); + log.info("Starting {} Thrift server, listening on {}", this.getClass().getSimpleName(), + thriftServer.address); + return thriftServer.address; } - private ServiceLock announceExistence() { + private void announceExistence(HostAndPort advertiseAddress) { final ZooReaderWriter zoo = getContext().getZooSession().asReaderWriter(); try { final ServiceLockPaths.ServiceLockPath zLockPath = getContext().getServerPaths() - .createManagerWorkerPath(getResourceGroup(), getAdvertiseAddress()); - ServiceLockSupport.createNonHaServiceLockPath(ServerId.Type.MANAGER_WORKER, zoo, zLockPath); + .createManagerWorkerPath(getResourceGroup(), advertiseAddress); + ServiceLockSupport.createNonHaServiceLockPath(ServerId.Type.MANAGER_ASSISTANT, zoo, + zLockPath); var serverLockUUID = UUID.randomUUID(); managerWorkerLock = new ServiceLock(getContext().getZooSession(), zLockPath, serverLockUUID); + // TODO shutdown supplier, anything to do here? 
ServiceLock.LockWatcher lw = new ServiceLockSupport.ServiceLockWatcher( - ServerId.Type.MANAGER_WORKER, () -> getShutdownComplete().get(), - (type) -> getContext().getLowMemoryDetector().logGCInfo(getConfiguration())); + ServerId.Type.MANAGER_ASSISTANT, () -> false, + (type) -> getContext().getLowMemoryDetector().logGCInfo(getContext().getConfiguration())); for (int i = 0; i < 120 / 5; i++) { zoo.putPersistentData(zLockPath.toString(), new byte[0], ZooUtil.NodeExistsPolicy.SKIP); @@ -96,72 +113,42 @@ private ServiceLock announceExistence() { for (ServiceLockData.ThriftService svc : new ServiceLockData.ThriftService[] { ServiceLockData.ThriftService.CLIENT, ServiceLockData.ThriftService.FATE_WORKER}) { descriptors.addService(new ServiceLockData.ServiceDescriptor(serverLockUUID, svc, - getAdvertiseAddress().toString(), this.getResourceGroup())); + advertiseAddress.toString(), this.getResourceGroup())); } if (managerWorkerLock.tryLock(lw, new ServiceLockData(descriptors))) { - log.debug("Obtained scan server lock {}", managerWorkerLock.getLockPath()); - return managerWorkerLock; + log.debug("Obtained manager assistant lock {}", managerWorkerLock.getLockPath()); + return; } - log.info("Waiting for manager worker lock"); + log.info("Waiting for manager assistant lock"); sleepUninterruptibly(5, TimeUnit.SECONDS); } String msg = "Too many retries, exiting."; log.info(msg); throw new RuntimeException(msg); } catch (Exception e) { - log.info("Could not obtain manager worker lock, exiting.", e); + log.info("Could not obtain manager assistant lock, exiting.", e); throw new RuntimeException(e); } } - @Override - public ServiceLock getLock() { - return managerWorkerLock; - } - - @Override - public void run() { - try { - waitForUpgrade(); - } catch (InterruptedException e) { - log.error("Interrupted while waiting for upgrade to complete, exiting..."); - System.exit(1); - } - - SecurityUtil.serverLogin(getConfiguration()); - - // TODO metrics - + public void start() { + 
HostAndPort advertiseAddress; try { - startClientService(); + advertiseAddress = startClientService(); } catch (UnknownHostException e1) { - throw new RuntimeException("Failed to start the manager worker client service", e1); + throw new RuntimeException("Failed to start the manager assistant client service", e1); } - ServiceLock lock = announceExistence(); - this.getContext().setServiceLock(lock); - fateWorker.setLock(lock); - - while (!isShutdownRequested()) { - if (Thread.currentThread().isInterrupted()) { - log.info("Server process thread has been interrupted, shutting down"); - break; - } - try { - Thread.sleep(1000); - // TODO update idle status - } catch (InterruptedException e) { - log.info("Interrupt Exception received, shutting down"); - gracefulShutdown(getContext().rpcCreds()); - } - } + announceExistence(advertiseAddress); + fateWorker.setLock(getLock()); + } - log.debug("Stopping Thrift Servers"); - getThriftServer().stop(); + public void stop() { + thriftServer.server.stop(); } - public static void main(String[] args) throws Exception { - AbstractServer.startServer(new ManagerWorker(new ConfigOpts(), args), log); + public ServiceLock getLock() { + return managerWorkerLock; } } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index 0f4f29fed2a..747a78f46ef 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -53,14 +53,13 @@ public class FateWorker implements FateWorkerService.Iface { private static final Logger log = LoggerFactory.getLogger(FateWorker.class); private final ServerContext context; private final AuditedSecurityOperation security; - private final Set currentPartitions; private volatile Fate fate; - public FateWorker(ServerContext ctx, Supplier serviceLockSupplier) { + public 
FateWorker(ServerContext ctx) { this.context = ctx; this.security = ctx.getSecurityOperation(); - this.currentPartitions = Collections.synchronizedSet(new HashSet<>()); this.fate = null; + // TODO fate metrics } public void setLock(ServiceLock lock) { @@ -70,7 +69,7 @@ public void setLock(ServiceLock lock) { new UserFateStore<>(context, SystemTables.FATE.tableName(), lock.getLockID(), isLockHeld); this.fate = new Fate<>(env, store, false, TraceRepo::toLogString, context.getConfiguration(), context.getScheduledExecutor()); - // TODO where will the 2 fate cleanup task run? + // TODO where will the 2 fate cleanup task run? Make dead reservation cleaner use partitions... cleanup can run in manager } @@ -114,21 +113,21 @@ public boolean setPartitions(TInfo tinfo, TCredentials credentials, long updateI SecurityErrorCode.PERMISSION_DENIED).asThriftException(); } + synchronized (this) { - if (expectedUpdateId != null && updateId == expectedUpdateId) { + var localFate = fate; + if (localFate != null && expectedUpdateId != null && updateId == expectedUpdateId) { // Set to null which makes it so that an update id can only be used once. 
expectedUpdateId = null; - var localFate = fate; - if (localFate != null) { var desiredSet = desired.stream().map(FatePartition::from).collect(Collectors.toSet()); var oldPartitions = localFate.setPartitions(desiredSet); log.info("Changed partitions from {} to {}", oldPartitions, desiredSet); return true; - } + }else { + log.debug("Did not change partitions to {} expectedUpdateId:{} updateId:{} localFate==null:{}", desired, + expectedUpdateId, updateId, localFate==null); + return false; } - log.debug("Did not change partitions to {} expectedUpdateId:{} updateId:{}", desired, - expectedUpdateId, updateId); - return false; } } } From 68fefef4662f34dcfac9c6015ca7c0cea0737b4a Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Fri, 13 Feb 2026 02:04:13 +0000 Subject: [PATCH 19/38] WIP --- .../apache/accumulo/core/conf/Property.java | 5 +- .../accumulo/core/fate/AbstractFateStore.java | 11 +- .../org/apache/accumulo/core/fate/Fate.java | 36 +- .../accumulo/core/fate/FatePartition.java | 4 +- .../apache/accumulo/core/fate/FateStore.java | 13 +- .../accumulo/core/fate/ReadOnlyFateStore.java | 2 +- .../core/fate/user/UserFateStore.java | 8 +- .../core/fate/zookeeper/MetaFateStore.java | 4 +- .../accumulo/core/logging/FateLogger.java | 23 +- .../core/fate/thrift/FateWorkerService.java | 783 ++++++++++++++++++ .../manager/thrift/ManagerClientService.java | 619 ++++++++++++++ core/src/main/thrift/fate-worker.thrift | 8 +- core/src/main/thrift/manager.thrift | 5 + .../apache/accumulo/core/fate/TestStore.java | 7 +- .../org/apache/accumulo/manager/Manager.java | 24 +- ...nagerWorker.java => ManagerAssistant.java} | 20 +- .../manager/ManagerClientServiceHandler.java | 10 + .../accumulo/manager/fate/FateManager.java | 116 ++- .../accumulo/manager/fate/FateWorker.java | 38 +- .../accumulo/manager/fate/FateWorkerEnv.java | 114 ++- .../accumulo/test/MultipleManagerIT.java | 26 +- .../test/fate/MultipleStoresITBase.java | 18 +- 22 files changed, 1779 insertions(+), 115 deletions(-) 
rename server/manager/src/main/java/org/apache/accumulo/manager/{ManagerWorker.java => ManagerAssistant.java} (88%) diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java index 4507aa39d60..94a9a75257e 100644 --- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java +++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java @@ -401,8 +401,11 @@ was changed and it now can accept multiple class names. The metrics spi was intr "Properties in this category affect the behavior of the manager server.", "2.1.0"), MANAGER_CLIENTPORT("manager.port.client", "9999", PropertyType.PORT, "The port used for handling client connections on the manager.", "1.3.5"), - MANAGER_ASSISTANTPORT("manager.port.assistant", "9994", PropertyType.PORT, + MANAGER_ASSISTANT_PORT("manager.assistant.port", "10000", PropertyType.PORT, "The port used by the primary manager to assign task to all manager processes.", "4.0.0"), + MANAGER_ASSISTANT_PORTSEARCH("manager.assistant.port.search", "true", PropertyType.BOOLEAN, + "if the manager.assistant.port ports are in use, search higher ports until one is available.", + "4.0.0"), MANAGER_TABLET_BALANCER("manager.tablet.balancer", "org.apache.accumulo.core.spi.balancer.TableLoadBalancer", PropertyType.CLASSNAME, "The balancer class that accumulo will use to make tablet assignment and " diff --git a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java index f0d3ac8500b..3a7b30c76fb 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java @@ -205,6 +205,8 @@ public void runnable(Set partitions, BooleanSupplier keepWaiting, if (beforeCount == unreservedRunnableCount.getCount()) { long waitTime = 5000; synchronized (deferred) { + deferred.keySet().removeIf( + 
fateId -> partitions.stream().noneMatch(partition -> partition.contains(fateId))); if (!deferred.isEmpty()) { waitTime = deferred.values().stream() .mapToLong(countDownTimer -> countDownTimer.timeLeft(TimeUnit.MILLISECONDS)).min() @@ -245,9 +247,10 @@ public ReadOnlyFateTxStore read(FateId fateId) { } @Override - public Map getActiveReservations() { - return list().filter(entry -> entry.getFateReservation().isPresent()).collect(Collectors - .toMap(FateIdStatus::getFateId, entry -> entry.getFateReservation().orElseThrow())); + public Map getActiveReservations(Set partitions) { + return getTransactions(partitions, EnumSet.allOf(TStatus.class)) + .filter(entry -> entry.getFateReservation().isPresent()).collect(Collectors + .toMap(FateIdStatus::getFateId, entry -> entry.getFateReservation().orElseThrow())); } protected boolean isRunnable(TStatus status) { @@ -426,7 +429,7 @@ public interface FateIdGenerator { FateId newRandomId(FateInstanceType instanceType); } - protected void seededTx() { + public void seeded() { unreservedRunnableCount.increment(); } diff --git a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java index 95ba495beeb..c283d5b2702 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java @@ -47,6 +47,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -60,6 +62,7 @@ import org.apache.accumulo.core.manager.thrift.TFateOperation; import org.apache.accumulo.core.util.UtilWaitThread; import org.apache.accumulo.core.util.threads.ThreadPools; +import org.apache.hadoop.util.Sets; import org.apache.thrift.TApplicationException; 
import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -252,7 +255,11 @@ private class DeadReservationCleaner implements Runnable { @Override public void run() { if (keepRunning.get()) { - store.deleteDeadReservations(); + Set partitions; + synchronized (fateExecutors) { + partitions = currentPartitions; + } + store.deleteDeadReservations(partitions); } } } @@ -394,15 +401,27 @@ public FateId startTransaction() { return store.create(); } + private AtomicReference> seedingConsumer = new AtomicReference<>(fid -> {}); + + // TODO move seeding and waiting operation into their own class, the primary manager will not need + // to create a user fate object. Fate could extend this class to ease the change. + + public void setSeedingConsumer(Consumer seedingConsumer) { + this.seedingConsumer.set(seedingConsumer); + } + public Seeder beginSeeding() { + // TODO pass seeding consumer return store.beginSeeding(); } public void seedTransaction(FateOperation fateOp, FateKey fateKey, Repo repo, boolean autoCleanUp) { try (var seeder = store.beginSeeding()) { - @SuppressWarnings("unused") - var unused = seeder.attemptToSeedTransaction(fateOp, fateKey, repo, autoCleanUp); + seeder.attemptToSeedTransaction(fateOp, fateKey, repo, autoCleanUp) + .thenAccept(optionalFatId -> { + optionalFatId.ifPresent(seedingConsumer.get()); + }); } } @@ -412,6 +431,17 @@ public void seedTransaction(FateOperation fateOp, FateId fateId, Repo repo, boolean autoCleanUp, String goalMessage) { log.info("[{}] Seeding {} {} {}", store.type(), fateOp, fateId, goalMessage); store.seedTransaction(fateOp, fateId, repo, autoCleanUp); + seedingConsumer.get().accept(fateId); + } + + public void seeded(Set partitions) { + synchronized (fateExecutors) { + if (Sets.intersection(currentPartitions, partitions).isEmpty()) { + return; + } + } + + store.seeded(); } // check on the transaction diff --git a/core/src/main/java/org/apache/accumulo/core/fate/FatePartition.java 
b/core/src/main/java/org/apache/accumulo/core/fate/FatePartition.java index 2c1ca11a718..973c22030d6 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/FatePartition.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/FatePartition.java @@ -54,9 +54,9 @@ public boolean isEndInclusive() { public boolean contains(FateId fateId) { if (isEndInclusive()) { - return start.compareTo(fateId) >= 0 && end.compareTo(fateId) <= 0; + return fateId.compareTo(start) >= 0 && fateId.compareTo(end) <= 0; } else { - return start.compareTo(fateId) >= 0 && end.compareTo(fateId) < 0; + return fateId.compareTo(start) >= 0 && fateId.compareTo(end) < 0; } } diff --git a/core/src/main/java/org/apache/accumulo/core/fate/FateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/FateStore.java index e28e7936421..9436cce05bd 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/FateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/FateStore.java @@ -27,8 +27,10 @@ import java.util.Arrays; import java.util.Objects; import java.util.Optional; +import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; +import java.util.function.BooleanSupplier; import org.apache.accumulo.core.fate.zookeeper.ZooUtil; import org.apache.hadoop.io.DataInputBuffer; @@ -151,8 +153,8 @@ interface FateTxStore extends ReadOnlyFateTxStore { * longer interact with it. * * @param deferTime time to keep this transaction from being returned by - * {@link #runnable(java.util.concurrent.atomic.AtomicBoolean, java.util.function.Consumer)}. - * Must be non-negative. + * {@link #runnable(Set, BooleanSupplier, java.util.function.Consumer)}. Must be + * non-negative. */ void unreserve(Duration deferTime); } @@ -248,7 +250,7 @@ public int hashCode() { * can no longer be worked on so their reservation should be deleted, so they can be picked up and * worked on again. 
*/ - void deleteDeadReservations(); + void deleteDeadReservations(Set partitions); /** * Attempt to reserve the fate transaction. @@ -268,6 +270,11 @@ public int hashCode() { */ FateTxStore reserve(FateId fateId); + /** + * Notification that something in this store was seeded by another process. + */ + void seeded(); + @Override void close(); } diff --git a/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyFateStore.java index 4776fe3997b..ad7baae9bcc 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/ReadOnlyFateStore.java @@ -156,7 +156,7 @@ interface FateIdStatus { * @return a map of the current active reservations with the keys being the transaction that is * reserved and the value being the value stored to indicate the transaction is reserved. */ - Map getActiveReservations(); + Map getActiveReservations(Set partitions); /** * Finds all fate ops that are (IN_PROGRESS, SUBMITTED, or FAILED_IN_PROGRESS) and unreserved. 
Ids diff --git a/core/src/main/java/org/apache/accumulo/core/fate/user/UserFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/user/UserFateStore.java index 7b20caad884..158c2ce1aa2 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/user/UserFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/user/UserFateStore.java @@ -198,7 +198,7 @@ private boolean seedTransaction(Supplier> mutatorFactory, String var status = mutator.tryMutate(); if (status == FateMutator.Status.ACCEPTED) { // signal to the super class that a new fate transaction was seeded and is ready to run - seededTx(); + seeded(); log.trace("Attempt to seed {} returned {}", logId, status); return true; } else if (status == FateMutator.Status.REJECTED) { @@ -257,8 +257,8 @@ public Optional> tryReserve(FateId fateId) { } @Override - public void deleteDeadReservations() { - for (Entry activeRes : getActiveReservations().entrySet()) { + public void deleteDeadReservations(Set partitions) { + for (Entry activeRes : getActiveReservations(partitions).entrySet()) { FateId fateId = activeRes.getKey(); FateReservation reservation = activeRes.getValue(); if (!isLockHeld.test(reservation.getLockID())) { @@ -467,7 +467,7 @@ public void close() { var future = pending.get(fateId).getSecond(); switch (result.getValue()) { case ACCEPTED: - seededTx(); + seeded(); log.trace("Attempt to seed {} returned {}", fateId.canonical(), status); // Complete the future with the fatId and remove from pending future.complete(Optional.of(fateId)); diff --git a/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/MetaFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/MetaFateStore.java index 7d7f9bd8852..ae85e3f7179 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/MetaFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/MetaFateStore.java @@ -280,8 +280,8 @@ public Optional> tryReserve(FateId fateId) { } @Override - 
public void deleteDeadReservations() { - for (Map.Entry entry : getActiveReservations().entrySet()) { + public void deleteDeadReservations(Set partitions) { + for (Map.Entry entry : getActiveReservations(partitions).entrySet()) { FateId fateId = entry.getKey(); FateReservation reservation = entry.getValue(); if (isLockHeld.test(reservation.getLockID())) { diff --git a/core/src/main/java/org/apache/accumulo/core/logging/FateLogger.java b/core/src/main/java/org/apache/accumulo/core/logging/FateLogger.java index 0d100844947..0fa5fec2968 100644 --- a/core/src/main/java/org/apache/accumulo/core/logging/FateLogger.java +++ b/core/src/main/java/org/apache/accumulo/core/logging/FateLogger.java @@ -115,6 +115,11 @@ public FateTxStore reserve(FateId fateId) { return new LoggingFateTxStore<>(store.reserve(fateId), toLogString, allowForceDel); } + @Override + public void seeded() { + store.seeded(); + } + @Override public Optional> tryReserve(FateId fateId) { return store.tryReserve(fateId) @@ -165,8 +170,8 @@ public Seeder beginSeeding() { public boolean seedTransaction(Fate.FateOperation fateOp, FateId fateId, Repo repo, boolean autoCleanUp) { boolean seeded = store.seedTransaction(fateOp, fateId, repo, autoCleanUp); - if (storeLog.isTraceEnabled()) { - storeLog.trace("{} {} {} {}", fateId, seeded ? "seeded" : "unable to seed", + if (storeLog.isDebugEnabled()) { + storeLog.debug("{} {} {} {}", fateId, seeded ? 
"seeded" : "unable to seed", toLogString.apply(repo), autoCleanUp); } return seeded; @@ -188,13 +193,13 @@ public boolean isDeferredOverflow() { } @Override - public Map getActiveReservations() { - return store.getActiveReservations(); + public Map getActiveReservations(Set partitions) { + return store.getActiveReservations(partitions); } @Override - public void deleteDeadReservations() { - store.deleteDeadReservations(); + public void deleteDeadReservations(Set partitions) { + store.deleteDeadReservations(partitions); } @Override @@ -220,12 +225,12 @@ public CompletableFuture> attemptToSeedTransaction(FateOperatio FateKey fateKey, Repo repo, boolean autoCleanUp) { var future = this.seeder.attemptToSeedTransaction(fateOp, fateKey, repo, autoCleanUp); return future.whenComplete((optional, throwable) -> { - if (storeLog.isTraceEnabled()) { + if (storeLog.isDebugEnabled()) { optional.ifPresentOrElse(fateId -> { - storeLog.trace("{} seeded {} {} {}", fateId, fateKey, toLogString.apply(repo), + storeLog.debug("{} seeded {} {} {}", fateId, fateKey, toLogString.apply(repo), autoCleanUp); }, () -> { - storeLog.trace("Possibly unable to seed {} {} {}", fateKey, toLogString.apply(repo), + storeLog.debug("Possibly unable to seed {} {} {}", fateKey, toLogString.apply(repo), autoCleanUp); }); } diff --git a/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java b/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java index 2ef0da2c4f3..2ad296dafb1 100644 --- a/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java +++ b/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java @@ -33,6 +33,8 @@ public interface Iface { public boolean setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, long updateId, java.util.List desired) throws 
org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException; + public void seeded(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List tpartitions) throws org.apache.thrift.TException; + } public interface AsyncIface { @@ -41,6 +43,8 @@ public interface AsyncIface { public void setPartitions(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, long updateId, java.util.List desired, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void seeded(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List tpartitions, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + } public static class Client extends org.apache.thrift.TServiceClient implements Iface { @@ -123,6 +127,21 @@ public boolean recv_setPartitions() throws org.apache.accumulo.core.clientImpl.t throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "setPartitions failed: unknown result"); } + @Override + public void seeded(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List tpartitions) throws org.apache.thrift.TException + { + send_seeded(tinfo, credentials, tpartitions); + } + + public void send_seeded(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List tpartitions) throws org.apache.thrift.TException + { + seeded_args args = new seeded_args(); + args.setTinfo(tinfo); + args.setCredentials(credentials); + args.setTpartitions(tpartitions); + sendBaseOneway("seeded", args); + } + } public static class 
AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface { public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { @@ -224,6 +243,47 @@ public java.lang.Boolean getResult() throws org.apache.accumulo.core.clientImpl. } } + @Override + public void seeded(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List tpartitions, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + seeded_call method_call = new seeded_call(tinfo, credentials, tpartitions, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class seeded_call extends org.apache.thrift.async.TAsyncMethodCall { + private org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo; + private org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; + private java.util.List tpartitions; + public seeded_call(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List tpartitions, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, true); + this.tinfo = tinfo; + this.credentials = credentials; + this.tpartitions = tpartitions; + } + + @Override + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("seeded", org.apache.thrift.protocol.TMessageType.ONEWAY, 0)); + seeded_args args = new seeded_args(); + args.setTinfo(tinfo); + 
args.setCredentials(credentials); + args.setTpartitions(tpartitions); + args.write(prot); + prot.writeMessageEnd(); + } + + @Override + public Void getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new java.lang.IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return null; + } + } + } public static class Processor extends org.apache.thrift.TBaseProcessor implements org.apache.thrift.TProcessor { @@ -239,6 +299,7 @@ protected Processor(I iface, java.util.Map java.util.Map> getProcessMap(java.util.Map> processMap) { processMap.put("getPartitions", new getPartitions()); processMap.put("setPartitions", new setPartitions()); + processMap.put("seeded", new seeded()); return processMap; } @@ -307,6 +368,33 @@ public setPartitions_result getResult(I iface, setPartitions_args args) throws o } } + public static class seeded extends org.apache.thrift.ProcessFunction { + public seeded() { + super("seeded"); + } + + @Override + public seeded_args getEmptyArgsInstance() { + return new seeded_args(); + } + + @Override + protected boolean isOneway() { + return true; + } + + @Override + protected boolean rethrowUnhandledExceptions() { + return false; + } + + @Override + public org.apache.thrift.TBase getResult(I iface, seeded_args args) throws org.apache.thrift.TException { + iface.seeded(args.tinfo, args.credentials, args.tpartitions); + return null; + } + } + } public static class AsyncProcessor extends org.apache.thrift.TBaseAsyncProcessor { @@ -322,6 +410,7 @@ protected AsyncProcessor(I iface, java.util.Map java.util.Map> getProcessMap(java.util.Map> processMap) { processMap.put("getPartitions", new getPartitions()); 
processMap.put("setPartitions", new setPartitions()); + processMap.put("seeded", new seeded()); return processMap; } @@ -468,6 +557,46 @@ public void start(I iface, setPartitions_args args, org.apache.thrift.async.Asyn } } + public static class seeded extends org.apache.thrift.AsyncProcessFunction { + public seeded() { + super("seeded"); + } + + @Override + public seeded_args getEmptyArgsInstance() { + return new seeded_args(); + } + + @Override + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new org.apache.thrift.async.AsyncMethodCallback() { + @Override + public void onComplete(Void o) { + } + @Override + public void onError(java.lang.Exception e) { + if (e instanceof org.apache.thrift.transport.TTransportException) { + _LOGGER.error("TTransportException inside handler", e); + fb.close(); + } else { + _LOGGER.error("Exception inside oneway handler", e); + } + } + }; + } + + @Override + protected boolean isOneway() { + return true; + } + + @Override + public void start(I iface, seeded_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.seeded(args.tinfo, args.credentials, args.tpartitions,resultHandler); + } + } + } @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) @@ -2690,5 +2819,659 @@ private static S scheme(org.apache. 
} } + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) + public static class seeded_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("seeded_args"); + + private static final org.apache.thrift.protocol.TField TINFO_FIELD_DESC = new org.apache.thrift.protocol.TField("tinfo", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField CREDENTIALS_FIELD_DESC = new org.apache.thrift.protocol.TField("credentials", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField TPARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("tpartitions", org.apache.thrift.protocol.TType.LIST, (short)3); + + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new seeded_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new seeded_argsTupleSchemeFactory(); + + public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo; // required + public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; // required + public @org.apache.thrift.annotation.Nullable java.util.List tpartitions; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TINFO((short)1, "tinfo"), + CREDENTIALS((short)2, "credentials"), + TPARTITIONS((short)3, "tpartitions"); + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TINFO + return TINFO; + case 2: // CREDENTIALS + return CREDENTIALS; + case 3: // TPARTITIONS + return TPARTITIONS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + @Override + public short getThriftFieldId() { + return _thriftId; + } + + @Override + public java.lang.String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TINFO, new org.apache.thrift.meta_data.FieldMetaData("tinfo", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.clientImpl.thrift.TInfo.class))); + tmpMap.put(_Fields.CREDENTIALS, new org.apache.thrift.meta_data.FieldMetaData("credentials", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.securityImpl.thrift.TCredentials.class))); + tmpMap.put(_Fields.TPARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("tpartitions", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFatePartition.class)))); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(seeded_args.class, metaDataMap); + } + + public seeded_args() { + } + + public seeded_args( + org.apache.accumulo.core.clientImpl.thrift.TInfo 
tinfo, + org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, + java.util.List tpartitions) + { + this(); + this.tinfo = tinfo; + this.credentials = credentials; + this.tpartitions = tpartitions; + } + + /** + * Performs a deep copy on other. + */ + public seeded_args(seeded_args other) { + if (other.isSetTinfo()) { + this.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(other.tinfo); + } + if (other.isSetCredentials()) { + this.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(other.credentials); + } + if (other.isSetTpartitions()) { + java.util.List __this__tpartitions = new java.util.ArrayList(other.tpartitions.size()); + for (TFatePartition other_element : other.tpartitions) { + __this__tpartitions.add(new TFatePartition(other_element)); + } + this.tpartitions = __this__tpartitions; + } + } + + @Override + public seeded_args deepCopy() { + return new seeded_args(this); + } + + @Override + public void clear() { + this.tinfo = null; + this.credentials = null; + this.tpartitions = null; + } + + @org.apache.thrift.annotation.Nullable + public org.apache.accumulo.core.clientImpl.thrift.TInfo getTinfo() { + return this.tinfo; + } + + public seeded_args setTinfo(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo) { + this.tinfo = tinfo; + return this; + } + + public void unsetTinfo() { + this.tinfo = null; + } + + /** Returns true if field tinfo is set (has been assigned a value) and false otherwise */ + public boolean isSetTinfo() { + return this.tinfo != null; + } + + public void setTinfoIsSet(boolean value) { + if (!value) { + this.tinfo = null; + } + } + + @org.apache.thrift.annotation.Nullable + public org.apache.accumulo.core.securityImpl.thrift.TCredentials getCredentials() { + return this.credentials; + } + + public seeded_args setCredentials(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) { + 
this.credentials = credentials; + return this; + } + + public void unsetCredentials() { + this.credentials = null; + } + + /** Returns true if field credentials is set (has been assigned a value) and false otherwise */ + public boolean isSetCredentials() { + return this.credentials != null; + } + + public void setCredentialsIsSet(boolean value) { + if (!value) { + this.credentials = null; + } + } + + public int getTpartitionsSize() { + return (this.tpartitions == null) ? 0 : this.tpartitions.size(); + } + + @org.apache.thrift.annotation.Nullable + public java.util.Iterator getTpartitionsIterator() { + return (this.tpartitions == null) ? null : this.tpartitions.iterator(); + } + + public void addToTpartitions(TFatePartition elem) { + if (this.tpartitions == null) { + this.tpartitions = new java.util.ArrayList(); + } + this.tpartitions.add(elem); + } + + @org.apache.thrift.annotation.Nullable + public java.util.List getTpartitions() { + return this.tpartitions; + } + + public seeded_args setTpartitions(@org.apache.thrift.annotation.Nullable java.util.List tpartitions) { + this.tpartitions = tpartitions; + return this; + } + + public void unsetTpartitions() { + this.tpartitions = null; + } + + /** Returns true if field tpartitions is set (has been assigned a value) and false otherwise */ + public boolean isSetTpartitions() { + return this.tpartitions != null; + } + + public void setTpartitionsIsSet(boolean value) { + if (!value) { + this.tpartitions = null; + } + } + + @Override + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + case TINFO: + if (value == null) { + unsetTinfo(); + } else { + setTinfo((org.apache.accumulo.core.clientImpl.thrift.TInfo)value); + } + break; + + case CREDENTIALS: + if (value == null) { + unsetCredentials(); + } else { + setCredentials((org.apache.accumulo.core.securityImpl.thrift.TCredentials)value); + } + break; + + case TPARTITIONS: + if (value == null) { + 
unsetTpartitions(); + } else { + setTpartitions((java.util.List)value); + } + break; + + } + } + + @org.apache.thrift.annotation.Nullable + @Override + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + case TINFO: + return getTinfo(); + + case CREDENTIALS: + return getCredentials(); + + case TPARTITIONS: + return getTpartitions(); + + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + @Override + public boolean isSet(_Fields field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + case TINFO: + return isSetTinfo(); + case CREDENTIALS: + return isSetCredentials(); + case TPARTITIONS: + return isSetTpartitions(); + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that instanceof seeded_args) + return this.equals((seeded_args)that); + return false; + } + + public boolean equals(seeded_args that) { + if (that == null) + return false; + if (this == that) + return true; + + boolean this_present_tinfo = true && this.isSetTinfo(); + boolean that_present_tinfo = true && that.isSetTinfo(); + if (this_present_tinfo || that_present_tinfo) { + if (!(this_present_tinfo && that_present_tinfo)) + return false; + if (!this.tinfo.equals(that.tinfo)) + return false; + } + + boolean this_present_credentials = true && this.isSetCredentials(); + boolean that_present_credentials = true && that.isSetCredentials(); + if (this_present_credentials || that_present_credentials) { + if (!(this_present_credentials && that_present_credentials)) + return false; + if (!this.credentials.equals(that.credentials)) + return false; + } + + boolean this_present_tpartitions = true && this.isSetTpartitions(); + boolean that_present_tpartitions = true && that.isSetTpartitions(); + if (this_present_tpartitions || that_present_tpartitions) { + if 
(!(this_present_tpartitions && that_present_tpartitions)) + return false; + if (!this.tpartitions.equals(that.tpartitions)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + int hashCode = 1; + + hashCode = hashCode * 8191 + ((isSetTinfo()) ? 131071 : 524287); + if (isSetTinfo()) + hashCode = hashCode * 8191 + tinfo.hashCode(); + + hashCode = hashCode * 8191 + ((isSetCredentials()) ? 131071 : 524287); + if (isSetCredentials()) + hashCode = hashCode * 8191 + credentials.hashCode(); + + hashCode = hashCode * 8191 + ((isSetTpartitions()) ? 131071 : 524287); + if (isSetTpartitions()) + hashCode = hashCode * 8191 + tpartitions.hashCode(); + + return hashCode; + } + + @Override + public int compareTo(seeded_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = java.lang.Boolean.compare(isSetTinfo(), other.isSetTinfo()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTinfo()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tinfo, other.tinfo); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = java.lang.Boolean.compare(isSetCredentials(), other.isSetCredentials()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCredentials()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.credentials, other.credentials); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = java.lang.Boolean.compare(isSetTpartitions(), other.isSetTpartitions()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTpartitions()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tpartitions, other.tpartitions); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + @org.apache.thrift.annotation.Nullable + @Override + public _Fields fieldForId(int fieldId) 
{ + return _Fields.findByThriftId(fieldId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() { + java.lang.StringBuilder sb = new java.lang.StringBuilder("seeded_args("); + boolean first = true; + + sb.append("tinfo:"); + if (this.tinfo == null) { + sb.append("null"); + } else { + sb.append(this.tinfo); + } + first = false; + if (!first) sb.append(", "); + sb.append("credentials:"); + if (this.credentials == null) { + sb.append("null"); + } else { + sb.append(this.credentials); + } + first = false; + if (!first) sb.append(", "); + sb.append("tpartitions:"); + if (this.tpartitions == null) { + sb.append("null"); + } else { + sb.append(this.tpartitions); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (tinfo != null) { + tinfo.validate(); + } + if (credentials != null) { + credentials.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class seeded_argsStandardSchemeFactory implements 
org.apache.thrift.scheme.SchemeFactory { + @Override + public seeded_argsStandardScheme getScheme() { + return new seeded_argsStandardScheme(); + } + } + + private static class seeded_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot, seeded_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TINFO + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(); + struct.tinfo.read(iprot); + struct.setTinfoIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // CREDENTIALS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(); + struct.credentials.read(iprot); + struct.setCredentialsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // TPARTITIONS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list16 = iprot.readListBegin(); + struct.tpartitions = new java.util.ArrayList(_list16.size); + @org.apache.thrift.annotation.Nullable TFatePartition _elem17; + for (int _i18 = 0; _i18 < _list16.size; ++_i18) + { + _elem17 = new TFatePartition(); + _elem17.read(iprot); + struct.tpartitions.add(_elem17); + } + iprot.readListEnd(); + } + struct.setTpartitionsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + 
iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot, seeded_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.tinfo != null) { + oprot.writeFieldBegin(TINFO_FIELD_DESC); + struct.tinfo.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.credentials != null) { + oprot.writeFieldBegin(CREDENTIALS_FIELD_DESC); + struct.credentials.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.tpartitions != null) { + oprot.writeFieldBegin(TPARTITIONS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tpartitions.size())); + for (TFatePartition _iter19 : struct.tpartitions) + { + _iter19.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class seeded_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public seeded_argsTupleScheme getScheme() { + return new seeded_argsTupleScheme(); + } + } + + private static class seeded_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, seeded_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetTinfo()) { + optionals.set(0); + } + if (struct.isSetCredentials()) { + optionals.set(1); + } + if (struct.isSetTpartitions()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetTinfo()) { + struct.tinfo.write(oprot); + } + if (struct.isSetCredentials()) { + 
struct.credentials.write(oprot); + } + if (struct.isSetTpartitions()) { + { + oprot.writeI32(struct.tpartitions.size()); + for (TFatePartition _iter20 : struct.tpartitions) + { + _iter20.write(oprot); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, seeded_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(); + struct.tinfo.read(iprot); + struct.setTinfoIsSet(true); + } + if (incoming.get(1)) { + struct.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(); + struct.credentials.read(iprot); + struct.setCredentialsIsSet(true); + } + if (incoming.get(2)) { + { + org.apache.thrift.protocol.TList _list21 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.tpartitions = new java.util.ArrayList(_list21.size); + @org.apache.thrift.annotation.Nullable TFatePartition _elem22; + for (int _i23 = 0; _i23 < _list21.size; ++_i23) + { + _elem22 = new TFatePartition(); + _elem22.read(iprot); + struct.tpartitions.add(_elem22); + } + } + struct.setTpartitionsIsSet(true); + } + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? 
STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + } + private static void unusedMethod() {} } diff --git a/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/ManagerClientService.java b/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/ManagerClientService.java index 9e845fbdcd3..c0726205929 100644 --- a/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/ManagerClientService.java +++ b/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/ManagerClientService.java @@ -85,6 +85,8 @@ public interface Iface { public long getManagerTimeNanos(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException; + public void event(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.thrift.TException; + } public interface AsyncIface { @@ -145,6 +147,8 @@ public interface AsyncIface { public void getManagerTimeNanos(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void event(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + } public static class Client extends org.apache.thrift.TServiceClient implements Iface { @@ -1069,6 +1073,20 @@ public long recv_getManagerTimeNanos() throws org.apache.accumulo.core.clientImp throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getManagerTimeNanos failed: unknown result"); } + @Override + 
public void event(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.thrift.TException + { + send_event(tinfo, credentials); + } + + public void send_event(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.thrift.TException + { + event_args args = new event_args(); + args.setTinfo(tinfo); + args.setCredentials(credentials); + sendBaseOneway("event", args); + } + } public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface { public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { @@ -2311,6 +2329,44 @@ public java.lang.Long getResult() throws org.apache.accumulo.core.clientImpl.thr } } + @Override + public void event(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + event_call method_call = new event_call(tinfo, credentials, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class event_call extends org.apache.thrift.async.TAsyncMethodCall { + private org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo; + private org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; + public event_call(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, 
transport, resultHandler, true); + this.tinfo = tinfo; + this.credentials = credentials; + } + + @Override + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("event", org.apache.thrift.protocol.TMessageType.ONEWAY, 0)); + event_args args = new event_args(); + args.setTinfo(tinfo); + args.setCredentials(credentials); + args.write(prot); + prot.writeMessageEnd(); + } + + @Override + public Void getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new java.lang.IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return null; + } + } + } public static class Processor extends org.apache.thrift.TBaseProcessor implements org.apache.thrift.TProcessor { @@ -2352,6 +2408,7 @@ protected Processor(I iface, java.util.Map extends org.apache.thrift.ProcessFunction { + public event() { + super("event"); + } + + @Override + public event_args getEmptyArgsInstance() { + return new event_args(); + } + + @Override + protected boolean isOneway() { + return true; + } + + @Override + protected boolean rethrowUnhandledExceptions() { + return false; + } + + @Override + public org.apache.thrift.TBase getResult(I iface, event_args args) throws org.apache.thrift.TException { + iface.event(args.tinfo, args.credentials); + return null; + } + } + } public static class AsyncProcessor extends org.apache.thrift.TBaseAsyncProcessor { @@ -3387,6 +3471,7 @@ protected AsyncProcessor(I iface, java.util.Map extends org.apache.thrift.AsyncProcessFunction { + public event() { + super("event"); + } + + @Override + public event_args 
getEmptyArgsInstance() { + return new event_args(); + } + + @Override + public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new org.apache.thrift.async.AsyncMethodCallback() { + @Override + public void onComplete(Void o) { + } + @Override + public void onError(java.lang.Exception e) { + if (e instanceof org.apache.thrift.transport.TTransportException) { + _LOGGER.error("TTransportException inside handler", e); + fb.close(); + } else { + _LOGGER.error("Exception inside oneway handler", e); + } + } + }; + } + + @Override + protected boolean isOneway() { + return true; + } + + @Override + public void start(I iface, event_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.event(args.tinfo, args.credentials,resultHandler); + } + } + } @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) @@ -40611,5 +40736,499 @@ private static S scheme(org.apache. 
} } + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) + public static class event_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("event_args"); + + private static final org.apache.thrift.protocol.TField TINFO_FIELD_DESC = new org.apache.thrift.protocol.TField("tinfo", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField CREDENTIALS_FIELD_DESC = new org.apache.thrift.protocol.TField("credentials", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new event_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new event_argsTupleSchemeFactory(); + + public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo; // required + public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TINFO((short)1, "tinfo"), + CREDENTIALS((short)2, "credentials"); + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TINFO + return TINFO; + case 2: // CREDENTIALS + return CREDENTIALS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + @Override + public short getThriftFieldId() { + return _thriftId; + } + + @Override + public java.lang.String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TINFO, new org.apache.thrift.meta_data.FieldMetaData("tinfo", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.clientImpl.thrift.TInfo.class))); + tmpMap.put(_Fields.CREDENTIALS, new org.apache.thrift.meta_data.FieldMetaData("credentials", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, 
org.apache.accumulo.core.securityImpl.thrift.TCredentials.class))); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(event_args.class, metaDataMap); + } + + public event_args() { + } + + public event_args( + org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, + org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) + { + this(); + this.tinfo = tinfo; + this.credentials = credentials; + } + + /** + * Performs a deep copy on other. + */ + public event_args(event_args other) { + if (other.isSetTinfo()) { + this.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(other.tinfo); + } + if (other.isSetCredentials()) { + this.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(other.credentials); + } + } + + @Override + public event_args deepCopy() { + return new event_args(this); + } + + @Override + public void clear() { + this.tinfo = null; + this.credentials = null; + } + + @org.apache.thrift.annotation.Nullable + public org.apache.accumulo.core.clientImpl.thrift.TInfo getTinfo() { + return this.tinfo; + } + + public event_args setTinfo(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo) { + this.tinfo = tinfo; + return this; + } + + public void unsetTinfo() { + this.tinfo = null; + } + + /** Returns true if field tinfo is set (has been assigned a value) and false otherwise */ + public boolean isSetTinfo() { + return this.tinfo != null; + } + + public void setTinfoIsSet(boolean value) { + if (!value) { + this.tinfo = null; + } + } + + @org.apache.thrift.annotation.Nullable + public org.apache.accumulo.core.securityImpl.thrift.TCredentials getCredentials() { + return this.credentials; + } + + public event_args setCredentials(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) { + this.credentials = credentials; + return this; + } + + public 
void unsetCredentials() { + this.credentials = null; + } + + /** Returns true if field credentials is set (has been assigned a value) and false otherwise */ + public boolean isSetCredentials() { + return this.credentials != null; + } + + public void setCredentialsIsSet(boolean value) { + if (!value) { + this.credentials = null; + } + } + + @Override + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + case TINFO: + if (value == null) { + unsetTinfo(); + } else { + setTinfo((org.apache.accumulo.core.clientImpl.thrift.TInfo)value); + } + break; + + case CREDENTIALS: + if (value == null) { + unsetCredentials(); + } else { + setCredentials((org.apache.accumulo.core.securityImpl.thrift.TCredentials)value); + } + break; + + } + } + + @org.apache.thrift.annotation.Nullable + @Override + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + case TINFO: + return getTinfo(); + + case CREDENTIALS: + return getCredentials(); + + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + @Override + public boolean isSet(_Fields field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + case TINFO: + return isSetTinfo(); + case CREDENTIALS: + return isSetCredentials(); + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that instanceof event_args) + return this.equals((event_args)that); + return false; + } + + public boolean equals(event_args that) { + if (that == null) + return false; + if (this == that) + return true; + + boolean this_present_tinfo = true && this.isSetTinfo(); + boolean that_present_tinfo = true && that.isSetTinfo(); + if (this_present_tinfo || that_present_tinfo) { + if (!(this_present_tinfo && that_present_tinfo)) + return false; + if 
(!this.tinfo.equals(that.tinfo)) + return false; + } + + boolean this_present_credentials = true && this.isSetCredentials(); + boolean that_present_credentials = true && that.isSetCredentials(); + if (this_present_credentials || that_present_credentials) { + if (!(this_present_credentials && that_present_credentials)) + return false; + if (!this.credentials.equals(that.credentials)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + int hashCode = 1; + + hashCode = hashCode * 8191 + ((isSetTinfo()) ? 131071 : 524287); + if (isSetTinfo()) + hashCode = hashCode * 8191 + tinfo.hashCode(); + + hashCode = hashCode * 8191 + ((isSetCredentials()) ? 131071 : 524287); + if (isSetCredentials()) + hashCode = hashCode * 8191 + credentials.hashCode(); + + return hashCode; + } + + @Override + public int compareTo(event_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = java.lang.Boolean.compare(isSetTinfo(), other.isSetTinfo()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTinfo()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tinfo, other.tinfo); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = java.lang.Boolean.compare(isSetCredentials(), other.isSetCredentials()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCredentials()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.credentials, other.credentials); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + @org.apache.thrift.annotation.Nullable + @Override + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + @Override + public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() { + java.lang.StringBuilder sb = new java.lang.StringBuilder("event_args("); + boolean first = true; + + sb.append("tinfo:"); + if (this.tinfo == null) { + sb.append("null"); + } else { + sb.append(this.tinfo); + } + first = false; + if (!first) sb.append(", "); + sb.append("credentials:"); + if (this.credentials == null) { + sb.append("null"); + } else { + sb.append(this.credentials); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (tinfo != null) { + tinfo.validate(); + } + if (credentials != null) { + credentials.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class event_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public event_argsStandardScheme getScheme() { + return new event_argsStandardScheme(); + } + } + + private static class event_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot, event_args struct) throws org.apache.thrift.TException { + 
org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TINFO + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(); + struct.tinfo.read(iprot); + struct.setTinfoIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // CREDENTIALS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(); + struct.credentials.read(iprot); + struct.setCredentialsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot, event_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.tinfo != null) { + oprot.writeFieldBegin(TINFO_FIELD_DESC); + struct.tinfo.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.credentials != null) { + oprot.writeFieldBegin(CREDENTIALS_FIELD_DESC); + struct.credentials.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class event_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public event_argsTupleScheme getScheme() { + return new event_argsTupleScheme(); + } + } + + private static class event_argsTupleScheme extends 
org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, event_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetTinfo()) { + optionals.set(0); + } + if (struct.isSetCredentials()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetTinfo()) { + struct.tinfo.write(oprot); + } + if (struct.isSetCredentials()) { + struct.credentials.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, event_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(); + struct.tinfo.read(iprot); + struct.setTinfoIsSet(true); + } + if (incoming.get(1)) { + struct.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(); + struct.credentials.read(iprot); + struct.setCredentialsIsSet(true); + } + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? 
STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + } + private static void unusedMethod() {} } diff --git a/core/src/main/thrift/fate-worker.thrift b/core/src/main/thrift/fate-worker.thrift index 91c23aeb6b9..4f0d7f87a22 100644 --- a/core/src/main/thrift/fate-worker.thrift +++ b/core/src/main/thrift/fate-worker.thrift @@ -49,4 +49,10 @@ service FateWorkerService { ) throws ( 1:client.ThriftSecurityException sec ) -} \ No newline at end of file + + oneway void seeded( + 1:client.TInfo tinfo, + 2:security.TCredentials credentials, + 3:list tpartitions + ) +} diff --git a/core/src/main/thrift/manager.thrift b/core/src/main/thrift/manager.thrift index 436d365e979..ca830ef964d 100644 --- a/core/src/main/thrift/manager.thrift +++ b/core/src/main/thrift/manager.thrift @@ -537,4 +537,9 @@ service ManagerClientService { ) throws ( 1:client.ThriftSecurityException sec ) + + oneway void event( + 1:client.TInfo tinfo + 2:security.TCredentials credentials + ) } diff --git a/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java b/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java index 7fd4d5157e4..ff8594d133d 100644 --- a/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java +++ b/core/src/test/java/org/apache/accumulo/core/fate/TestStore.java @@ -85,6 +85,9 @@ public FateTxStore reserve(FateId fateId) { return new TestFateTxStore(fateId); } + @Override + public void seeded() {} + @Override public Optional> tryReserve(FateId fateId) { synchronized (this) { @@ -97,13 +100,13 @@ public Optional> tryReserve(FateId fateId) { } @Override - public Map getActiveReservations() { + public Map getActiveReservations(Set partitions) { // This method only makes sense for the FateStores that don't store their reservations in memory throw new UnsupportedOperationException(); } @Override - public void deleteDeadReservations() { + public void deleteDeadReservations(Set partitions) { // This method only makes sense for the FateStores that don't 
store their reservations in memory throw new UnsupportedOperationException(); } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index b5f73a2f722..4f93def03e8 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -109,6 +109,7 @@ import org.apache.accumulo.core.util.time.SteadyTime; import org.apache.accumulo.core.zookeeper.ZcStat; import org.apache.accumulo.manager.compaction.coordinator.CompactionCoordinator; +import org.apache.accumulo.manager.fate.FateManager; import org.apache.accumulo.manager.merge.FindMergeableRangeTask; import org.apache.accumulo.manager.metrics.ManagerMetrics; import org.apache.accumulo.manager.recovery.RecoveryManager; @@ -943,6 +944,11 @@ public void run() { throw new IllegalStateException("Unable to start server on host " + getBindAddress(), e); } + // TODO eventually stop this + // Start manager assistant before getting lock, this allows non primary manager processes to + // work on stuff. + new ManagerAssistant(getContext(), getBindAddress()).start(); + // block until we can obtain the ZK lock for the manager. Create the // initial lock using ThriftService.NONE. 
 This will allow the lock +    // allocation to occur, but prevent any services from getting the @@ -1146,6 +1152,11 @@ boolean canSuspendTablets() { throw new IllegalStateException("Exception setting up FaTE cleanup thread", e); } + +    // TODO eventually stop this +    var fateManager = new FateManager(getContext()); +    fateManager.start(); +    fate(FateInstanceType.USER).setSeedingConsumer(fateManager::notifySeeded); + producers.addAll(managerMetrics.getProducers(this)); metricsInfo.addMetricsProducers(producers.toArray(new MetricsProducer[0])); metricsInfo.init(MetricsInfo.serviceTags(getContext().getInstanceName(), getApplicationName(), @@ -1198,7 +1209,7 @@ boolean canSuspendTablets() { UUID uuid = sld.getServerUUID(ThriftService.NONE); ServiceDescriptors descriptors = new ServiceDescriptors(); for (ThriftService svc : new ThriftService[] {ThriftService.MANAGER, ThriftService.COORDINATOR, -        ThriftService.FATE, ThriftService.FATE_WORKER}) { +        ThriftService.FATE}) { descriptors.addService(new ServiceDescriptor(uuid, svc, getAdvertiseAddress().toString(), this.getResourceGroup())); } @@ -1286,8 +1297,15 @@ protected Fate initializeFateInstance(ServerContext context, FateStore< if (store.type() == FateInstanceType.META) { fateInstance.setPartitions(Set.of(FatePartition.all(FateInstanceType.META))); -    } // else do not run user transactions for now in the manager... it will have an empty set of -      // partitions +    } else if (store.type() == FateInstanceType.USER) { +      // Do not run user transactions for now in the manager... it will have an empty set of +      // partitions. Ideally the primary manager would not need a fate instance, but it is used to seed +      // work and wait for work. Would be best to pull these operations like seeding and waiting for +      // work to an independent class.
+ fateInstance.setPartitions(Set.of()); + } else { + throw new IllegalStateException("Unknown fate type " + store.type()); + } return fateInstance; } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java similarity index 88% rename from server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java rename to server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java index 9fecefa7652..6ea89e1866a 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java @@ -49,17 +49,20 @@ */ // TODO because this does not extend abstract server it does not get some of the benefits like // monitoring of lock -public class ManagerWorker /* extends AbstractServer */ { +public class ManagerAssistant { - private static final Logger log = LoggerFactory.getLogger(ManagerWorker.class); + private static final Logger log = LoggerFactory.getLogger(ManagerAssistant.class); private final ServerContext context; private final String bindAddress; private volatile ServiceLock managerWorkerLock; private FateWorker fateWorker; private volatile ServerAddress thriftServer; - protected ManagerWorker(ServerContext context, String bindAddress) { - this.context = context; + protected ManagerAssistant(ServerContext context, String bindAddress) { + // create another server context because the server context has the lock... + // TODO creating another context instance in the process may cause problems, like duplicating + // some thread pools + this.context = new ServerContext(context.getSiteConfiguration()); this.bindAddress = bindAddress; } @@ -81,10 +84,10 @@ private HostAndPort startClientService() throws UnknownHostException { // TODO should the minthreads and timeout have their own props? 
Probably, do not expect this to // have lots of RPCs so could be less. - var thriftServer = - TServerUtils.createThriftServer(getContext(), bindAddress, Property.MANAGER_ASSISTANTPORT, - processor, this.getClass().getSimpleName(), null, Property.MANAGER_MINTHREADS, - Property.MANAGER_MINTHREADS_TIMEOUT, Property.MANAGER_THREADCHECK); + var thriftServer = TServerUtils.createThriftServer(getContext(), bindAddress, + Property.MANAGER_ASSISTANT_PORT, processor, this.getClass().getSimpleName(), + Property.MANAGER_ASSISTANT_PORTSEARCH, Property.MANAGER_MINTHREADS, + Property.MANAGER_MINTHREADS_TIMEOUT, Property.MANAGER_THREADCHECK); thriftServer.startThriftServer("Thrift Manager Assistant Server"); log.info("Starting {} Thrift server, listening on {}", this.getClass().getSimpleName(), thriftServer.address); @@ -141,6 +144,7 @@ public void start() { } announceExistence(advertiseAddress); + context.setServiceLock(getLock()); fateWorker.setLock(getLock()); } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerClientServiceHandler.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerClientServiceHandler.java index 6f8d284a240..8a47f54aebd 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerClientServiceHandler.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerClientServiceHandler.java @@ -805,6 +805,16 @@ public long getManagerTimeNanos(TInfo tinfo, TCredentials credentials) return manager.getSteadyTime().getNanos(); } + @Override + public void event(TInfo tinfo, TCredentials credentials) throws TException { + if (!security.canPerformSystemActions(credentials)) { + throw new ThriftSecurityException(credentials.getPrincipal(), + SecurityErrorCode.PERMISSION_DENIED); + } + + manager.getEventCoordinator().event("External event"); + } + protected TableId getTableId(ClientContext context, String tableName) throws ThriftTableOperationException { return 
ClientServiceHandler.checkTableId(context, tableName, null); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 0eeec86eb1d..a46db29cfee 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -26,6 +26,7 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import org.apache.accumulo.core.fate.FateId; @@ -36,6 +37,7 @@ import org.apache.accumulo.core.rpc.ThriftUtil; import org.apache.accumulo.core.rpc.clients.ThriftClientTypes; import org.apache.accumulo.core.trace.TraceUtil; +import org.apache.accumulo.core.util.threads.Threads; import org.apache.accumulo.server.ServerContext; import org.apache.thrift.TException; import org.slf4j.Logger; @@ -44,6 +46,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Sets; import com.google.common.net.HostAndPort; +import com.google.common.util.concurrent.RateLimiter; /** * Partitions fate across manager assistant processes. 
This is done by assigning ranges of the fate @@ -60,10 +63,15 @@ public FateManager(ServerContext context) { this.context = context; } - // TODO remove, here for testing - public static final AtomicBoolean stop = new AtomicBoolean(false); + private final AtomicBoolean stop = new AtomicBoolean(false); - public void managerWorkers() throws Exception { + private final AtomicReference>> stableAssignments = + new AtomicReference<>(Map.of()); + + private final Map> pendingNotifications = new HashMap<>(); + + private void managerWorkers() throws TException, InterruptedException { + log.debug("Started Fate Manager"); outer: while (!stop.get()) { // TODO make configurable Thread.sleep(3_000); @@ -78,6 +86,12 @@ public void managerWorkers() throws Exception { Map> desired = computeDesiredAssignments(currentAssignments, desiredParititions); + if (desired.equals(currentAssignments)) { + stableAssignments.set(Map.copyOf(currentAssignments)); + } else { + stableAssignments.set(Map.of()); + } + // are there any workers with extra partitions? If so need to unload those first. 
int unloads = 0; for (Map.Entry> entry : desired.entrySet()) { @@ -123,6 +137,98 @@ public void managerWorkers() throws Exception { } } + private Thread thread = null; + private Thread ntfyThread = null; + + public synchronized void start() { + Preconditions.checkState(thread == null); + Preconditions.checkState(ntfyThread == null); + Preconditions.checkState(!stop.get()); + + thread = Threads.createCriticalThread("Fate Manager", () -> { + try { + managerWorkers(); + } catch (Exception e) { + throw new IllegalStateException(e); + } + }); + thread.start(); + + ntfyThread = Threads.createCriticalThread("Fate Notify", new NotifyTask()); + ntfyThread.start(); + } + + public synchronized void stop() throws InterruptedException { + stop.set(true); + if (thread != null) { + thread.join(); + } + if (ntfyThread != null) { + ntfyThread.join(); + } + } + + /** + * Makes a best effort to notify this fate operation was seeded. + */ + public void notifySeeded(FateId fateId) { + // TODO avoid linear search + for (Map.Entry> entry : stableAssignments.get().entrySet()) { + for (var parition : entry.getValue()) { + if (parition.contains(fateId)) { + synchronized (pendingNotifications) { + pendingNotifications.computeIfAbsent(entry.getKey(), k -> new HashSet<>()) + .add(parition); + pendingNotifications.notify(); + } + return; + } + } + } + } + + private class NotifyTask implements Runnable { + + private final RateLimiter rateLimiter = RateLimiter.create(100); + + @Override + public void run() { + while (!stop.get()) { + try { + Map> copy; + synchronized (pendingNotifications) { + if (pendingNotifications.isEmpty()) { + pendingNotifications.wait(100); + } + copy = Map.copyOf(pendingNotifications); + pendingNotifications.clear(); + } + + rateLimiter.acquire(); + + for (var entry : copy.entrySet()) { + HostAndPort address = entry.getKey(); + Set partitions = entry.getValue(); + FateWorkerService.Client client = + ThriftUtil.getClient(ThriftClientTypes.FATE_WORKER, address, context); + 
try { + log.debug("Notifying about seeding {} {}", address, partitions); + client.seeded(TraceUtil.traceInfo(), context.rpcCreds(), + partitions.stream().map(FatePartition::toThrift).toList()); + } finally { + ThriftUtil.returnClient(client, context); + } + } + + } catch (InterruptedException e) { + throw new IllegalStateException(e); + } catch (TException e) { + log.warn("Failed to send notification that fate was seeded", e); + } + } + } + } + /** * Sets the complete set of partitions a server should work on. It will only succeed if the update * id is valid. The update id avoids race conditions w/ previously queued network messages, it's a @@ -179,7 +285,7 @@ private Map> computeDesiredAssignments( }); desiredAssignments.forEach((hp, parts) -> { - log.debug(" desired " + hp + " " + parts.size() + " " + parts); + log.trace(" desired {} {} {}", hp, parts.size(), parts); }); return desiredAssignments; @@ -234,7 +340,7 @@ private Map getCurrentAssignments() throws TExcep var workers = context.getServerPaths().getManagerWorker(DEFAULT_RG_ONLY, AddressSelector.all(), true); - log.debug("workers : " + workers); + log.trace("workers : " + workers); Map currentAssignments = new HashMap<>(); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index 747a78f46ef..b0d11a7c500 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -18,12 +18,8 @@ */ package org.apache.accumulo.manager.fate; -import java.util.Collections; -import java.util.HashSet; import java.util.List; -import java.util.Set; import java.util.function.Predicate; -import java.util.function.Supplier; import java.util.stream.Collectors; import org.apache.accumulo.core.client.AccumuloSecurityException; @@ -45,6 +41,7 @@ import org.apache.accumulo.manager.tableOps.TraceRepo; 
import org.apache.accumulo.server.ServerContext; import org.apache.accumulo.server.security.AuditedSecurityOperation; +import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,7 +66,8 @@ public void setLock(ServiceLock lock) { new UserFateStore<>(context, SystemTables.FATE.tableName(), lock.getLockID(), isLockHeld); this.fate = new Fate<>(env, store, false, TraceRepo::toLogString, context.getConfiguration(), context.getScheduledExecutor()); - // TODO where will the 2 fate cleanup task run? Make dead reservation cleaner use partitions... cleanup can run in manager + // TODO where will the 2 fate cleanup task run? Make dead reservation cleaner use partitions... + // cleanup can run in manager } @@ -113,21 +111,35 @@ public boolean setPartitions(TInfo tinfo, TCredentials credentials, long updateI SecurityErrorCode.PERMISSION_DENIED).asThriftException(); } - synchronized (this) { var localFate = fate; if (localFate != null && expectedUpdateId != null && updateId == expectedUpdateId) { // Set to null which makes it so that an update id can only be used once. 
expectedUpdateId = null; - var desiredSet = desired.stream().map(FatePartition::from).collect(Collectors.toSet()); - var oldPartitions = localFate.setPartitions(desiredSet); - log.info("Changed partitions from {} to {}", oldPartitions, desiredSet); - return true; - }else { - log.debug("Did not change partitions to {} expectedUpdateId:{} updateId:{} localFate==null:{}", desired, - expectedUpdateId, updateId, localFate==null); + var desiredSet = desired.stream().map(FatePartition::from).collect(Collectors.toSet()); + var oldPartitions = localFate.setPartitions(desiredSet); + log.info("Changed partitions from {} to {}", oldPartitions, desiredSet); + return true; + } else { + log.debug( + "Did not change partitions to {} expectedUpdateId:{} updateId:{} localFate==null:{}", + desired, expectedUpdateId, updateId, localFate == null); return false; } } } + + @Override + public void seeded(TInfo tinfo, TCredentials credentials, List tpartitions) + throws TException { + // TODO check the partitions + Fate localFate; + synchronized (this) { + localFate = fate; + } + + if (localFate != null) { + localFate.seeded(tpartitions.stream().map(FatePartition::from).collect(Collectors.toSet())); + } + } } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java index b553a021d1b..f1bc45ca9ff 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java @@ -32,6 +32,10 @@ import org.apache.accumulo.core.metadata.TServerInstance; import org.apache.accumulo.core.metadata.schema.Ample; import org.apache.accumulo.core.metadata.schema.ExternalCompactionId; +import org.apache.accumulo.core.rpc.ThriftUtil; +import org.apache.accumulo.core.rpc.clients.ThriftClientTypes; +import org.apache.accumulo.core.trace.TraceUtil; +import 
org.apache.accumulo.core.util.threads.Threads; import org.apache.accumulo.core.util.time.SteadyTime; import org.apache.accumulo.manager.EventPublisher; import org.apache.accumulo.manager.split.SplitFileCache; @@ -40,6 +44,9 @@ import org.apache.accumulo.server.fs.VolumeManager; import org.apache.accumulo.server.manager.LiveTServerSet; import org.apache.accumulo.server.tables.TableManager; +import org.apache.thrift.TException; + +import com.google.common.util.concurrent.RateLimiter; public class FateWorkerEnv implements FateEnv { private final ServerContext ctx; @@ -48,6 +55,82 @@ public class FateWorkerEnv implements FateEnv { private final ServiceLock serviceLock; private final LiveTServerSet tservers; private final SplitFileCache splitCache; + private final EventHandler eventHandler; + + private final Object eventLockObj = new Object(); + private boolean eventQueued = false; + + private void queueEvent() { + synchronized (eventLockObj) { + eventQueued = true; + eventLockObj.notify(); + } + } + + private class EventSender implements Runnable { + private final RateLimiter rateLimiter = RateLimiter.create(20); + + @Override + public void run() { + while (true) { + try { + synchronized (eventLockObj) { + if (!eventQueued) { + eventLockObj.wait(); + } + } + + rateLimiter.acquire(); + + var client = ThriftClientTypes.MANAGER.getConnection(ctx); + try { + if (client != null) { + client.event(TraceUtil.traceInfo(), ctx.rpcCreds()); + } + } catch (TException e) { + // TODO + e.printStackTrace(); + } finally { + if (client != null) { + ThriftUtil.close(client, ctx); + } + } + + } catch (InterruptedException e) { + // TODO + e.printStackTrace(); + } + } + } + } + + private class EventHandler implements EventPublisher { + + @Override + public void event(String msg, Object... args) { + queueEvent(); + } + + @Override + public void event(Ample.DataLevel level, String msg, Object... 
args) { + queueEvent(); + } + + @Override + public void event(TableId tableId, String msg, Object... args) { + queueEvent(); + } + + @Override + public void event(KeyExtent extent, String msg, Object... args) { + queueEvent(); + } + + @Override + public void event(Collection extents, String msg, Object... args) { + queueEvent(); + } + } FateWorkerEnv(ServerContext ctx, ServiceLock lock) { this.ctx = ctx; @@ -57,6 +140,9 @@ public class FateWorkerEnv implements FateEnv { this.serviceLock = lock; this.tservers = new LiveTServerSet(ctx); this.splitCache = new SplitFileCache(ctx); + this.eventHandler = new EventHandler(); + + Threads.createCriticalThread("Fate Worker Event Sender", new EventSender()).start(); } @Override @@ -66,33 +152,7 @@ public ServerContext getContext() { @Override public EventPublisher getEventPublisher() { - // TODO do something w/ the events - return new EventPublisher() { - @Override - public void event(String msg, Object... args) { - - } - - @Override - public void event(Ample.DataLevel level, String msg, Object... args) { - - } - - @Override - public void event(TableId tableId, String msg, Object... args) { - - } - - @Override - public void event(KeyExtent extent, String msg, Object... args) { - - } - - @Override - public void event(Collection extents, String msg, Object... 
args) { - - } - }; + return eventHandler; } @Override diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java index ba7e68efaaa..c0457150846 100644 --- a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java +++ b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java @@ -32,8 +32,7 @@ import org.apache.accumulo.core.client.Accumulo; import org.apache.accumulo.core.client.admin.CompactionConfig; import org.apache.accumulo.core.data.Mutation; -import org.apache.accumulo.manager.ManagerWorker; -import org.apache.accumulo.manager.fate.FateManager; +import org.apache.accumulo.manager.Manager; import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl; import org.apache.accumulo.test.functional.ConfigurableMacBase; import org.apache.hadoop.conf.Configuration; @@ -44,6 +43,7 @@ public class MultipleManagerIT extends ConfigurableMacBase { @Override protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { // TODO add a way to start multiple managers to mini + cfg.getClusterServerConfiguration().setNumDefaultCompactors(8); super.configure(cfg, hadoopCoreSite); } @@ -51,31 +51,23 @@ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSit public void test() throws Exception { List managerWorkers = new ArrayList<>(); - // start two fate workers initially - for (int i = 0; i < 2; i++) { - managerWorkers.add(exec(ManagerWorker.class)); + for (int i = 0; i < 1; i++) { + managerWorkers.add(exec(Manager.class)); } var executor = Executors.newCachedThreadPool(); - // This assigns fate partitions to fate worker processes, run it in a background thread. 
- var fateMgr = new FateManager(getServerContext()); - var future = executor.submit(() -> { - fateMgr.managerWorkers(); - return null; - }); - Thread.sleep(30_000); - // start more fate workers, should see the partitions be shuffled eventually + // start more manager processes, should be assigned fate work for (int i = 0; i < 3; i++) { - managerWorkers.add(exec(ManagerWorker.class)); + managerWorkers.add(exec(Manager.class)); } try (var client = Accumulo.newClient().from(getClientProperties()).build()) { var splits = IntStream.range(1, 10).mapToObj(i -> String.format("%03d", i)).map(Text::new) .collect(Collectors.toCollection(TreeSet::new)); var tableOpFutures = new ArrayList>(); - for (int i = 0; i < 30; i++) { + for (int i = 0; i < 1; i++) { var table = "t" + i; // TODO seeing in the logs that fate operations for the same table are running on different // processes, however there is a 5 second delay because there is no notification mechanism @@ -121,10 +113,6 @@ public void test() throws Exception { } } - FateManager.stop.set(true); - - future.get(); - executor.shutdown(); System.out.println("DONE"); diff --git a/test/src/main/java/org/apache/accumulo/test/fate/MultipleStoresITBase.java b/test/src/main/java/org/apache/accumulo/test/fate/MultipleStoresITBase.java index d2c79855f4c..cb33e45e056 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/MultipleStoresITBase.java +++ b/test/src/main/java/org/apache/accumulo/test/fate/MultipleStoresITBase.java @@ -43,6 +43,7 @@ import org.apache.accumulo.core.conf.DefaultConfiguration; import org.apache.accumulo.core.fate.Fate; import org.apache.accumulo.core.fate.FateId; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.FateStore; import org.apache.accumulo.core.fate.ReadOnlyFateStore; import org.apache.accumulo.core.fate.Repo; @@ -103,9 +104,9 @@ private void testReserveUnreserve(TestStoreFactory testStoreFac assertTrue(store1.tryReserve(fakeFateId).isEmpty()); 
assertTrue(store2.tryReserve(fakeFateId).isEmpty()); // Both stores should return the same reserved transactions - activeReservations = store1.getActiveReservations(); + activeReservations = store1.getActiveReservations(Set.of(FatePartition.all(store1.type()))); assertEquals(allIds, activeReservations.keySet()); - activeReservations = store2.getActiveReservations(); + activeReservations = store2.getActiveReservations(Set.of(FatePartition.all(store2.type()))); assertEquals(allIds, activeReservations.keySet()); // Test setting/getting the TStatus and unreserving the transactions @@ -120,8 +121,8 @@ private void testReserveUnreserve(TestStoreFactory testStoreFac assertThrows(IllegalStateException.class, () -> reservation.setStatus(ReadOnlyFateStore.TStatus.NEW)); } - assertTrue(store1.getActiveReservations().isEmpty()); - assertTrue(store2.getActiveReservations().isEmpty()); + assertTrue(store1.getActiveReservations(Set.of(FatePartition.all(store1.type()))).isEmpty()); + assertTrue(store2.getActiveReservations(Set.of(FatePartition.all(store2.type()))).isEmpty()); } } @@ -321,7 +322,8 @@ private void testDeadReservationsCleanup(TestStoreFactory testStor try { fate1 = new FastFate<>(testEnv1, store1, true, Object::toString, config); // Ensure nothing is reserved yet - assertTrue(store1.getActiveReservations().isEmpty()); + assertTrue( + store1.getActiveReservations(Set.of(FatePartition.all(store1.type()))).isEmpty()); // Create transactions for (int i = 0; i < numFateIds; i++) { @@ -337,7 +339,7 @@ private void testDeadReservationsCleanup(TestStoreFactory testStor // Each fate worker will be hung up working (IN_PROGRESS) on a single transaction // Verify store1 has the transactions reserved and that they were reserved with lock1 - reservations = store1.getActiveReservations(); + reservations = store1.getActiveReservations(Set.of(FatePartition.all(store1.type()))); assertEquals(allIds, reservations.keySet()); reservations.values().forEach(res -> assertEquals(lock1, 
res.getLockID())); @@ -345,7 +347,7 @@ private void testDeadReservationsCleanup(TestStoreFactory testStor // Verify store2 can see the reserved transactions even though they were reserved using // store1 - reservations = store2.getActiveReservations(); + reservations = store2.getActiveReservations(Set.of(FatePartition.all(store2.type()))); assertEquals(allIds, reservations.keySet()); reservations.values().forEach(res -> assertEquals(lock1, res.getLockID())); @@ -370,7 +372,7 @@ private void testDeadReservationsCleanup(TestStoreFactory testStor // the workers for fate1 are hung up Wait.waitFor(() -> { Map store2Reservations = - store2.getActiveReservations(); + store2.getActiveReservations(Set.of(FatePartition.all(store2.type()))); boolean allReservedWithLock2 = store2Reservations.values().stream() .allMatch(entry -> entry.getLockID().equals(lock2)); return store2Reservations.keySet().equals(allIds) && allReservedWithLock2; From a92b73c28d1d8b5b0f75ef026a569e5e2dc4fe6a Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Tue, 17 Feb 2026 19:58:50 +0000 Subject: [PATCH 20/38] WIP --- .../org/apache/accumulo/core/fate/Fate.java | 1 + .../core/fate/thrift/FateWorkerService.java | 317 ++++++++- .../manager/thrift/ManagerClientService.java | 599 ++++++++++++++++-- .../accumulo/core/manager/thrift/TEvent.java | 516 +++++++++++++++ core/src/main/thrift/fate-worker.thrift | 5 +- core/src/main/thrift/manager.thrift | 22 +- .../accumulo/manager/EventCoordinator.java | 49 +- .../manager/ManagerClientServiceHandler.java | 7 +- .../accumulo/manager/fate/FateManager.java | 7 +- .../accumulo/manager/fate/FateWorker.java | 7 +- .../accumulo/manager/fate/FateWorkerEnv.java | 51 +- .../manager/tableOps/split/PreSplit.java | 2 +- .../accumulo/test/MultipleManagerIT.java | 4 +- 13 files changed, 1462 insertions(+), 125 deletions(-) create mode 100644 core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/TEvent.java diff --git 
a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java index c283d5b2702..4c245f8255f 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java @@ -441,6 +441,7 @@ public void seeded(Set partitions) { } } + log.trace("Notified of seeding for {}", partitions); store.seeded(); } diff --git a/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java b/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java index 2ad296dafb1..ff22db8c5f9 100644 --- a/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java +++ b/core/src/main/thrift-gen-java/org/apache/accumulo/core/fate/thrift/FateWorkerService.java @@ -131,6 +131,7 @@ public boolean recv_setPartitions() throws org.apache.accumulo.core.clientImpl.t public void seeded(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List tpartitions) throws org.apache.thrift.TException { send_seeded(tinfo, credentials, tpartitions); + recv_seeded(); } public void send_seeded(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List tpartitions) throws org.apache.thrift.TException @@ -139,7 +140,14 @@ public void send_seeded(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, args.setTinfo(tinfo); args.setCredentials(credentials); args.setTpartitions(tpartitions); - sendBaseOneway("seeded", args); + sendBase("seeded", args); + } + + public void recv_seeded() throws org.apache.thrift.TException + { + seeded_result result = new seeded_result(); + receiveBase(result, "seeded"); + return; } } @@ -256,7 +264,7 @@ public static class seeded_call extends org.apache.thrift.async.TAsyncMethodCall private 
org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; private java.util.List tpartitions; public seeded_call(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List tpartitions, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, true); + super(client, protocolFactory, transport, resultHandler, false); this.tinfo = tinfo; this.credentials = credentials; this.tpartitions = tpartitions; @@ -264,7 +272,7 @@ public seeded_call(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.a @Override public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("seeded", org.apache.thrift.protocol.TMessageType.ONEWAY, 0)); + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("seeded", org.apache.thrift.protocol.TMessageType.CALL, 0)); seeded_args args = new seeded_args(); args.setTinfo(tinfo); args.setCredentials(credentials); @@ -280,6 +288,7 @@ public Void getResult() throws org.apache.thrift.TException { } org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_seeded(); return null; } } @@ -380,7 +389,7 @@ public seeded_args getEmptyArgsInstance() { @Override protected boolean isOneway() { - return true; + return false; } @Override @@ -389,9 +398,10 @@ protected boolean rethrowUnhandledExceptions() { } @Override - public org.apache.thrift.TBase getResult(I iface, 
seeded_args args) throws org.apache.thrift.TException { + public seeded_result getResult(I iface, seeded_args args) throws org.apache.thrift.TException { + seeded_result result = new seeded_result(); iface.seeded(args.tinfo, args.credentials, args.tpartitions); - return null; + return result; } } @@ -573,14 +583,40 @@ public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final return new org.apache.thrift.async.AsyncMethodCallback() { @Override public void onComplete(Void o) { + seeded_result result = new seeded_result(); + try { + fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + } catch (org.apache.thrift.transport.TTransportException e) { + _LOGGER.error("TTransportException writing to internal frame buffer", e); + fb.close(); + } catch (java.lang.Exception e) { + _LOGGER.error("Exception writing to internal frame buffer", e); + onError(e); + } } @Override public void onError(java.lang.Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TSerializable msg; + seeded_result result = new seeded_result(); if (e instanceof org.apache.thrift.transport.TTransportException) { _LOGGER.error("TTransportException inside handler", e); fb.close(); + return; + } else if (e instanceof org.apache.thrift.TApplicationException) { + _LOGGER.error("TApplicationException inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TApplicationException)e; } else { - _LOGGER.error("Exception inside oneway handler", e); + _LOGGER.error("Exception inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + } catch (java.lang.Exception ex) { + _LOGGER.error("Exception writing to internal frame buffer", ex); + fb.close(); } } }; @@ -588,7 
+624,7 @@ public void onError(java.lang.Exception e) { @Override protected boolean isOneway() { - return true; + return false; } @Override @@ -3473,5 +3509,270 @@ private static S scheme(org.apache. } } + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) + public static class seeded_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("seeded_result"); + + + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new seeded_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new seeded_resultTupleSchemeFactory(); + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + @Override + public short getThriftFieldId() { + return _thriftId; + } + + @Override + public java.lang.String getFieldName() { + return _fieldName; + } + } + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(seeded_result.class, metaDataMap); + } + + public seeded_result() { + } + + /** + * Performs a deep copy on other. 
+ */ + public seeded_result(seeded_result other) { + } + + @Override + public seeded_result deepCopy() { + return new seeded_result(this); + } + + @Override + public void clear() { + } + + @Override + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + } + } + + @org.apache.thrift.annotation.Nullable + @Override + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + @Override + public boolean isSet(_Fields field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that instanceof seeded_result) + return this.equals((seeded_result)that); + return false; + } + + public boolean equals(seeded_result that) { + if (that == null) + return false; + if (this == that) + return true; + + return true; + } + + @Override + public int hashCode() { + int hashCode = 1; + + return hashCode; + } + + @Override + public int compareTo(seeded_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + @org.apache.thrift.annotation.Nullable + @Override + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() { + java.lang.StringBuilder sb = new 
java.lang.StringBuilder("seeded_result("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class seeded_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public seeded_resultStandardScheme getScheme() { + return new seeded_resultStandardScheme(); + } + } + + private static class seeded_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot, seeded_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot, seeded_result struct) throws org.apache.thrift.TException { 
+ struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class seeded_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public seeded_resultTupleScheme getScheme() { + return new seeded_resultTupleScheme(); + } + } + + private static class seeded_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, seeded_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, seeded_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? 
STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + } + private static void unusedMethod() {} } diff --git a/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/ManagerClientService.java b/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/ManagerClientService.java index c0726205929..4dba34f0079 100644 --- a/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/ManagerClientService.java +++ b/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/ManagerClientService.java @@ -85,7 +85,7 @@ public interface Iface { public long getManagerTimeNanos(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException, org.apache.thrift.TException; - public void event(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.thrift.TException; + public void processEvents(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List events) throws org.apache.thrift.TException; } @@ -147,7 +147,7 @@ public interface AsyncIface { public void getManagerTimeNanos(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void event(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void processEvents(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List events, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; } @@ -1074,17 +1074,26 @@ public long recv_getManagerTimeNanos() throws org.apache.accumulo.core.clientImp } @Override - public void event(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.thrift.TException + public void processEvents(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List events) throws org.apache.thrift.TException { - send_event(tinfo, credentials); + send_processEvents(tinfo, credentials, events); + recv_processEvents(); } - public void send_event(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) throws org.apache.thrift.TException + public void send_processEvents(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List events) throws org.apache.thrift.TException { - event_args args = new event_args(); + processEvents_args args = new processEvents_args(); args.setTinfo(tinfo); args.setCredentials(credentials); - sendBaseOneway("event", args); + args.setEvents(events); + sendBase("processEvents", args); + } + + public void recv_processEvents() throws org.apache.thrift.TException + { + processEvents_result result = new processEvents_result(); + receiveBase(result, "processEvents"); + return; } } @@ -2330,28 +2339,31 @@ public java.lang.Long getResult() throws org.apache.accumulo.core.clientImpl.thr } @Override - public void event(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void 
processEvents(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List events, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - event_call method_call = new event_call(tinfo, credentials, resultHandler, this, ___protocolFactory, ___transport); + processEvents_call method_call = new processEvents_call(tinfo, credentials, events, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } - public static class event_call extends org.apache.thrift.async.TAsyncMethodCall { + public static class processEvents_call extends org.apache.thrift.async.TAsyncMethodCall { private org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo; private org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; - public event_call(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, true); + private java.util.List events; + public processEvents_call(org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, java.util.List events, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); this.tinfo = tinfo; this.credentials = credentials; + 
this.events = events; } @Override public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("event", org.apache.thrift.protocol.TMessageType.ONEWAY, 0)); - event_args args = new event_args(); + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("processEvents", org.apache.thrift.protocol.TMessageType.CALL, 0)); + processEvents_args args = new processEvents_args(); args.setTinfo(tinfo); args.setCredentials(credentials); + args.setEvents(events); args.write(prot); prot.writeMessageEnd(); } @@ -2363,6 +2375,7 @@ public Void getResult() throws org.apache.thrift.TException { } org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_processEvents(); return null; } } @@ -2408,7 +2421,7 @@ protected Processor(I iface, java.util.Map extends org.apache.thrift.ProcessFunction { - public event() { - super("event"); + public static class processEvents extends org.apache.thrift.ProcessFunction { + public processEvents() { + super("processEvents"); } @Override - public event_args getEmptyArgsInstance() { - return new event_args(); + public processEvents_args getEmptyArgsInstance() { + return new processEvents_args(); } @Override protected boolean isOneway() { - return true; + return false; } @Override @@ -3424,9 +3437,10 @@ protected boolean rethrowUnhandledExceptions() { } @Override - public org.apache.thrift.TBase getResult(I iface, event_args args) throws org.apache.thrift.TException { - iface.event(args.tinfo, args.credentials); - return null; + public processEvents_result getResult(I iface, processEvents_args args) throws org.apache.thrift.TException { + processEvents_result result = new processEvents_result(); + 
iface.processEvents(args.tinfo, args.credentials, args.events); + return result; } } @@ -3471,7 +3485,7 @@ protected AsyncProcessor(I iface, java.util.Map extends org.apache.thrift.AsyncProcessFunction { - public event() { - super("event"); + public static class processEvents extends org.apache.thrift.AsyncProcessFunction { + public processEvents() { + super("processEvents"); } @Override - public event_args getEmptyArgsInstance() { - return new event_args(); + public processEvents_args getEmptyArgsInstance() { + return new processEvents_args(); } @Override @@ -5625,14 +5639,40 @@ public org.apache.thrift.async.AsyncMethodCallback getResultHandler(final return new org.apache.thrift.async.AsyncMethodCallback() { @Override public void onComplete(Void o) { + processEvents_result result = new processEvents_result(); + try { + fcall.sendResponse(fb, result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + } catch (org.apache.thrift.transport.TTransportException e) { + _LOGGER.error("TTransportException writing to internal frame buffer", e); + fb.close(); + } catch (java.lang.Exception e) { + _LOGGER.error("Exception writing to internal frame buffer", e); + onError(e); + } } @Override public void onError(java.lang.Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TSerializable msg; + processEvents_result result = new processEvents_result(); if (e instanceof org.apache.thrift.transport.TTransportException) { _LOGGER.error("TTransportException inside handler", e); fb.close(); + return; + } else if (e instanceof org.apache.thrift.TApplicationException) { + _LOGGER.error("TApplicationException inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TApplicationException)e; } else { - _LOGGER.error("Exception inside oneway handler", e); + _LOGGER.error("Exception inside handler", e); + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = new 
org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + } catch (java.lang.Exception ex) { + _LOGGER.error("Exception writing to internal frame buffer", ex); + fb.close(); } } }; @@ -5640,12 +5680,12 @@ public void onError(java.lang.Exception e) { @Override protected boolean isOneway() { - return true; + return false; } @Override - public void start(I iface, event_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - iface.event(args.tinfo, args.credentials,resultHandler); + public void start(I iface, processEvents_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + iface.processEvents(args.tinfo, args.credentials, args.events,resultHandler); } } @@ -40737,22 +40777,25 @@ private static S scheme(org.apache. } @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) - public static class event_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("event_args"); + public static class processEvents_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("processEvents_args"); private static final org.apache.thrift.protocol.TField TINFO_FIELD_DESC = new org.apache.thrift.protocol.TField("tinfo", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField CREDENTIALS_FIELD_DESC = new org.apache.thrift.protocol.TField("credentials", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField EVENTS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("events", org.apache.thrift.protocol.TType.LIST, (short)3); - private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new event_argsStandardSchemeFactory(); - private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new event_argsTupleSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new processEvents_argsStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new processEvents_argsTupleSchemeFactory(); public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo; // required public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials; // required + public @org.apache.thrift.annotation.Nullable java.util.List events; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { TINFO((short)1, "tinfo"), - CREDENTIALS((short)2, "credentials"); + CREDENTIALS((short)2, "credentials"), + EVENTS((short)3, "events"); private static final java.util.Map byName = new java.util.HashMap(); @@ -40772,6 +40815,8 @@ public static _Fields findByThriftId(int fieldId) { return TINFO; case 2: // CREDENTIALS return CREDENTIALS; + case 3: // EVENTS + return EVENTS; default: return null; } @@ -40822,43 +40867,56 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.clientImpl.thrift.TInfo.class))); tmpMap.put(_Fields.CREDENTIALS, new org.apache.thrift.meta_data.FieldMetaData("credentials", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.securityImpl.thrift.TCredentials.class))); + tmpMap.put(_Fields.EVENTS, new org.apache.thrift.meta_data.FieldMetaData("events", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TEvent.class)))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(event_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(processEvents_args.class, metaDataMap); } - public event_args() { + public processEvents_args() { } - public event_args( + public processEvents_args( org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo, - org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) + org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials, + java.util.List events) { this(); this.tinfo = tinfo; this.credentials = credentials; + this.events = events; } /** * 
Performs a deep copy on other. */ - public event_args(event_args other) { + public processEvents_args(processEvents_args other) { if (other.isSetTinfo()) { this.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(other.tinfo); } if (other.isSetCredentials()) { this.credentials = new org.apache.accumulo.core.securityImpl.thrift.TCredentials(other.credentials); } + if (other.isSetEvents()) { + java.util.List __this__events = new java.util.ArrayList(other.events.size()); + for (TEvent other_element : other.events) { + __this__events.add(new TEvent(other_element)); + } + this.events = __this__events; + } } @Override - public event_args deepCopy() { - return new event_args(this); + public processEvents_args deepCopy() { + return new processEvents_args(this); } @Override public void clear() { this.tinfo = null; this.credentials = null; + this.events = null; } @org.apache.thrift.annotation.Nullable @@ -40866,7 +40924,7 @@ public org.apache.accumulo.core.clientImpl.thrift.TInfo getTinfo() { return this.tinfo; } - public event_args setTinfo(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo) { + public processEvents_args setTinfo(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.clientImpl.thrift.TInfo tinfo) { this.tinfo = tinfo; return this; } @@ -40891,7 +40949,7 @@ public org.apache.accumulo.core.securityImpl.thrift.TCredentials getCredentials( return this.credentials; } - public event_args setCredentials(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) { + public processEvents_args setCredentials(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.securityImpl.thrift.TCredentials credentials) { this.credentials = credentials; return this; } @@ -40911,6 +40969,47 @@ public void setCredentialsIsSet(boolean value) { } } + public int getEventsSize() { + return (this.events == null) ? 
0 : this.events.size(); + } + + @org.apache.thrift.annotation.Nullable + public java.util.Iterator getEventsIterator() { + return (this.events == null) ? null : this.events.iterator(); + } + + public void addToEvents(TEvent elem) { + if (this.events == null) { + this.events = new java.util.ArrayList(); + } + this.events.add(elem); + } + + @org.apache.thrift.annotation.Nullable + public java.util.List getEvents() { + return this.events; + } + + public processEvents_args setEvents(@org.apache.thrift.annotation.Nullable java.util.List events) { + this.events = events; + return this; + } + + public void unsetEvents() { + this.events = null; + } + + /** Returns true if field events is set (has been assigned a value) and false otherwise */ + public boolean isSetEvents() { + return this.events != null; + } + + public void setEventsIsSet(boolean value) { + if (!value) { + this.events = null; + } + } + @Override public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { @@ -40930,6 +41029,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case EVENTS: + if (value == null) { + unsetEvents(); + } else { + setEvents((java.util.List)value); + } + break; + } } @@ -40943,6 +41050,9 @@ public java.lang.Object getFieldValue(_Fields field) { case CREDENTIALS: return getCredentials(); + case EVENTS: + return getEvents(); + } throw new java.lang.IllegalStateException(); } @@ -40959,18 +41069,20 @@ public boolean isSet(_Fields field) { return isSetTinfo(); case CREDENTIALS: return isSetCredentials(); + case EVENTS: + return isSetEvents(); } throw new java.lang.IllegalStateException(); } @Override public boolean equals(java.lang.Object that) { - if (that instanceof event_args) - return this.equals((event_args)that); + if (that instanceof processEvents_args) + return this.equals((processEvents_args)that); return false; } - public boolean equals(event_args that) { + public boolean 
equals(processEvents_args that) { if (that == null) return false; if (this == that) @@ -40994,6 +41106,15 @@ public boolean equals(event_args that) { return false; } + boolean this_present_events = true && this.isSetEvents(); + boolean that_present_events = true && that.isSetEvents(); + if (this_present_events || that_present_events) { + if (!(this_present_events && that_present_events)) + return false; + if (!this.events.equals(that.events)) + return false; + } + return true; } @@ -41009,11 +41130,15 @@ public int hashCode() { if (isSetCredentials()) hashCode = hashCode * 8191 + credentials.hashCode(); + hashCode = hashCode * 8191 + ((isSetEvents()) ? 131071 : 524287); + if (isSetEvents()) + hashCode = hashCode * 8191 + events.hashCode(); + return hashCode; } @Override - public int compareTo(event_args other) { + public int compareTo(processEvents_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -41040,6 +41165,16 @@ public int compareTo(event_args other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetEvents(), other.isSetEvents()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEvents()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.events, other.events); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -41061,7 +41196,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public java.lang.String toString() { - java.lang.StringBuilder sb = new java.lang.StringBuilder("event_args("); + java.lang.StringBuilder sb = new java.lang.StringBuilder("processEvents_args("); boolean first = true; sb.append("tinfo:"); @@ -41079,6 +41214,14 @@ public java.lang.String toString() { sb.append(this.credentials); } first = false; + if (!first) sb.append(", "); + sb.append("events:"); + if (this.events == null) { + sb.append("null"); + } else { + sb.append(this.events); + } + first = false; sb.append(")"); return sb.toString(); } @@ -41110,17 +41253,17 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class event_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + private static class processEvents_argsStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { @Override - public event_argsStandardScheme getScheme() { - return new event_argsStandardScheme(); + public processEvents_argsStandardScheme getScheme() { + return new processEvents_argsStandardScheme(); } } - private static class event_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { + private static class processEvents_argsStandardScheme extends org.apache.thrift.scheme.StandardScheme { @Override - public void read(org.apache.thrift.protocol.TProtocol iprot, event_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, processEvents_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -41148,6 +41291,25 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, event_args struct) org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // EVENTS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list130 = iprot.readListBegin(); + 
struct.events = new java.util.ArrayList(_list130.size); + @org.apache.thrift.annotation.Nullable TEvent _elem131; + for (int _i132 = 0; _i132 < _list130.size; ++_i132) + { + _elem131 = new TEvent(); + _elem131.read(iprot); + struct.events.add(_elem131); + } + iprot.readListEnd(); + } + struct.setEventsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -41160,7 +41322,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, event_args struct) } @Override - public void write(org.apache.thrift.protocol.TProtocol oprot, event_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, processEvents_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -41174,23 +41336,35 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, event_args struct) struct.credentials.write(oprot); oprot.writeFieldEnd(); } + if (struct.events != null) { + oprot.writeFieldBegin(EVENTS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size())); + for (TEvent _iter133 : struct.events) + { + _iter133.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class event_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + private static class processEvents_argsTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { @Override - public event_argsTupleScheme getScheme() { - return new event_argsTupleScheme(); + public processEvents_argsTupleScheme getScheme() { + return new processEvents_argsTupleScheme(); } } - private static class event_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { + private static class 
processEvents_argsTupleScheme extends org.apache.thrift.scheme.TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, event_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, processEvents_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetTinfo()) { @@ -41199,19 +41373,31 @@ public void write(org.apache.thrift.protocol.TProtocol prot, event_args struct) if (struct.isSetCredentials()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetEvents()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetTinfo()) { struct.tinfo.write(oprot); } if (struct.isSetCredentials()) { struct.credentials.write(oprot); } + if (struct.isSetEvents()) { + { + oprot.writeI32(struct.events.size()); + for (TEvent _iter134 : struct.events) + { + _iter134.write(oprot); + } + } + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, event_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, processEvents_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; - java.util.BitSet incoming = iprot.readBitSet(2); + java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.tinfo = new org.apache.accumulo.core.clientImpl.thrift.TInfo(); struct.tinfo.read(iprot); @@ -41222,6 +41408,285 @@ public void read(org.apache.thrift.protocol.TProtocol prot, event_args struct) t struct.credentials.read(iprot); struct.setCredentialsIsSet(true); } + if (incoming.get(2)) { + { + org.apache.thrift.protocol.TList _list135 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.events = new 
java.util.ArrayList(_list135.size); + @org.apache.thrift.annotation.Nullable TEvent _elem136; + for (int _i137 = 0; _i137 < _list135.size; ++_i137) + { + _elem136 = new TEvent(); + _elem136.read(iprot); + struct.events.add(_elem136); + } + } + struct.setEventsIsSet(true); + } + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + } + + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) + public static class processEvents_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("processEvents_result"); + + + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new processEvents_resultStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new processEvents_resultTupleSchemeFactory(); + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + @Override + public short getThriftFieldId() { + return _thriftId; + } + + @Override + public java.lang.String getFieldName() { + return _fieldName; + } + } + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(processEvents_result.class, metaDataMap); + } + + public processEvents_result() { + } + + /** + * Performs a deep copy on other. 
+ */ + public processEvents_result(processEvents_result other) { + } + + @Override + public processEvents_result deepCopy() { + return new processEvents_result(this); + } + + @Override + public void clear() { + } + + @Override + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + } + } + + @org.apache.thrift.annotation.Nullable + @Override + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + @Override + public boolean isSet(_Fields field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that instanceof processEvents_result) + return this.equals((processEvents_result)that); + return false; + } + + public boolean equals(processEvents_result that) { + if (that == null) + return false; + if (this == that) + return true; + + return true; + } + + @Override + public int hashCode() { + int hashCode = 1; + + return hashCode; + } + + @Override + public int compareTo(processEvents_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + @org.apache.thrift.annotation.Nullable + @Override + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() 
{ + java.lang.StringBuilder sb = new java.lang.StringBuilder("processEvents_result("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class processEvents_resultStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public processEvents_resultStandardScheme getScheme() { + return new processEvents_resultStandardScheme(); + } + } + + private static class processEvents_resultStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot, processEvents_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + @Override + public void 
write(org.apache.thrift.protocol.TProtocol oprot, processEvents_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class processEvents_resultTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public processEvents_resultTupleScheme getScheme() { + return new processEvents_resultTupleScheme(); + } + } + + private static class processEvents_resultTupleScheme extends org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, processEvents_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, processEvents_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; } } diff --git a/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/TEvent.java b/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/TEvent.java new file mode 100644 index 00000000000..e6200c0a698 --- /dev/null +++ b/core/src/main/thrift-gen-java/org/apache/accumulo/core/manager/thrift/TEvent.java @@ -0,0 +1,516 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/** + * Autogenerated by Thrift Compiler (0.17.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.accumulo.core.manager.thrift; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) +public class TEvent implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TEvent"); + + private static final org.apache.thrift.protocol.TField LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("level", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField EXTENT_FIELD_DESC = new org.apache.thrift.protocol.TField("extent", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new TEventStandardSchemeFactory(); + private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new TEventTupleSchemeFactory(); + + public @org.apache.thrift.annotation.Nullable java.lang.String level; // required + public @org.apache.thrift.annotation.Nullable org.apache.accumulo.core.dataImpl.thrift.TKeyExtent extent; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + LEVEL((short)1, "level"), + EXTENT((short)2, "extent"); + + private static final java.util.Map byName = new java.util.HashMap(); + + static { + for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // LEVEL + return LEVEL; + case 2: // EXTENT + return EXTENT; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + @org.apache.thrift.annotation.Nullable + public static _Fields findByName(java.lang.String name) { + return byName.get(name); + } + + private final short _thriftId; + private final java.lang.String _fieldName; + + _Fields(short thriftId, java.lang.String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + @Override + public short getThriftFieldId() { + return _thriftId; + } + + @Override + public java.lang.String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.LEVEL, new org.apache.thrift.meta_data.FieldMetaData("level", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.EXTENT, new org.apache.thrift.meta_data.FieldMetaData("extent", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.accumulo.core.dataImpl.thrift.TKeyExtent.class))); + metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TEvent.class, metaDataMap); + } + + public TEvent() { + } + + public TEvent( + java.lang.String level, + org.apache.accumulo.core.dataImpl.thrift.TKeyExtent extent) + { + this(); + this.level = level; + this.extent = extent; + } + + /** + * Performs a deep copy on other. 
+ */ + public TEvent(TEvent other) { + if (other.isSetLevel()) { + this.level = other.level; + } + if (other.isSetExtent()) { + this.extent = new org.apache.accumulo.core.dataImpl.thrift.TKeyExtent(other.extent); + } + } + + @Override + public TEvent deepCopy() { + return new TEvent(this); + } + + @Override + public void clear() { + this.level = null; + this.extent = null; + } + + @org.apache.thrift.annotation.Nullable + public java.lang.String getLevel() { + return this.level; + } + + public TEvent setLevel(@org.apache.thrift.annotation.Nullable java.lang.String level) { + this.level = level; + return this; + } + + public void unsetLevel() { + this.level = null; + } + + /** Returns true if field level is set (has been assigned a value) and false otherwise */ + public boolean isSetLevel() { + return this.level != null; + } + + public void setLevelIsSet(boolean value) { + if (!value) { + this.level = null; + } + } + + @org.apache.thrift.annotation.Nullable + public org.apache.accumulo.core.dataImpl.thrift.TKeyExtent getExtent() { + return this.extent; + } + + public TEvent setExtent(@org.apache.thrift.annotation.Nullable org.apache.accumulo.core.dataImpl.thrift.TKeyExtent extent) { + this.extent = extent; + return this; + } + + public void unsetExtent() { + this.extent = null; + } + + /** Returns true if field extent is set (has been assigned a value) and false otherwise */ + public boolean isSetExtent() { + return this.extent != null; + } + + public void setExtentIsSet(boolean value) { + if (!value) { + this.extent = null; + } + } + + @Override + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { + switch (field) { + case LEVEL: + if (value == null) { + unsetLevel(); + } else { + setLevel((java.lang.String)value); + } + break; + + case EXTENT: + if (value == null) { + unsetExtent(); + } else { + setExtent((org.apache.accumulo.core.dataImpl.thrift.TKeyExtent)value); + } + break; + + } + } + + 
@org.apache.thrift.annotation.Nullable + @Override + public java.lang.Object getFieldValue(_Fields field) { + switch (field) { + case LEVEL: + return getLevel(); + + case EXTENT: + return getExtent(); + + } + throw new java.lang.IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + @Override + public boolean isSet(_Fields field) { + if (field == null) { + throw new java.lang.IllegalArgumentException(); + } + + switch (field) { + case LEVEL: + return isSetLevel(); + case EXTENT: + return isSetExtent(); + } + throw new java.lang.IllegalStateException(); + } + + @Override + public boolean equals(java.lang.Object that) { + if (that instanceof TEvent) + return this.equals((TEvent)that); + return false; + } + + public boolean equals(TEvent that) { + if (that == null) + return false; + if (this == that) + return true; + + boolean this_present_level = true && this.isSetLevel(); + boolean that_present_level = true && that.isSetLevel(); + if (this_present_level || that_present_level) { + if (!(this_present_level && that_present_level)) + return false; + if (!this.level.equals(that.level)) + return false; + } + + boolean this_present_extent = true && this.isSetExtent(); + boolean that_present_extent = true && that.isSetExtent(); + if (this_present_extent || that_present_extent) { + if (!(this_present_extent && that_present_extent)) + return false; + if (!this.extent.equals(that.extent)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + int hashCode = 1; + + hashCode = hashCode * 8191 + ((isSetLevel()) ? 131071 : 524287); + if (isSetLevel()) + hashCode = hashCode * 8191 + level.hashCode(); + + hashCode = hashCode * 8191 + ((isSetExtent()) ? 
131071 : 524287); + if (isSetExtent()) + hashCode = hashCode * 8191 + extent.hashCode(); + + return hashCode; + } + + @Override + public int compareTo(TEvent other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = java.lang.Boolean.compare(isSetLevel(), other.isSetLevel()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetLevel()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.level, other.level); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = java.lang.Boolean.compare(isSetExtent(), other.isSetExtent()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetExtent()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.extent, other.extent); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + @org.apache.thrift.annotation.Nullable + @Override + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + scheme(iprot).read(iprot, this); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + scheme(oprot).write(oprot, this); + } + + @Override + public java.lang.String toString() { + java.lang.StringBuilder sb = new java.lang.StringBuilder("TEvent("); + boolean first = true; + + sb.append("level:"); + if (this.level == null) { + sb.append("null"); + } else { + sb.append(this.level); + } + first = false; + if (!first) sb.append(", "); + sb.append("extent:"); + if (this.extent == null) { + sb.append("null"); + } else { + sb.append(this.extent); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check 
for sub-struct validity + if (extent != null) { + extent.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TEventStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public TEventStandardScheme getScheme() { + return new TEventStandardScheme(); + } + } + + private static class TEventStandardScheme extends org.apache.thrift.scheme.StandardScheme { + + @Override + public void read(org.apache.thrift.protocol.TProtocol iprot, TEvent struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // LEVEL + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.level = iprot.readString(); + struct.setLevelIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // EXTENT + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.extent = new org.apache.accumulo.core.dataImpl.thrift.TKeyExtent(); + struct.extent.read(iprot); + struct.setExtentIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + @Override + public void write(org.apache.thrift.protocol.TProtocol oprot, TEvent struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.level != null) { + oprot.writeFieldBegin(LEVEL_FIELD_DESC); + oprot.writeString(struct.level); + oprot.writeFieldEnd(); + } + if (struct.extent != null) { + oprot.writeFieldBegin(EXTENT_FIELD_DESC); + struct.extent.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TEventTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { + @Override + public TEventTupleScheme getScheme() { + return new TEventTupleScheme(); + } + } + + private static class TEventTupleScheme extends org.apache.thrift.scheme.TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TEvent struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetLevel()) { + optionals.set(0); + } + if (struct.isSetExtent()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetLevel()) { + oprot.writeString(struct.level); + } + if (struct.isSetExtent()) { + struct.extent.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TEvent struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; + java.util.BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.level = iprot.readString(); + 
struct.setLevelIsSet(true); + } + if (incoming.get(1)) { + struct.extent = new org.apache.accumulo.core.dataImpl.thrift.TKeyExtent(); + struct.extent.read(iprot); + struct.setExtentIsSet(true); + } + } + } + + private static S scheme(org.apache.thrift.protocol.TProtocol proto) { + return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); + } + private static void unusedMethod() {} +} + diff --git a/core/src/main/thrift/fate-worker.thrift b/core/src/main/thrift/fate-worker.thrift index 4f0d7f87a22..8538d7d3b26 100644 --- a/core/src/main/thrift/fate-worker.thrift +++ b/core/src/main/thrift/fate-worker.thrift @@ -50,9 +50,10 @@ service FateWorkerService { 1:client.ThriftSecurityException sec ) - oneway void seeded( + // TODO drop oneway and rate limiter + void seeded( 1:client.TInfo tinfo, 2:security.TCredentials credentials, 3:list tpartitions - ) + ) } diff --git a/core/src/main/thrift/manager.thrift b/core/src/main/thrift/manager.thrift index ca830ef964d..318805430b7 100644 --- a/core/src/main/thrift/manager.thrift +++ b/core/src/main/thrift/manager.thrift @@ -182,6 +182,11 @@ struct TTabletMergeability { 2:i64 delay } +struct TEvent { + 1:string level + 2:data.TKeyExtent extent +} + service FateService { // register a fate operation by reserving an opid @@ -239,7 +244,7 @@ service FateService { 1:client.ThriftSecurityException sec 2:client.ThriftNotActiveServiceException tnase ) - + } service ManagerClientService { @@ -371,7 +376,7 @@ service ManagerClientService { 1:client.ThriftSecurityException sec 2:client.ThriftNotActiveServiceException tnase ) - + void tabletServerStopping( 1:client.TInfo tinfo 2:security.TCredentials credentials @@ -392,7 +397,7 @@ service ManagerClientService { 2:client.ThriftNotActiveServiceException tnase 3:ThriftPropertyException tpe ) - + void modifySystemProperties( 1:client.TInfo tinfo 2:security.TCredentials credentials @@ -421,7 +426,7 @@ service 
ManagerClientService { 1:client.ThriftSecurityException sec 2:client.ThriftNotActiveServiceException tnase ) - + void removeResourceGroupNode( 1:client.TInfo tinfo 2:security.TCredentials credentials @@ -429,9 +434,9 @@ service ManagerClientService { ) throws ( 1:client.ThriftSecurityException sec 2:client.ThriftNotActiveServiceException tnase - 3:client.ThriftResourceGroupNotExistsException rgne + 3:client.ThriftResourceGroupNotExistsException rgne ) - + void setResourceGroupProperty( 1:client.TInfo tinfo 2:security.TCredentials credentials @@ -444,7 +449,7 @@ service ManagerClientService { 3:ThriftPropertyException tpe 4:client.ThriftResourceGroupNotExistsException rgne ) - + void modifyResourceGroupProperties( 1:client.TInfo tinfo 2:security.TCredentials credentials @@ -538,8 +543,9 @@ service ManagerClientService { 1:client.ThriftSecurityException sec ) - oneway void event( + void processEvents( 1:client.TInfo tinfo 2:security.TCredentials credentials + 3:list events ) } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/EventCoordinator.java b/server/manager/src/main/java/org/apache/accumulo/manager/EventCoordinator.java index c1ada4b2371..9ab22e92262 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/EventCoordinator.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/EventCoordinator.java @@ -20,10 +20,12 @@ import java.util.Collection; import java.util.EnumMap; +import java.util.Iterator; import java.util.Map; import org.apache.accumulo.core.data.TableId; import org.apache.accumulo.core.dataImpl.KeyExtent; +import org.apache.accumulo.core.manager.thrift.TEvent; import org.apache.accumulo.core.metadata.schema.Ample; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,30 +66,61 @@ public static class Event { private final Ample.DataLevel level; private final KeyExtent extent; - Event(KeyExtent extent) { + public Event(KeyExtent extent) { this.scope = EventScope.TABLE_RANGE; this.level = 
Ample.DataLevel.of(extent.tableId()); this.extent = extent; } - Event(TableId tableId) { + public Event(TableId tableId) { this.scope = EventScope.TABLE; this.level = Ample.DataLevel.of(tableId); this.extent = new KeyExtent(tableId, null, null); } - Event(Ample.DataLevel level) { + public Event(Ample.DataLevel level) { this.scope = EventScope.DATA_LEVEL; this.level = level; this.extent = null; } - Event() { + public Event() { this.scope = EventScope.ALL; this.level = null; this.extent = null; } + public TEvent toThrift() { + switch (scope) { + case ALL: + return new TEvent(null, null); + case DATA_LEVEL: + return new TEvent(getLevel().toString(), null); + case TABLE: + case TABLE_RANGE: + return new TEvent(null, getExtent().toThrift()); + default: + throw new IllegalStateException("scope : " + scope); + } + } + + public static Event fromThrift(TEvent tEvent) { + if (tEvent.getLevel() == null && tEvent.getExtent() == null) { + return new Event(); + } else if (tEvent.getLevel() != null && tEvent.getExtent() == null) { + return new Event(Ample.DataLevel.valueOf(tEvent.getLevel())); + } else if (tEvent.getLevel() == null && tEvent.getExtent() != null) { + var extent = KeyExtent.fromThrift(tEvent.getExtent()); + if (extent.endRow() == null && extent.prevEndRow() == null) { + return new Event(extent.tableId()); + } else { + return new Event(extent); + } + } else { + throw new IllegalArgumentException("Illegal TEvent " + tEvent); + } + } + public EventScope getScope() { return scope; } @@ -106,6 +139,10 @@ public KeyExtent getExtent() { Preconditions.checkState(scope == EventScope.TABLE || scope == EventScope.TABLE_RANGE); return extent; } + + public String toString() { + return "{ scope:" + scope + ", level:" + level + ", extent:" + extent + " }"; + } } @Override @@ -132,6 +169,10 @@ public void event(KeyExtent extent, String msg, Object... 
args) { publish(new Event(extent)); } + public void events(Iterator events) { + events.forEachRemaining(this::publish); + } + @Override public void event(Collection extents, String msg, Object... args) { if (!extents.isEmpty()) { diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerClientServiceHandler.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerClientServiceHandler.java index 8a47f54aebd..a58ef5b1058 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerClientServiceHandler.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerClientServiceHandler.java @@ -67,6 +67,7 @@ import org.apache.accumulo.core.manager.thrift.ManagerGoalState; import org.apache.accumulo.core.manager.thrift.ManagerMonitorInfo; import org.apache.accumulo.core.manager.thrift.ManagerState; +import org.apache.accumulo.core.manager.thrift.TEvent; import org.apache.accumulo.core.manager.thrift.TTabletMergeability; import org.apache.accumulo.core.manager.thrift.TabletLoadState; import org.apache.accumulo.core.manager.thrift.ThriftPropertyException; @@ -806,13 +807,15 @@ public long getManagerTimeNanos(TInfo tinfo, TCredentials credentials) } @Override - public void event(TInfo tinfo, TCredentials credentials) throws TException { + public void processEvents(TInfo tinfo, TCredentials credentials, List tEvents) + throws TException { if (!security.canPerformSystemActions(credentials)) { throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED); } - manager.getEventCoordinator().event("External event"); + manager.getEventCoordinator().events(tEvents.stream().map(EventCoordinator.Event::fromThrift) + .peek(event -> log.trace("remote event : {}", event)).iterator()); } protected TableId getTableId(ClientContext context, String tableName) diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java 
b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index a46db29cfee..b1978d9de54 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -46,7 +46,6 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Sets; import com.google.common.net.HostAndPort; -import com.google.common.util.concurrent.RateLimiter; /** * Partitions fate across manager assistant processes. This is done by assigning ranges of the fate @@ -189,8 +188,6 @@ public void notifySeeded(FateId fateId) { private class NotifyTask implements Runnable { - private final RateLimiter rateLimiter = RateLimiter.create(100); - @Override public void run() { while (!stop.get()) { @@ -204,15 +201,13 @@ public void run() { pendingNotifications.clear(); } - rateLimiter.acquire(); - for (var entry : copy.entrySet()) { HostAndPort address = entry.getKey(); Set partitions = entry.getValue(); FateWorkerService.Client client = ThriftUtil.getClient(ThriftClientTypes.FATE_WORKER, address, context); try { - log.debug("Notifying about seeding {} {}", address, partitions); + log.trace("Notifying about seeding {} {}", address, partitions); client.seeded(TraceUtil.traceInfo(), context.rpcCreds(), partitions.stream().map(FatePartition::toThrift).toList()); } finally { diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index b0d11a7c500..9bc578b1898 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -132,7 +132,12 @@ public boolean setPartitions(TInfo tinfo, TCredentials credentials, long updateI @Override public void seeded(TInfo tinfo, TCredentials credentials, List tpartitions) throws TException { - // TODO 
check the partitions + + if (!security.canPerformSystemActions(credentials)) { + throw new AccumuloSecurityException(credentials.getPrincipal(), + SecurityErrorCode.PERMISSION_DENIED).asThriftException(); + } + Fate localFate; synchronized (this) { localFate = fate; diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java index f1bc45ca9ff..72331c5c7c4 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java @@ -22,6 +22,7 @@ import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; import org.apache.accumulo.core.client.AccumuloException; import org.apache.accumulo.core.client.AccumuloSecurityException; @@ -37,7 +38,9 @@ import org.apache.accumulo.core.trace.TraceUtil; import org.apache.accumulo.core.util.threads.Threads; import org.apache.accumulo.core.util.time.SteadyTime; +import org.apache.accumulo.manager.EventCoordinator; import org.apache.accumulo.manager.EventPublisher; +import org.apache.accumulo.manager.EventQueue; import org.apache.accumulo.manager.split.SplitFileCache; import org.apache.accumulo.manager.tableOps.FateEnv; import org.apache.accumulo.server.ServerContext; @@ -45,10 +48,13 @@ import org.apache.accumulo.server.manager.LiveTServerSet; import org.apache.accumulo.server.tables.TableManager; import org.apache.thrift.TException; - -import com.google.common.util.concurrent.RateLimiter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class FateWorkerEnv implements FateEnv { + + private static final Logger log = LoggerFactory.getLogger(FateWorkerEnv.class); + private final ServerContext ctx; private final ExecutorService refreshPool; private final ExecutorService renamePool; @@ -57,35 +63,25 @@ public 
class FateWorkerEnv implements FateEnv { private final SplitFileCache splitCache; private final EventHandler eventHandler; - private final Object eventLockObj = new Object(); - private boolean eventQueued = false; - - private void queueEvent() { - synchronized (eventLockObj) { - eventQueued = true; - eventLockObj.notify(); - } - } + private final EventQueue queue = new EventQueue(); private class EventSender implements Runnable { - private final RateLimiter rateLimiter = RateLimiter.create(20); - @Override public void run() { + // TODO check for stop condition while (true) { try { - synchronized (eventLockObj) { - if (!eventQueued) { - eventLockObj.wait(); - } + var events = queue.poll(100, TimeUnit.MILLISECONDS); + if (events.isEmpty()) { + continue; } - rateLimiter.acquire(); + var tEvents = events.stream().map(EventCoordinator.Event::toThrift).toList(); var client = ThriftClientTypes.MANAGER.getConnection(ctx); try { if (client != null) { - client.event(TraceUtil.traceInfo(), ctx.rpcCreds()); + client.processEvents(TraceUtil.traceInfo(), ctx.rpcCreds(), tEvents); } } catch (TException e) { // TODO @@ -108,27 +104,34 @@ private class EventHandler implements EventPublisher { @Override public void event(String msg, Object... args) { - queueEvent(); + log.info(String.format(msg, args)); + queue.add(new EventCoordinator.Event()); } @Override public void event(Ample.DataLevel level, String msg, Object... args) { - queueEvent(); + log.info(String.format(msg, args)); + queue.add(new EventCoordinator.Event(level)); } @Override public void event(TableId tableId, String msg, Object... args) { - queueEvent(); + log.info(String.format(msg, args)); + queue.add(new EventCoordinator.Event(tableId)); } @Override public void event(KeyExtent extent, String msg, Object... args) { - queueEvent(); + log.debug(String.format(msg, args)); + queue.add(new EventCoordinator.Event(extent)); } @Override public void event(Collection extents, String msg, Object... 
args) { - queueEvent(); + if (!extents.isEmpty()) { + log.debug(String.format(msg, args)); + extents.forEach(extent -> queue.add(new EventCoordinator.Event(extent))); + } } } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/PreSplit.java b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/PreSplit.java index 9b1eaf6d682..ed6879eaffd 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/PreSplit.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/split/PreSplit.java @@ -107,7 +107,7 @@ public long isReady(FateId fateId, FateEnv env) throws Exception { // now that the operation id set, generate an event to unload the tablet or recover the // logs env.getEventPublisher().event(splitInfo.getOriginal(), - "Set operation id %s on tablet for split", fateId); + "Set operation id %s on tablet %s for split", fateId, splitInfo.getOriginal()); // the operation id was set, but a location is also set wait for it be unset return 1000; } diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java index c0457150846..36d14d4a65a 100644 --- a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java +++ b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java @@ -67,14 +67,14 @@ public void test() throws Exception { var splits = IntStream.range(1, 10).mapToObj(i -> String.format("%03d", i)).map(Text::new) .collect(Collectors.toCollection(TreeSet::new)); var tableOpFutures = new ArrayList>(); - for (int i = 0; i < 1; i++) { + for (int i = 0; i < 100; i++) { var table = "t" + i; // TODO seeing in the logs that fate operations for the same table are running on different // processes, however there is a 5 second delay because there is no notification mechanism // currently. 
// TODO its hard to find everything related to a table id in the logs, especially when the - // table id is like "b". Was tring to follow a single table across multiple manager workers + // table id is like "b". Was trying to follow a single table across multiple manager workers // processes. var tableOpsFuture = executor.submit(() -> { client.tableOperations().create(table); From ad14d56196274249b5cb0fbb03edbee78bb1226b Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Wed, 18 Feb 2026 00:10:53 +0000 Subject: [PATCH 21/38] WIP --- .../org/apache/accumulo/core/Constants.java | 2 +- .../core/clientImpl/ClientContext.java | 2 +- .../org/apache/accumulo/core/fate/Fate.java | 163 +------------- .../apache/accumulo/core/fate/FateClient.java | 205 ++++++++++++++++++ .../core/fate/zookeeper/MetaFateStore.java | 15 +- .../accumulo/core/lock/ServiceLockPaths.java | 16 +- core/src/main/thrift/fate-worker.thrift | 1 - .../server/init/ZooKeeperInitializer.java | 2 +- .../accumulo/manager/FateServiceHandler.java | 50 ++--- .../org/apache/accumulo/manager/Manager.java | 74 ++++--- .../manager/ManagerClientServiceHandler.java | 5 +- .../coordinator/CompactionCoordinator.java | 24 +- .../coordinator/DeadCompactionDetector.java | 20 +- .../accumulo/manager/fate/FateManager.java | 82 +++++-- .../manager/merge/FindMergeableRangeTask.java | 2 +- .../manager/metrics/ManagerMetrics.java | 13 +- .../metrics/fate/FateExecutorMetrics.java | 75 +++++++ .../manager/metrics/fate/FateMetrics.java | 30 +-- .../metrics/fate/meta/MetaFateMetrics.java | 8 +- .../metrics/fate/user/UserFateMetrics.java | 9 +- .../accumulo/manager/split/Splitter.java | 2 +- .../compaction/CompactionCoordinatorTest.java | 9 +- 22 files changed, 461 insertions(+), 348 deletions(-) create mode 100644 core/src/main/java/org/apache/accumulo/core/fate/FateClient.java create mode 100644 server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetrics.java diff --git 
a/core/src/main/java/org/apache/accumulo/core/Constants.java b/core/src/main/java/org/apache/accumulo/core/Constants.java index ea44de8676b..eb8ba1059eb 100644 --- a/core/src/main/java/org/apache/accumulo/core/Constants.java +++ b/core/src/main/java/org/apache/accumulo/core/Constants.java @@ -49,7 +49,7 @@ public class Constants { public static final String ZMANAGERS = "/managers"; public static final String ZMANAGER_LOCK = ZMANAGERS + "/lock"; - public static final String ZMANAGER_WORKER_LOCK = ZMANAGERS + "/workers"; + public static final String ZMANAGER_ASSISTANT_LOCK = ZMANAGERS + "/assistants"; public static final String ZMANAGER_GOAL_STATE = ZMANAGERS + "/goal_state"; public static final String ZMANAGER_TICK = ZMANAGERS + "/tick"; diff --git a/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientContext.java b/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientContext.java index efd4ea3ea08..ac75c7dc65a 100644 --- a/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientContext.java +++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/ClientContext.java @@ -1292,7 +1292,7 @@ private static Set createPersistentWatcherPaths() { Constants.ZMANAGER_LOCK, Constants.ZMINI_LOCK, Constants.ZMONITOR_LOCK, Constants.ZNAMESPACES, Constants.ZRECOVERY, Constants.ZSSERVERS, Constants.ZTABLES, Constants.ZTSERVERS, Constants.ZUSERS, RootTable.ZROOT_TABLET, Constants.ZTEST_LOCK, - Constants.ZMANAGER_WORKER_LOCK, Constants.ZRESOURCEGROUPS)) { + Constants.ZMANAGER_ASSISTANT_LOCK, Constants.ZRESOURCEGROUPS)) { pathsToWatch.add(path); } return pathsToWatch; diff --git a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java index 4c245f8255f..166c2abfa0d 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java @@ -19,12 +19,6 @@ package org.apache.accumulo.core.fate; import static 
java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.FAILED; -import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.FAILED_IN_PROGRESS; -import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.NEW; -import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.SUBMITTED; -import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.SUCCESSFUL; -import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.UNKNOWN; import static org.apache.accumulo.core.util.threads.ThreadPoolNames.META_DEAD_RESERVATION_CLEANER_POOL; import static org.apache.accumulo.core.util.threads.ThreadPoolNames.USER_DEAD_RESERVATION_CLEANER_POOL; @@ -37,7 +31,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.ExecutorService; @@ -47,23 +40,15 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; -import java.util.stream.Stream; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.conf.Property; -import org.apache.accumulo.core.fate.FateStore.FateTxStore; -import org.apache.accumulo.core.fate.FateStore.Seeder; -import org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus; import org.apache.accumulo.core.logging.FateLogger; import org.apache.accumulo.core.manager.thrift.TFateOperation; -import org.apache.accumulo.core.util.UtilWaitThread; import org.apache.accumulo.core.util.threads.ThreadPools; import org.apache.hadoop.util.Sets; -import org.apache.thrift.TApplicationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -78,16 +63,15 @@ */ 
@SuppressFBWarnings(value = "CT_CONSTRUCTOR_THROW", justification = "Constructor validation is required for proper initialization") -public class Fate { +public class Fate extends FateClient { // TODO remove extension of FateClient - private static final Logger log = LoggerFactory.getLogger(Fate.class); + static final Logger log = LoggerFactory.getLogger(Fate.class); private final FateStore store; private final ScheduledFuture fatePoolsWatcherFuture; private final AtomicInteger needMoreThreadsWarnCount = new AtomicInteger(0); private final ExecutorService deadResCleanerExecutor; - private static final EnumSet FINISHED_STATES = EnumSet.of(FAILED, SUCCESSFUL, UNKNOWN); public static final Duration INITIAL_DELAY = Duration.ofSeconds(3); private static final Duration DEAD_RES_CLEANUP_DELAY = Duration.ofMinutes(3); public static final Duration POOL_WATCHER_DELAY = Duration.ofSeconds(30); @@ -274,6 +258,7 @@ public void run() { public Fate(T environment, FateStore store, boolean runDeadResCleaner, Function,String> toLogStrFunc, AccumuloConfiguration conf, ScheduledThreadPoolExecutor genSchedExecutor) { + super(store, toLogStrFunc); this.store = FateLogger.wrap(store, toLogStrFunc, false); fatePoolsWatcherFuture = @@ -283,8 +268,6 @@ public Fate(T environment, FateStore store, boolean runDeadResCleaner, ScheduledExecutorService deadResCleanerExecutor = null; if (runDeadResCleaner) { - // TODO make this use partitions - // Create a dead reservation cleaner for this store that will periodically clean up // reservations held by dead processes, if they exist. 
deadResCleanerExecutor = ThreadPools.getServerThreadPools().createScheduledExecutorService(1, @@ -396,44 +379,6 @@ public AtomicInteger getNeedMoreThreadsWarnCount() { return needMoreThreadsWarnCount; } - // get a transaction id back to the requester before doing any work - public FateId startTransaction() { - return store.create(); - } - - private AtomicReference> seedingConsumer = new AtomicReference<>(fid -> {}); - - // TODO move seeding and waiting operation into their own class, the primary manager will not need - // to create a user fate object. Fate could extend this class to ease the change. - - public void setSeedingConsumer(Consumer seedingConsumer) { - this.seedingConsumer.set(seedingConsumer); - } - - public Seeder beginSeeding() { - // TODO pass seeding consumer - return store.beginSeeding(); - } - - public void seedTransaction(FateOperation fateOp, FateKey fateKey, Repo repo, - boolean autoCleanUp) { - try (var seeder = store.beginSeeding()) { - seeder.attemptToSeedTransaction(fateOp, fateKey, repo, autoCleanUp) - .thenAccept(optionalFatId -> { - optionalFatId.ifPresent(seedingConsumer.get()); - }); - } - } - - // start work in the transaction.. it is safe to call this - // multiple times for a transaction... 
but it will only seed once - public void seedTransaction(FateOperation fateOp, FateId fateId, Repo repo, - boolean autoCleanUp, String goalMessage) { - log.info("[{}] Seeding {} {} {}", store.type(), fateOp, fateId, goalMessage); - store.seedTransaction(fateOp, fateId, repo, autoCleanUp); - seedingConsumer.get().accept(fateId); - } - public void seeded(Set partitions) { synchronized (fateExecutors) { if (Sets.intersection(currentPartitions, partitions).isEmpty()) { @@ -445,108 +390,6 @@ public void seeded(Set partitions) { store.seeded(); } - // check on the transaction - public TStatus waitForCompletion(FateId fateId) { - return store.read(fateId).waitForStatusChange(FINISHED_STATES); - } - - /** - * Attempts to cancel a running Fate transaction - * - * @param fateId fate transaction id - * @return true if transaction transitioned to a failed state or already in a completed state, - * false otherwise - */ - public boolean cancel(FateId fateId) { - for (int retries = 0; retries < 5; retries++) { - Optional> optionalTxStore = store.tryReserve(fateId); - if (optionalTxStore.isPresent()) { - var txStore = optionalTxStore.orElseThrow(); - try { - TStatus status = txStore.getStatus(); - log.info("[{}] status is: {}", store.type(), status); - if (status == NEW || status == SUBMITTED) { - txStore.setTransactionInfo(TxInfo.EXCEPTION, new TApplicationException( - TApplicationException.INTERNAL_ERROR, "Fate transaction cancelled by user")); - txStore.setStatus(FAILED_IN_PROGRESS); - log.info( - "[{}] Updated status for {} to FAILED_IN_PROGRESS because it was cancelled by user", - store.type(), fateId); - return true; - } else { - log.info("[{}] {} cancelled by user but already in progress or finished state", - store.type(), fateId); - return false; - } - } finally { - txStore.unreserve(Duration.ZERO); - } - } else { - // reserved, lets retry. 
- UtilWaitThread.sleep(500); - } - } - log.info("[{}] Unable to reserve transaction {} to cancel it", store.type(), fateId); - return false; - } - - // resource cleanup - public void delete(FateId fateId) { - FateTxStore txStore = store.reserve(fateId); - try { - switch (txStore.getStatus()) { - case NEW: - case SUBMITTED: - case FAILED: - case SUCCESSFUL: - txStore.delete(); - break; - case FAILED_IN_PROGRESS: - case IN_PROGRESS: - throw new IllegalStateException("Can not delete in progress transaction " + fateId); - case UNKNOWN: - // nothing to do, it does not exist - break; - } - } finally { - txStore.unreserve(Duration.ZERO); - } - } - - public String getReturn(FateId fateId) { - FateTxStore txStore = store.reserve(fateId); - try { - if (txStore.getStatus() != SUCCESSFUL) { - throw new IllegalStateException( - "Tried to get exception when transaction " + fateId + " not in successful state"); - } - return (String) txStore.getTransactionInfo(TxInfo.RETURN_VALUE); - } finally { - txStore.unreserve(Duration.ZERO); - } - } - - // get reportable failures - public Exception getException(FateId fateId) { - FateTxStore txStore = store.reserve(fateId); - try { - if (txStore.getStatus() != FAILED) { - throw new IllegalStateException( - "Tried to get exception when transaction " + fateId + " not in failed state"); - } - return (Exception) txStore.getTransactionInfo(TxInfo.EXCEPTION); - } finally { - txStore.unreserve(Duration.ZERO); - } - } - - /** - * Lists transctions for a given fate key type. - */ - public Stream list(FateKey.FateKeyType type) { - return store.list(type); - } - /** * Initiates shutdown of background threads that run fate operations and cleanup fate data and * optionally waits on them. 
Leaves the fate object in a state where it can still update and read diff --git a/core/src/main/java/org/apache/accumulo/core/fate/FateClient.java b/core/src/main/java/org/apache/accumulo/core/fate/FateClient.java new file mode 100644 index 00000000000..3e87c3cfb09 --- /dev/null +++ b/core/src/main/java/org/apache/accumulo/core/fate/FateClient.java @@ -0,0 +1,205 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.accumulo.core.fate; + +import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.FAILED; +import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.FAILED_IN_PROGRESS; +import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.NEW; +import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.SUBMITTED; +import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.SUCCESSFUL; +import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.UNKNOWN; + +import java.time.Duration; +import java.util.EnumSet; +import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Stream; + +import org.apache.accumulo.core.logging.FateLogger; +import org.apache.accumulo.core.util.UtilWaitThread; +import org.apache.thrift.TApplicationException; + +/** + * Supports initiating and checking status of fate operations. 
+ * + */ +public class FateClient { + + private final FateStore store; + + private static final EnumSet FINISHED_STATES = + EnumSet.of(FAILED, SUCCESSFUL, UNKNOWN); + + private AtomicReference> seedingConsumer = new AtomicReference<>(fid -> {}); + + public FateClient(FateStore store, Function,String> toLogStrFunc) { + this.store = FateLogger.wrap(store, toLogStrFunc, false); + ; + } + + // get a transaction id back to the requester before doing any work + public FateId startTransaction() { + return store.create(); + } + + public FateStore.Seeder beginSeeding() { + var seeder = store.beginSeeding(); + return new FateStore.Seeder() { + @Override + public CompletableFuture> attemptToSeedTransaction(Fate.FateOperation fateOp, + FateKey fateKey, Repo repo, boolean autoCleanUp) { + var cfuture = seeder.attemptToSeedTransaction(fateOp, fateKey, repo, autoCleanUp); + return cfuture.thenApply(optional -> { + optional.ifPresent(seedingConsumer.get()); + return optional; + }); + } + + @Override + public void close() { + seeder.close(); + } + }; + } + + public void seedTransaction(Fate.FateOperation fateOp, FateKey fateKey, Repo repo, + boolean autoCleanUp) { + try (var seeder = store.beginSeeding()) { + seeder.attemptToSeedTransaction(fateOp, fateKey, repo, autoCleanUp); + } + } + + // start work in the transaction.. it is safe to call this + // multiple times for a transaction... 
but it will only seed once + public void seedTransaction(Fate.FateOperation fateOp, FateId fateId, Repo repo, + boolean autoCleanUp, String goalMessage) { + Fate.log.info("[{}] Seeding {} {} {}", store.type(), fateOp, fateId, goalMessage); + store.seedTransaction(fateOp, fateId, repo, autoCleanUp); + seedingConsumer.get().accept(fateId); + } + + // check on the transaction + public ReadOnlyFateStore.TStatus waitForCompletion(FateId fateId) { + return store.read(fateId).waitForStatusChange(FINISHED_STATES); + } + + /** + * Attempts to cancel a running Fate transaction + * + * @param fateId fate transaction id + * @return true if transaction transitioned to a failed state or already in a completed state, + * false otherwise + */ + public boolean cancel(FateId fateId) { + for (int retries = 0; retries < 5; retries++) { + Optional> optionalTxStore = store.tryReserve(fateId); + if (optionalTxStore.isPresent()) { + var txStore = optionalTxStore.orElseThrow(); + try { + ReadOnlyFateStore.TStatus status = txStore.getStatus(); + Fate.log.info("[{}] status is: {}", store.type(), status); + if (status == NEW || status == SUBMITTED) { + txStore.setTransactionInfo(Fate.TxInfo.EXCEPTION, new TApplicationException( + TApplicationException.INTERNAL_ERROR, "Fate transaction cancelled by user")); + txStore.setStatus(FAILED_IN_PROGRESS); + Fate.log.info( + "[{}] Updated status for {} to FAILED_IN_PROGRESS because it was cancelled by user", + store.type(), fateId); + return true; + } else { + Fate.log.info("[{}] {} cancelled by user but already in progress or finished state", + store.type(), fateId); + return false; + } + } finally { + txStore.unreserve(Duration.ZERO); + } + } else { + // reserved, lets retry. 
+ UtilWaitThread.sleep(500); + } + } + Fate.log.info("[{}] Unable to reserve transaction {} to cancel it", store.type(), fateId); + return false; + } + + // resource cleanup + public void delete(FateId fateId) { + FateStore.FateTxStore txStore = store.reserve(fateId); + try { + switch (txStore.getStatus()) { + case NEW: + case SUBMITTED: + case FAILED: + case SUCCESSFUL: + txStore.delete(); + break; + case FAILED_IN_PROGRESS: + case IN_PROGRESS: + throw new IllegalStateException("Can not delete in progress transaction " + fateId); + case UNKNOWN: + // nothing to do, it does not exist + break; + } + } finally { + txStore.unreserve(Duration.ZERO); + } + } + + public String getReturn(FateId fateId) { + FateStore.FateTxStore txStore = store.reserve(fateId); + try { + if (txStore.getStatus() != SUCCESSFUL) { + throw new IllegalStateException( + "Tried to get exception when transaction " + fateId + " not in successful state"); + } + return (String) txStore.getTransactionInfo(Fate.TxInfo.RETURN_VALUE); + } finally { + txStore.unreserve(Duration.ZERO); + } + } + + // get reportable failures + public Exception getException(FateId fateId) { + FateStore.FateTxStore txStore = store.reserve(fateId); + try { + if (txStore.getStatus() != FAILED) { + throw new IllegalStateException( + "Tried to get exception when transaction " + fateId + " not in failed state"); + } + return (Exception) txStore.getTransactionInfo(Fate.TxInfo.EXCEPTION); + } finally { + txStore.unreserve(Duration.ZERO); + } + } + + /** + * Lists transctions for a given fate key type. 
+ */ + public Stream list(FateKey.FateKeyType type) { + return store.list(type); + } + + public void setSeedingConsumer(Consumer seedingConsumer) { + this.seedingConsumer.set(seedingConsumer); + } +} diff --git a/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/MetaFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/MetaFateStore.java index ae85e3f7179..a7cf3236af7 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/MetaFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/zookeeper/MetaFateStore.java @@ -72,6 +72,9 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Suppliers; +import com.google.common.collect.Range; +import com.google.common.collect.RangeSet; +import com.google.common.collect.TreeRangeSet; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; @@ -620,13 +623,11 @@ public Optional getFateOperation() { @Override protected Stream getTransactions(Set partitions, EnumSet statuses) { - return getTransactions(statuses).filter(fis -> { - // TODO this could be inefficient - for (var p : partitions) { - return p.contains(fis.getFateId()); - } - return false; - }); + + RangeSet rangeSet = TreeRangeSet.create(); + partitions.forEach(partition -> rangeSet.add(Range.closed(partition.start(), partition.end()))); + + return getTransactions(statuses).filter(fis -> rangeSet.contains(fis.getFateId())); } @Override diff --git a/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java b/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java index cbf826ae43c..d07fe8cb770 100644 --- a/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java +++ b/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java @@ -78,7 +78,7 @@ private ServiceLockPath(String type) { || this.type.equals(Constants.ZMANAGER_LOCK) || this.type.equals(Constants.ZMONITOR_LOCK) || 
this.type.equals(Constants.ZTABLE_LOCKS) || this.type.equals(Constants.ZADMIN_LOCK) || this.type.equals(Constants.ZTEST_LOCK) - || this.type.equals(Constants.ZMANAGER_WORKER_LOCK), "Unsupported type: " + type); + || this.type.equals(Constants.ZMANAGER_ASSISTANT_LOCK), "Unsupported type: " + type); // These server types support only one active instance, so they use a lock at // a known path, not the server's address. this.resourceGroup = null; @@ -107,7 +107,7 @@ private ServiceLockPath(String type, ResourceGroupId resourceGroup, HostAndPort Preconditions.checkArgument( this.type.equals(Constants.ZCOMPACTORS) || this.type.equals(Constants.ZSSERVERS) || this.type.equals(Constants.ZTSERVERS) || this.type.equals(Constants.ZDEADTSERVERS) - || this.type.equals(Constants.ZMANAGER_WORKER_LOCK), + || this.type.equals(Constants.ZMANAGER_ASSISTANT_LOCK), "Unsupported type: " + type); this.resourceGroup = requireNonNull(resourceGroup); this.server = requireNonNull(server).toString(); @@ -172,8 +172,8 @@ private static String determineServerType(final String path) { return Constants.ZGC_LOCK; } else if (pathStartsWith(path, Constants.ZMANAGER_LOCK)) { return Constants.ZMANAGER_LOCK; - } else if (pathStartsWith(path, Constants.ZMANAGER_WORKER_LOCK)) { - return Constants.ZMANAGER_WORKER_LOCK; + } else if (pathStartsWith(path, Constants.ZMANAGER_ASSISTANT_LOCK)) { + return Constants.ZMANAGER_ASSISTANT_LOCK; } else if (pathStartsWith(path, Constants.ZMONITOR_LOCK)) { return Constants.ZMONITOR_LOCK; } else if (pathStartsWith(path, Constants.ZMINI_LOCK)) { @@ -223,7 +223,7 @@ public static ServiceLockPath parse(Optional serverType, String path) { return switch (type) { case Constants.ZMINI_LOCK -> new ServiceLockPath(type, server); case Constants.ZCOMPACTORS, Constants.ZSSERVERS, Constants.ZTSERVERS, - Constants.ZDEADTSERVERS, Constants.ZMANAGER_WORKER_LOCK -> + Constants.ZDEADTSERVERS, Constants.ZMANAGER_ASSISTANT_LOCK -> new ServiceLockPath(type, ResourceGroupId.of(resourceGroup), 
HostAndPort.fromString(server)); default -> @@ -244,7 +244,7 @@ public ServiceLockPath createManagerPath() { public ServiceLockPath createManagerWorkerPath(ResourceGroupId resourceGroup, HostAndPort advertiseAddress) { - return new ServiceLockPath(Constants.ZMANAGER_WORKER_LOCK, resourceGroup, advertiseAddress); + return new ServiceLockPath(Constants.ZMANAGER_ASSISTANT_LOCK, resourceGroup, advertiseAddress); } public ServiceLockPath createMiniPath(String miniUUID) { @@ -298,7 +298,7 @@ public Set getCompactor(ResourceGroupPredicate resourceGroupPre public Set getManagerWorker(ResourceGroupPredicate resourceGroupPredicate, AddressSelector address, boolean withLock) { - return get(Constants.ZMANAGER_WORKER_LOCK, resourceGroupPredicate, address, withLock); + return get(Constants.ZMANAGER_ASSISTANT_LOCK, resourceGroupPredicate, address, withLock); } /** @@ -446,7 +446,7 @@ private Set get(final String serverType, } } else if (serverType.equals(Constants.ZCOMPACTORS) || serverType.equals(Constants.ZSSERVERS) || serverType.equals(Constants.ZTSERVERS) || serverType.equals(Constants.ZDEADTSERVERS) - || serverType.equals(Constants.ZMANAGER_WORKER_LOCK)) { + || serverType.equals(Constants.ZMANAGER_ASSISTANT_LOCK)) { final List resourceGroups = zooCache.getChildren(typePath); for (final String group : resourceGroups) { if (resourceGroupPredicate.test(ResourceGroupId.of(group))) { diff --git a/core/src/main/thrift/fate-worker.thrift b/core/src/main/thrift/fate-worker.thrift index 8538d7d3b26..01e1475d688 100644 --- a/core/src/main/thrift/fate-worker.thrift +++ b/core/src/main/thrift/fate-worker.thrift @@ -50,7 +50,6 @@ service FateWorkerService { 1:client.ThriftSecurityException sec ) - // TODO drop oneway and rate limiter void seeded( 1:client.TInfo tinfo, 2:security.TCredentials credentials, diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java b/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java 
index 2477b65b13a..3e29f2135d9 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java +++ b/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java @@ -177,7 +177,7 @@ void initialize(final ServerContext context, final String rootTabletDirName, zrwChroot.putPersistentData(Constants.ZCOMPACTIONS, EMPTY_BYTE_ARRAY, ZooUtil.NodeExistsPolicy.FAIL); // TODO would need to create in upgrade - zrwChroot.putPersistentData(Constants.ZMANAGER_WORKER_LOCK, EMPTY_BYTE_ARRAY, + zrwChroot.putPersistentData(Constants.ZMANAGER_ASSISTANT_LOCK, EMPTY_BYTE_ARRAY, ZooUtil.NodeExistsPolicy.FAIL); } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/FateServiceHandler.java b/server/manager/src/main/java/org/apache/accumulo/manager/FateServiceHandler.java index 6c31e9174b7..615ea1a0d22 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/FateServiceHandler.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/FateServiceHandler.java @@ -132,7 +132,7 @@ public TFateId beginFateOperation(TInfo tinfo, TCredentials credentials, TFateIn throws ThriftSecurityException { authenticate(credentials); return new TFateId(type, - manager.fate(FateInstanceType.fromThrift(type)).startTransaction().getTxUUIDStr()); + manager.fateClient(FateInstanceType.fromThrift(type)).startTransaction().getTxUUIDStr()); } @Override @@ -157,7 +157,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat } goalMessage += "Create " + namespace + " namespace."; - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>(new CreateNamespace(c.getPrincipal(), namespace, options)), autoCleanup, goalMessage); break; @@ -176,7 +176,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat } goalMessage += "Rename " + oldName + " namespace to " + newName; - manager.fate(type).seedTransaction(op, 
fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>(new RenameNamespace(namespaceId, oldName, newName)), autoCleanup, goalMessage); break; @@ -194,7 +194,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat } goalMessage += "Delete namespace Id: " + namespaceId; - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>(new DeleteNamespace(namespaceId)), autoCleanup, goalMessage); break; } @@ -252,7 +252,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat goalMessage += "Create table " + tableName + " " + initialTableState + " with " + splitCount + " splits and initial tabletAvailability of " + initialTabletAvailability; - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>(new CreateTable(c.getPrincipal(), tableName, timeType, options, splitsPath, splitCount, splitsDirsPath, initialTableState, // Set the default tablet to be auto-mergeable with other tablets if it is split @@ -288,7 +288,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat goalMessage += "Rename table " + oldTableName + "(" + tableId + ") to " + oldTableName; try { - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>(new RenameTable(namespaceId, tableId, oldTableName, newTableName)), autoCleanup, goalMessage); } catch (NamespaceNotFoundException e) { @@ -370,7 +370,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat goalMessage += " and keep offline."; } - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>(new CloneTable(c.getPrincipal(), srcNamespaceId, srcTableId, namespaceId, tableName, propertiesToSet, propertiesToExclude, keepOffline)), autoCleanup, goalMessage); @@ -400,7 
+400,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat } goalMessage += "Delete table " + tableName + "(" + tableId + ")"; - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>(new PreDeleteTable(namespaceId, tableId)), autoCleanup, goalMessage); break; } @@ -427,7 +427,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat goalMessage += "Online table " + tableId; final EnumSet expectedCurrStates = EnumSet.of(TableState.ONLINE, TableState.OFFLINE); - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>( new ChangeTableState(namespaceId, tableId, tableOp, expectedCurrStates)), autoCleanup, goalMessage); @@ -456,7 +456,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat goalMessage += "Offline table " + tableId; final EnumSet expectedCurrStates = EnumSet.of(TableState.ONLINE, TableState.OFFLINE); - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>( new ChangeTableState(namespaceId, tableId, tableOp, expectedCurrStates)), autoCleanup, goalMessage); @@ -492,7 +492,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat startRowStr, endRowStr); goalMessage += "Merge table " + tableName + "(" + tableId + ") splits from " + startRowStr + " to " + endRowStr; - manager.fate(type).seedTransaction(op, fateId, new TraceRepo<>( + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>( new TableRangeOp(MergeInfo.Operation.MERGE, namespaceId, tableId, startRow, endRow)), autoCleanup, goalMessage); break; @@ -524,7 +524,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat goalMessage += "Delete table " + tableName + "(" + tableId + ") range " + startRow + " to " + endRow; - 
manager.fate(type).seedTransaction(op, fateId, new TraceRepo<>( + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>( new TableRangeOp(MergeInfo.Operation.DELETE, namespaceId, tableId, startRow, endRow)), autoCleanup, goalMessage); break; @@ -550,7 +550,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat } goalMessage += "Compact table (" + tableId + ") with config " + compactionConfig; - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>(new CompactRange(namespaceId, tableId, compactionConfig)), autoCleanup, goalMessage); break; @@ -574,7 +574,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat } goalMessage += "Cancel compaction of table (" + tableId + ")"; - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>(new CancelCompactions(namespaceId, tableId)), autoCleanup, goalMessage); break; } @@ -609,7 +609,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat } goalMessage += "Import table with new name: " + tableName + " from " + exportDirs; - manager.fate(type) + manager.fateClient(type) .seedTransaction(op, fateId, new TraceRepo<>(new ImportTable(c.getPrincipal(), tableName, exportDirs, namespaceId, keepMappings, keepOffline)), autoCleanup, goalMessage); @@ -639,7 +639,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat } goalMessage += "Export table " + tableName + "(" + tableId + ") to " + exportDir; - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>(new ExportTable(namespaceId, tableName, tableId, exportDir)), autoCleanup, goalMessage); break; @@ -676,7 +676,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat manager.updateBulkImportStatus(dir, BulkImportState.INITIAL); 
goalMessage += "Bulk import (v2) " + dir + " to " + tableName + "(" + tableId + ")"; - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>(new ComputeBulkRange(tableId, dir, setTime)), autoCleanup, goalMessage); break; } @@ -720,7 +720,7 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat goalMessage += "Set availability for table: " + tableName + "(" + tableId + ") range: " + tRange + " to: " + tabletAvailability.name(); - manager.fate(type).seedTransaction(op, fateId, + manager.fateClient(type).seedTransaction(op, fateId, new TraceRepo<>(new LockTable(tableId, namespaceId, tRange, tabletAvailability)), autoCleanup, goalMessage); break; @@ -794,8 +794,8 @@ public void executeFateOperation(TInfo tinfo, TCredentials c, TFateId opid, TFat } goalMessage = "Splitting " + extent + " for user into " + (splits.size() + 1) + " tablets"; - manager.fate(type).seedTransaction(op, fateId, new PreSplit(extent, splits), autoCleanup, - goalMessage); + manager.fateClient(type).seedTransaction(op, fateId, new PreSplit(extent, splits), + autoCleanup, goalMessage); break; } default: @@ -847,9 +847,9 @@ public String waitForFateOperation(TInfo tinfo, TCredentials credentials, TFateI FateId fateId = FateId.fromThrift(opid); FateInstanceType type = fateId.getType(); - TStatus status = manager.fate(type).waitForCompletion(fateId); + TStatus status = manager.fateClient(type).waitForCompletion(fateId); if (status == TStatus.FAILED) { - Exception e = manager.fate(type).getException(fateId); + Exception e = manager.fateClient(type).getException(fateId); if (e instanceof ThriftTableOperationException) { throw (ThriftTableOperationException) e; } else if (e instanceof ThriftSecurityException) { @@ -861,7 +861,7 @@ public String waitForFateOperation(TInfo tinfo, TCredentials credentials, TFateI } } - String ret = manager.fate(type).getReturn(fateId); + String ret = 
manager.fateClient(type).getReturn(fateId); if (ret == null) { ret = ""; // thrift does not like returning null } @@ -873,7 +873,7 @@ public void finishFateOperation(TInfo tinfo, TCredentials credentials, TFateId o throws ThriftSecurityException { authenticate(credentials); FateId fateId = FateId.fromThrift(opid); - manager.fate(fateId.getType()).delete(fateId); + manager.fateClient(fateId.getType()).delete(fateId); } protected void authenticate(TCredentials credentials) throws ThriftSecurityException { @@ -987,6 +987,6 @@ public boolean cancelFateOperation(TInfo tinfo, TCredentials credentials, TFateI SecurityErrorCode.PERMISSION_DENIED); } - return manager.fate(fateId.getType()).cancel(fateId); + return manager.fateClient(fateId.getType()).cancel(fateId); } } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index 6d424329ebd..6b6396a4408 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -68,6 +68,7 @@ import org.apache.accumulo.core.dataImpl.KeyExtent; import org.apache.accumulo.core.fate.Fate; import org.apache.accumulo.core.fate.FateCleaner; +import org.apache.accumulo.core.fate.FateClient; import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.FatePartition; @@ -206,8 +207,11 @@ public class Manager extends AbstractServer implements LiveTServerSet.Listener, // should already have been set; ConcurrentHashMap will guarantee that all threads will see // the initialized fate references after the latch is ready private final CountDownLatch fateReadyLatch = new CountDownLatch(1); + private final AtomicReference>> fateClients = + new AtomicReference<>(); private final AtomicReference>> fateRefs = new AtomicReference<>(); + private volatile FateManager fateManager; private 
final ManagerMetrics managerMetrics = new ManagerMetrics(); @@ -296,17 +300,7 @@ public boolean stillManager() { return getManagerState() != ManagerState.STOP; } - /** - * Retrieve the Fate object, blocking until it is ready. This could cause problems if Fate - * operations are attempted to be used prior to the Manager being ready for them. If these - * operations are triggered by a client side request from a tserver or client, it should be safe - * to wait to handle those until Fate is ready, but if it occurs during an upgrade, or some other - * time in the Manager before Fate is started, that may result in a deadlock and will need to be - * fixed. - * - * @return the Fate object, only after the fate components are running and ready - */ - public Fate fate(FateInstanceType type) { + private void waitForFate() { try { // block up to 30 seconds until it's ready; if it's still not ready, introduce some logging if (!fateReadyLatch.await(30, SECONDS)) { @@ -327,7 +321,28 @@ public Fate fate(FateInstanceType type) { Thread.currentThread().interrupt(); throw new IllegalStateException("Thread was interrupted; cannot proceed"); } - return getFateRefs().get(type); + } + + /** + * Retrieve the Fate object, blocking until it is ready. This could cause problems if Fate + * operations are attempted to be used prior to the Manager being ready for them. If these + * operations are triggered by a client side request from a tserver or client, it should be safe + * to wait to handle those until Fate is ready, but if it occurs during an upgrade, or some other + * time in the Manager before Fate is started, that may result in a deadlock and will need to be + * fixed. 
+ * + * @return the Fate object, only after the fate components are running and ready + */ + public Fate fate(FateInstanceType type) { + waitForFate(); + var fate = Objects.requireNonNull(fateRefs.get(), "fateRefs is not set yet").get(type); + return Objects.requireNonNull(fate, () -> "fate type " + type + " is not present"); + } + + public FateClient fateClient(FateInstanceType type) { + waitForFate(); + var client = Objects.requireNonNull(fateClients.get(), "fateClients is not set yet").get(type); + return Objects.requireNonNull(client, () -> "fate client type " + type + " is not present"); } static final boolean X = true; @@ -696,9 +711,7 @@ public void run() { case CLEAN_STOP: switch (getManagerState()) { case NORMAL: - // USER fate stores its data in a user table and its operations may interact with - // all tables, need to completely shut it down before unloading user tablets - fate(FateInstanceType.USER).shutdown(1, MINUTES); + fateManager.stop(); setManagerState(ManagerState.SAFE_MODE); break; case SAFE_MODE: { @@ -930,7 +943,7 @@ public void run() { // Start the Manager's Fate Service fateServiceHandler = new FateServiceHandler(this); managerClientHandler = new ManagerClientServiceHandler(this); - compactionCoordinator = new CompactionCoordinator(this, fateRefs); + compactionCoordinator = new CompactionCoordinator(this, this::fateClient); var processor = ThriftProcessorTypes.getManagerTProcessor(this, fateServiceHandler, compactionCoordinator.getThriftService(), managerClientHandler, getContext()); @@ -1138,24 +1151,28 @@ boolean canSuspendTablets() { lock -> ServiceLock.isLockHeld(context.getZooCache(), lock); var metaInstance = initializeFateInstance(context, new MetaFateStore<>(context.getZooSession(), managerLock.getLockID(), isLockHeld)); - var userInstance = initializeFateInstance(context, new UserFateStore<>(context, - SystemTables.FATE.tableName(), managerLock.getLockID(), isLockHeld)); + var userFateClient = + new FateClient(new 
UserFateStore<>(context, SystemTables.FATE.tableName(), + managerLock.getLockID(), isLockHeld), TraceRepo::toLogString); - if (!fateRefs.compareAndSet(null, - Map.of(FateInstanceType.META, metaInstance, FateInstanceType.USER, userInstance))) { + if (!fateClients.compareAndSet(null, + Map.of(FateInstanceType.META, metaInstance, FateInstanceType.USER, userFateClient))) { + throw new IllegalStateException( + "Unexpected previous fateClient reference map already initialized"); + } + if (!fateRefs.compareAndSet(null, Map.of(FateInstanceType.META, metaInstance))) { throw new IllegalStateException( "Unexpected previous fate reference map already initialized"); } - managerMetrics.configureFateMetrics(getConfiguration(), this, fateRefs.get()); + managerMetrics.configureFateMetrics(getConfiguration(), this); fateReadyLatch.countDown(); } catch (KeeperException | InterruptedException e) { throw new IllegalStateException("Exception setting up FaTE cleanup thread", e); } - // TODO eventually stop this - var fateManager = new FateManager(getContext()); + fateManager = new FateManager(getContext()); fateManager.start(); - fate(FateInstanceType.USER).setSeedingConsumer(fateManager::notifySeeded); + fateClient(FateInstanceType.USER).setSeedingConsumer(fateManager::notifySeeded); producers.addAll(managerMetrics.getProducers(this)); metricsInfo.addMetricsProducers(producers.toArray(new MetricsProducer[0])); @@ -1242,7 +1259,8 @@ boolean canSuspendTablets() { } log.debug("Shutting down fate."); - getFateRefs().keySet().forEach(type -> fate(type).close()); + fate(FateInstanceType.META).close(); + fateManager.stop(); splitter.stop(); @@ -1659,12 +1677,6 @@ public void registerMetrics(MeterRegistry registry) { compactionCoordinator.registerMetrics(registry); } - private Map> getFateRefs() { - var fateRefs = this.fateRefs.get(); - Preconditions.checkState(fateRefs != null, "Unexpected null fate references map"); - return fateRefs; - } - @Override public ServiceLock getLock() { return 
managerLock; diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerClientServiceHandler.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerClientServiceHandler.java index a58ef5b1058..38bbf54c04e 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerClientServiceHandler.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerClientServiceHandler.java @@ -59,6 +59,7 @@ import org.apache.accumulo.core.dataImpl.KeyExtent; import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent; import org.apache.accumulo.core.fate.Fate; +import org.apache.accumulo.core.fate.FateClient; import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter; @@ -334,7 +335,7 @@ public void shutdownTabletServer(TInfo info, TCredentials c, String tabletServer } } - Fate fate = manager.fate(FateInstanceType.META); + FateClient fate = manager.fateClient(FateInstanceType.META); FateId fateId = fate.startTransaction(); String msg = "Shutdown tserver " + tabletServer; @@ -362,7 +363,7 @@ public void tabletServerStopping(TInfo tinfo, TCredentials credentials, String t if (manager.shutdownTServer(tserver)) { // If there is an exception seeding the fate tx this should cause the RPC to fail which should // cause the tserver to halt. Because of that not making an attempt to handle failure here. 
- Fate fate = manager.fate(FateInstanceType.META); + FateClient fate = manager.fateClient(FateInstanceType.META); var tid = fate.startTransaction(); String msg = "Shutdown tserver " + tabletServer; diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/CompactionCoordinator.java b/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/CompactionCoordinator.java index e6c0c86f2e5..d6915b5155c 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/CompactionCoordinator.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/CompactionCoordinator.java @@ -56,8 +56,8 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -90,6 +90,7 @@ import org.apache.accumulo.core.dataImpl.KeyExtent; import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent; import org.apache.accumulo.core.fate.Fate; +import org.apache.accumulo.core.fate.FateClient; import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.FateKey; @@ -121,7 +122,6 @@ import org.apache.accumulo.core.tabletserver.thrift.TCompactionKind; import org.apache.accumulo.core.tabletserver.thrift.TCompactionStats; import org.apache.accumulo.core.tabletserver.thrift.TExternalCompactionJob; -import org.apache.accumulo.core.util.UtilWaitThread; import org.apache.accumulo.core.util.cache.Caches.CacheName; import org.apache.accumulo.core.util.compaction.CompactionPlannerInitParams; import org.apache.accumulo.core.util.compaction.CompactionServicesConfig; @@ -271,7 +271,7 @@ static FailureCounts 
incrementSuccess(Object key, FailureCounts counts) { private final ServerContext ctx; private final AuditedSecurityOperation security; private final CompactionJobQueues jobQueues; - private final AtomicReference>> fateInstances; + private final Function> fateClients; // Exposed for tests protected final CountDownLatch shutdown = new CountDownLatch(1); @@ -291,7 +291,7 @@ static FailureCounts incrementSuccess(Object key, FailureCounts counts) { private final Set activeCompactorReservationRequest = ConcurrentHashMap.newKeySet(); public CompactionCoordinator(Manager manager, - AtomicReference>> fateInstances) { + Function> fateClients) { this.ctx = manager.getContext(); this.security = ctx.getSecurityOperation(); this.manager = Objects.requireNonNull(manager); @@ -303,7 +303,7 @@ public CompactionCoordinator(Manager manager, this.queueMetrics = new QueueMetrics(jobQueues); - this.fateInstances = fateInstances; + this.fateClients = fateClients; completed = ctx.getCaches().createNewBuilder(CacheName.COMPACTIONS_COMPLETED, true) .maximumSize(200).expireAfterWrite(10, TimeUnit.MINUTES).build(); @@ -326,7 +326,7 @@ public CompactionCoordinator(Manager manager, .maximumWeight(10485760L).weigher(weigher).build(); deadCompactionDetector = - new DeadCompactionDetector(this.ctx, this, ctx.getScheduledExecutor(), fateInstances); + new DeadCompactionDetector(this.ctx, this, ctx.getScheduledExecutor(), fateClients); var rootReservationPool = ThreadPools.getServerThreadPools().createExecutorService( ctx.getConfiguration(), Property.COMPACTION_COORDINATOR_RESERVATION_THREADS_ROOT, true); @@ -789,17 +789,9 @@ public void compactionCompleted(TInfo tinfo, TCredentials credentials, } // maybe fate has not started yet - var localFates = fateInstances.get(); - while (localFates == null) { - UtilWaitThread.sleep(100); - if (shutdown.getCount() == 0) { - return; - } - localFates = fateInstances.get(); - } - var extent = KeyExtent.fromThrift(textent); - var localFate = 
localFates.get(FateInstanceType.fromTableId(extent.tableId())); + var fateType = FateInstanceType.fromTableId(extent.tableId()); + var localFate = fateClients.apply(fateType); LOG.info("Compaction completed, id: {}, stats: {}, extent: {}", externalCompactionId, stats, extent); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/DeadCompactionDetector.java b/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/DeadCompactionDetector.java index da852f1bb1a..ce04296a615 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/DeadCompactionDetector.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/compaction/coordinator/DeadCompactionDetector.java @@ -18,6 +18,7 @@ */ package org.apache.accumulo.manager.compaction.coordinator; +import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; @@ -27,14 +28,14 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.data.TableId; import org.apache.accumulo.core.dataImpl.KeyExtent; -import org.apache.accumulo.core.fate.Fate; +import org.apache.accumulo.core.fate.FateClient; import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.FateKey; import org.apache.accumulo.core.metadata.schema.Ample.DataLevel; @@ -62,16 +63,16 @@ public class DeadCompactionDetector { private final ScheduledThreadPoolExecutor schedExecutor; private final ConcurrentHashMap deadCompactions; private final Set tablesWithUnreferencedTmpFiles = new HashSet<>(); - private final AtomicReference>> fateInstances; + private final Function> 
fateClients; public DeadCompactionDetector(ServerContext context, CompactionCoordinator coordinator, ScheduledThreadPoolExecutor stpe, - AtomicReference>> fateInstances) { + Function> fateClients) { this.context = context; this.coordinator = coordinator; this.schedExecutor = stpe; this.deadCompactions = new ConcurrentHashMap<>(); - this.fateInstances = fateInstances; + this.fateClients = fateClients; } public void addTableId(TableId tableWithUnreferencedTmpFiles) { @@ -196,13 +197,8 @@ private void detectDeadCompactions() { if (!tabletCompactions.isEmpty()) { // look for any compactions committing in fate and remove those - var fateMap = fateInstances.get(); - if (fateMap == null) { - log.warn("Fate is not present, can not look for dead compactions"); - return; - } - try (Stream keyStream = fateMap.values().stream() - .flatMap(fate -> fate.list(FateKey.FateKeyType.COMPACTION_COMMIT))) { + try (Stream keyStream = Arrays.stream(FateInstanceType.values()).map(fateClients) + .flatMap(fateClient -> fateClient.list(FateKey.FateKeyType.COMPACTION_COMMIT))) { keyStream.map(fateKey -> fateKey.getCompactionId().orElseThrow()).forEach(ecid -> { if (tabletCompactions.remove(ecid) != null) { log.debug("Ignoring compaction {} that is committing in a fate", ecid); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index b1978d9de54..201d8942dff 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -44,7 +44,10 @@ import org.slf4j.LoggerFactory; import com.google.common.base.Preconditions; +import com.google.common.collect.Range; +import com.google.common.collect.RangeMap; import com.google.common.collect.Sets; +import com.google.common.collect.TreeRangeMap; import com.google.common.net.HostAndPort; /** @@ -64,8 +67,11 @@ public 
FateManager(ServerContext context) { private final AtomicBoolean stop = new AtomicBoolean(false); - private final AtomicReference>> stableAssignments = - new AtomicReference<>(Map.of()); + record FateHostPartition(HostAndPort hostPort, FatePartition partition) { + } + + private final AtomicReference> stableAssignments = + new AtomicReference<>(TreeRangeMap.create()); private final Map> pendingNotifications = new HashMap<>(); @@ -86,9 +92,16 @@ private void managerWorkers() throws TException, InterruptedException { computeDesiredAssignments(currentAssignments, desiredParititions); if (desired.equals(currentAssignments)) { - stableAssignments.set(Map.copyOf(currentAssignments)); + RangeMap rangeMap = TreeRangeMap.create(); + currentAssignments.forEach((hostAndPort, partitions) -> { + partitions.forEach(partition -> { + rangeMap.put(Range.closed(partition.start(), partition.end()), + new FateHostPartition(hostAndPort, partition)); + }); + }); + stableAssignments.set(rangeMap); } else { - stableAssignments.set(Map.of()); + stableAssignments.set(TreeRangeMap.create()); } // are there any workers with extra partitions? If so need to unload those first. @@ -157,31 +170,57 @@ public synchronized void start() { ntfyThread.start(); } - public synchronized void stop() throws InterruptedException { - stop.set(true); - if (thread != null) { - thread.join(); + public synchronized void stop() { + if (!stop.compareAndSet(false, true)) { + return; + } + + try { + if (thread != null) { + thread.join(); + } + if (ntfyThread != null) { + ntfyThread.join(); + } + } catch (InterruptedException e) { + throw new IllegalStateException(e); } - if (ntfyThread != null) { - ntfyThread.join(); + // Try to set every assistant manager to nothing. 
+ Map currentAssignments = null; + try { + currentAssignments = getCurrentAssignments(); + } catch (TException e) { + log.warn("Failed to get current assignments", e); + currentAssignments = Map.of(); } + for (var entry : currentAssignments.entrySet()) { + var hostPort = entry.getKey(); + var currentPartitions = entry.getValue(); + if (!currentPartitions.partitions.isEmpty()) { + try { + setWorkerPartitions(hostPort, currentPartitions.updateId(), Set.of()); + } catch (TException e) { + log.warn("Failed to unassign fate partitions {}", hostPort, e); + } + } + } + + // TODO could wait for each assitant to finish any current operations + + stableAssignments.set(TreeRangeMap.create()); + } /** * Makes a best effort to notify this fate operation was seeded. */ public void notifySeeded(FateId fateId) { - // TODO avoid linear search - for (Map.Entry> entry : stableAssignments.get().entrySet()) { - for (var parition : entry.getValue()) { - if (parition.contains(fateId)) { - synchronized (pendingNotifications) { - pendingNotifications.computeIfAbsent(entry.getKey(), k -> new HashSet<>()) - .add(parition); - pendingNotifications.notify(); - } - return; - } + var hostPartition = stableAssignments.get().get(fateId); + if (hostPartition != null) { + synchronized (pendingNotifications) { + pendingNotifications.computeIfAbsent(hostPartition.hostPort(), k -> new HashSet<>()) + .add(hostPartition.partition()); + pendingNotifications.notify(); } } } @@ -238,7 +277,6 @@ public void run() { */ private boolean setWorkerPartitions(HostAndPort address, long updateId, Set desired) throws TException { - // TODO make a compare and set type RPC that uses the current and desired FateWorkerService.Client client = ThriftUtil.getClient(ThriftClientTypes.FATE_WORKER, address, context); try { diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/merge/FindMergeableRangeTask.java b/server/manager/src/main/java/org/apache/accumulo/manager/merge/FindMergeableRangeTask.java index 
e4b39229ef1..e5e8c850725 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/merge/FindMergeableRangeTask.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/merge/FindMergeableRangeTask.java @@ -158,7 +158,7 @@ void submit(MergeableRange range, FateInstanceType type, Entry t tableId, startRowStr, endRowStr); var fateKey = FateKey.forMerge(new KeyExtent(tableId, range.endRow, range.startRow)); - manager.fate(type).seedTransaction(FateOperation.SYSTEM_MERGE, fateKey, + manager.fateClient(type).seedTransaction(FateOperation.SYSTEM_MERGE, fateKey, new TraceRepo<>( new TableRangeOp(Operation.SYSTEM_MERGE, namespaceId, tableId, startRow, endRow)), true); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/ManagerMetrics.java b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/ManagerMetrics.java index 77689cea028..9e6f9c4f2dd 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/ManagerMetrics.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/ManagerMetrics.java @@ -27,14 +27,11 @@ import java.util.ArrayList; import java.util.List; -import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.conf.Property; -import org.apache.accumulo.core.fate.Fate; -import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.manager.thrift.ManagerGoalState; import org.apache.accumulo.core.metadata.schema.Ample.DataLevel; import org.apache.accumulo.core.metrics.MetricsProducer; @@ -42,7 +39,6 @@ import org.apache.accumulo.manager.metrics.fate.FateMetrics; import org.apache.accumulo.manager.metrics.fate.meta.MetaFateMetrics; import org.apache.accumulo.manager.metrics.fate.user.UserFateMetrics; -import org.apache.accumulo.manager.tableOps.FateEnv; import io.micrometer.core.instrument.Gauge; 
import io.micrometer.core.instrument.MeterRegistry; @@ -66,17 +62,14 @@ public void updateManagerGoalState(ManagerGoalState goal) { goalState.set(newValue); } - public void configureFateMetrics(final AccumuloConfiguration conf, final Manager manager, - Map> fateRefs) { + public void configureFateMetrics(final AccumuloConfiguration conf, final Manager manager) { requireNonNull(conf, "AccumuloConfiguration must not be null"); requireNonNull(conf, "Manager must not be null"); fateMetrics = List.of( new MetaFateMetrics(manager.getContext(), - conf.getTimeInMillis(Property.MANAGER_FATE_METRICS_MIN_UPDATE_INTERVAL), - fateRefs.get(FateInstanceType.META).getFateExecutors()), + conf.getTimeInMillis(Property.MANAGER_FATE_METRICS_MIN_UPDATE_INTERVAL)), new UserFateMetrics(manager.getContext(), - conf.getTimeInMillis(Property.MANAGER_FATE_METRICS_MIN_UPDATE_INTERVAL), - fateRefs.get(FateInstanceType.USER).getFateExecutors())); + conf.getTimeInMillis(Property.MANAGER_FATE_METRICS_MIN_UPDATE_INTERVAL))); } public void incrementTabletGroupWatcherError(DataLevel level) { diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetrics.java b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetrics.java new file mode 100644 index 00000000000..ba1b16b848b --- /dev/null +++ b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetrics.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.accumulo.manager.metrics.fate; + +import static org.apache.accumulo.manager.metrics.fate.FateMetrics.DEFAULT_MIN_REFRESH_DELAY; + +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import org.apache.accumulo.core.fate.FateExecutor; +import org.apache.accumulo.core.metrics.MetricsProducer; +import org.apache.accumulo.core.util.threads.ThreadPools; +import org.apache.accumulo.manager.tableOps.FateEnv; +import org.apache.accumulo.server.ServerContext; + +import io.micrometer.core.instrument.MeterRegistry; + +// TODO use this +public class FateExecutorMetrics implements MetricsProducer { + private final Set> fateExecutors; + private final ServerContext context; + private final long refreshDelay; + private MeterRegistry registry; + + public FateExecutorMetrics(ServerContext context, Set> fateExecutors, + long minimumRefreshDelay) { + this.context = context; + this.fateExecutors = fateExecutors; + this.refreshDelay = Math.max(DEFAULT_MIN_REFRESH_DELAY, minimumRefreshDelay); + } + + protected void update() { + // there may have been new fate executors added, so these need to be registered. + // fate executors removed will have their metrics removed from the registry before they are + // removed from the set. 
+ if (registry != null) { + synchronized (fateExecutors) { + fateExecutors.forEach(fe -> { + var feMetrics = fe.getFateExecutorMetrics(); + if (!feMetrics.isRegistered()) { + feMetrics.registerMetrics(registry); + } + }); + } + } + } + + @Override + public void registerMetrics(final MeterRegistry registry) { + this.registry = registry; + synchronized (fateExecutors) { + fateExecutors.forEach(fe -> fe.getFateExecutorMetrics().registerMetrics(registry)); + } + + var future = context.getScheduledExecutor().scheduleAtFixedRate(this::update, refreshDelay, + refreshDelay, TimeUnit.MILLISECONDS); + ThreadPools.watchCriticalScheduledTask(future); + } +} diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateMetrics.java b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateMetrics.java index bf69885b9a1..16af60b4723 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateMetrics.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateMetrics.java @@ -25,18 +25,15 @@ import java.util.EnumMap; import java.util.Map.Entry; import java.util.Objects; -import java.util.Set; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; -import org.apache.accumulo.core.fate.FateExecutor; import org.apache.accumulo.core.fate.ReadOnlyFateStore; import org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus; import org.apache.accumulo.core.metrics.MetricsProducer; import org.apache.accumulo.core.util.threads.ThreadPools; -import org.apache.accumulo.manager.tableOps.FateEnv; import org.apache.accumulo.server.ServerContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,25 +48,21 @@ public abstract class FateMetrics implements Metrics private static final Logger log = LoggerFactory.getLogger(FateMetrics.class); // limit calls to update 
fate counters to guard against hammering zookeeper. - private static final long DEFAULT_MIN_REFRESH_DELAY = TimeUnit.SECONDS.toMillis(5); + static final long DEFAULT_MIN_REFRESH_DELAY = TimeUnit.SECONDS.toMillis(5); private static final String OP_TYPE_TAG = "op.type"; protected final ServerContext context; protected final ReadOnlyFateStore> readOnlyFateStore; protected final long refreshDelay; - private final Set> fateExecutors; - private MeterRegistry registry; protected final AtomicLong totalCurrentOpsCount = new AtomicLong(0); private final EnumMap txStatusCounters = new EnumMap<>(TStatus.class); - public FateMetrics(final ServerContext context, final long minimumRefreshDelay, - Set> fateExecutors) { + public FateMetrics(final ServerContext context, final long minimumRefreshDelay) { this.context = context; this.refreshDelay = Math.max(DEFAULT_MIN_REFRESH_DELAY, minimumRefreshDelay); this.readOnlyFateStore = Objects.requireNonNull(buildReadOnlyStore(context)); - this.fateExecutors = fateExecutors; for (TStatus status : TStatus.values()) { txStatusCounters.put(status, new AtomicLong(0)); @@ -98,25 +91,10 @@ protected void update(T metricValues) { metricValues.getOpTypeCounters().forEach((name, count) -> Metrics .gauge(FATE_TYPE_IN_PROGRESS.getName(), Tags.of(OP_TYPE_TAG, name), count)); - - // there may have been new fate executors added, so these need to be registered. - // fate executors removed will have their metrics removed from the registry before they are - // removed from the set. 
- if (registry != null) { - synchronized (fateExecutors) { - fateExecutors.forEach(fe -> { - var feMetrics = fe.getFateExecutorMetrics(); - if (!feMetrics.isRegistered()) { - feMetrics.registerMetrics(registry); - } - }); - } - } } @Override public void registerMetrics(final MeterRegistry registry) { - this.registry = registry; String type = readOnlyFateStore.type().name().toLowerCase(); Gauge.builder(FATE_OPS.getName(), totalCurrentOpsCount, AtomicLong::get) @@ -126,10 +104,6 @@ public void registerMetrics(final MeterRegistry registry) { .builder(FATE_TX.getName(), counter, AtomicLong::get).description(FATE_TX.getDescription()) .tags("state", status.name().toLowerCase(), "instanceType", type).register(registry)); - synchronized (fateExecutors) { - fateExecutors.forEach(fe -> fe.getFateExecutorMetrics().registerMetrics(registry)); - } - // get fate status is read only operation - no reason to be nice on shutdown. ScheduledExecutorService scheduler = ThreadPools.getServerThreadPools() .createScheduledExecutorService(1, type + "FateMetricsPoller"); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/meta/MetaFateMetrics.java b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/meta/MetaFateMetrics.java index 7e19d70e847..9d87f9a9cc2 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/meta/MetaFateMetrics.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/meta/MetaFateMetrics.java @@ -21,15 +21,12 @@ import static org.apache.accumulo.core.metrics.Metric.FATE_ERRORS; import static org.apache.accumulo.core.metrics.Metric.FATE_OPS_ACTIVITY; -import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import org.apache.accumulo.core.Constants; -import org.apache.accumulo.core.fate.FateExecutor; import org.apache.accumulo.core.fate.ReadOnlyFateStore; import org.apache.accumulo.core.fate.zookeeper.MetaFateStore; import 
org.apache.accumulo.manager.metrics.fate.FateMetrics; -import org.apache.accumulo.manager.tableOps.FateEnv; import org.apache.accumulo.server.ServerContext; import org.apache.zookeeper.KeeperException; @@ -41,9 +38,8 @@ public class MetaFateMetrics extends FateMetrics { private final AtomicLong totalOpsGauge = new AtomicLong(0); private final AtomicLong fateErrorsGauge = new AtomicLong(0); - public MetaFateMetrics(ServerContext context, long minimumRefreshDelay, - Set> fateExecutors) { - super(context, minimumRefreshDelay, fateExecutors); + public MetaFateMetrics(ServerContext context, long minimumRefreshDelay) { + super(context, minimumRefreshDelay); } @Override diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/user/UserFateMetrics.java b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/user/UserFateMetrics.java index 7fab73944d8..4f1df05762a 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/user/UserFateMetrics.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/user/UserFateMetrics.java @@ -18,21 +18,16 @@ */ package org.apache.accumulo.manager.metrics.fate.user; -import java.util.Set; - -import org.apache.accumulo.core.fate.FateExecutor; import org.apache.accumulo.core.fate.ReadOnlyFateStore; import org.apache.accumulo.core.fate.user.UserFateStore; import org.apache.accumulo.core.metadata.SystemTables; import org.apache.accumulo.manager.metrics.fate.FateMetrics; -import org.apache.accumulo.manager.tableOps.FateEnv; import org.apache.accumulo.server.ServerContext; public class UserFateMetrics extends FateMetrics { - public UserFateMetrics(ServerContext context, long minimumRefreshDelay, - Set> fateExecutors) { - super(context, minimumRefreshDelay, fateExecutors); + public UserFateMetrics(ServerContext context, long minimumRefreshDelay) { + super(context, minimumRefreshDelay); } @Override diff --git 
a/server/manager/src/main/java/org/apache/accumulo/manager/split/Splitter.java b/server/manager/src/main/java/org/apache/accumulo/manager/split/Splitter.java index 1f21fde170e..93d4c1cf03e 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/split/Splitter.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/split/Splitter.java @@ -89,7 +89,7 @@ public void run() { private void seedSplits(FateInstanceType instanceType, Map splits) { if (!splits.isEmpty()) { - try (var seeder = manager.fate(instanceType).beginSeeding()) { + try (var seeder = manager.fateClient(instanceType).beginSeeding()) { for (KeyExtent extent : splits.values()) { @SuppressWarnings("unused") var unused = seeder.attemptToSeedTransaction(Fate.FateOperation.SYSTEM_SPLIT, diff --git a/server/manager/src/test/java/org/apache/accumulo/manager/compaction/CompactionCoordinatorTest.java b/server/manager/src/test/java/org/apache/accumulo/manager/compaction/CompactionCoordinatorTest.java index b121faf2b39..487d32c74bf 100644 --- a/server/manager/src/test/java/org/apache/accumulo/manager/compaction/CompactionCoordinatorTest.java +++ b/server/manager/src/test/java/org/apache/accumulo/manager/compaction/CompactionCoordinatorTest.java @@ -39,7 +39,6 @@ import java.util.UUID; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import org.apache.accumulo.core.Constants; import org.apache.accumulo.core.client.admin.CompactionConfig; @@ -55,7 +54,6 @@ import org.apache.accumulo.core.data.TableId; import org.apache.accumulo.core.dataImpl.KeyExtent; import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent; -import org.apache.accumulo.core.fate.Fate; import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.iteratorsImpl.system.SystemIteratorUtil; @@ -79,7 +77,6 @@ import 
org.apache.accumulo.manager.compaction.coordinator.CompactionCoordinator; import org.apache.accumulo.manager.compaction.queue.CompactionJobPriorityQueue; import org.apache.accumulo.manager.compaction.queue.ResolvedCompactionJob; -import org.apache.accumulo.manager.tableOps.FateEnv; import org.apache.accumulo.server.ServerContext; import org.apache.accumulo.server.conf.TableConfiguration; import org.apache.accumulo.server.security.AuditedSecurityOperation; @@ -94,10 +91,6 @@ public class CompactionCoordinatorTest { - // Need a non-null fateInstances reference for CompactionCoordinator.compactionCompleted - private static final AtomicReference>> fateInstances = - new AtomicReference<>(Map.of()); - private static final ResourceGroupId GROUP_ID = ResourceGroupId.of("R2DQ"); private final HostAndPort tserverAddr = HostAndPort.fromParts("192.168.1.1", 9090); @@ -118,7 +111,7 @@ public class TestCoordinator extends CompactionCoordinator { private Set metadataCompactionIds = null; public TestCoordinator(Manager manager, List runningCompactions) { - super(manager, fateInstances); + super(manager, fit -> null); this.runningCompactions = runningCompactions; } From c541ec2e5e049211a9f27b0119c4167b85a6c505 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Wed, 18 Feb 2026 00:38:02 +0000 Subject: [PATCH 22/38] WIP --- .../src/main/java/org/apache/accumulo/manager/Manager.java | 6 ++++++ .../java/org/apache/accumulo/manager/fate/FateWorker.java | 3 ++- ...ExecutorMetrics.java => FateExecutorMetricsWatcher.java} | 5 ++--- 3 files changed, 10 insertions(+), 4 deletions(-) rename server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/{FateExecutorMetrics.java => FateExecutorMetricsWatcher.java} (93%) diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index 6b6396a4408..66ce34accef 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -113,6 +113,7 @@ import org.apache.accumulo.manager.fate.FateManager; import org.apache.accumulo.manager.merge.FindMergeableRangeTask; import org.apache.accumulo.manager.metrics.ManagerMetrics; +import org.apache.accumulo.manager.metrics.fate.FateExecutorMetricsWatcher; import org.apache.accumulo.manager.recovery.RecoveryManager; import org.apache.accumulo.manager.split.SplitFileCache; import org.apache.accumulo.manager.split.Splitter; @@ -1174,6 +1175,11 @@ boolean canSuspendTablets() { fateManager.start(); fateClient(FateInstanceType.USER).setSeedingConsumer(fateManager::notifySeeded); + var metaFateExecutorMetrics = + new FateExecutorMetricsWatcher(context, fate(FateInstanceType.META).getFateExecutors(), + getConfiguration().getTimeInMillis(Property.MANAGER_FATE_METRICS_MIN_UPDATE_INTERVAL)); + producers.add(metaFateExecutorMetrics); + producers.addAll(managerMetrics.getProducers(this)); metricsInfo.addMetricsProducers(producers.toArray(new MetricsProducer[0])); metricsInfo.init(MetricsInfo.serviceTags(getContext().getInstanceName(), getApplicationName(), diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index 9bc578b1898..cafeb8ee521 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -56,7 +56,8 @@ public FateWorker(ServerContext ctx) { this.context = ctx; this.security = ctx.getSecurityOperation(); this.fate = null; - // TODO fate metrics + // TODO fate metrics... in the manager process it does not setup metrics until after it gets the + // lock... 
also may want these metrics tagged differently for the server } public void setLock(ServiceLock lock) { diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetrics.java b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetricsWatcher.java similarity index 93% rename from server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetrics.java rename to server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetricsWatcher.java index ba1b16b848b..3e5c289c445 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetrics.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetricsWatcher.java @@ -31,14 +31,13 @@ import io.micrometer.core.instrument.MeterRegistry; -// TODO use this -public class FateExecutorMetrics implements MetricsProducer { +public class FateExecutorMetricsWatcher implements MetricsProducer { private final Set> fateExecutors; private final ServerContext context; private final long refreshDelay; private MeterRegistry registry; - public FateExecutorMetrics(ServerContext context, Set> fateExecutors, + public FateExecutorMetricsWatcher(ServerContext context, Set> fateExecutors, long minimumRefreshDelay) { this.context = context; this.fateExecutors = fateExecutors; From f06b98d9a42fcc097551bc81e8cb0369b96fc21b Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Wed, 18 Feb 2026 16:39:37 +0000 Subject: [PATCH 23/38] WIP --- .../java/org/apache/accumulo/core/fate/Fate.java | 5 ++++- .../accumulo/server/init/ZooKeeperInitializer.java | 1 - .../server/util/adminCommand/ServiceStatus.java | 3 +++ .../java/org/apache/accumulo/manager/Manager.java | 6 ++++-- .../apache/accumulo/manager/fate/FateManager.java | 1 - .../org/apache/accumulo/manager/fate/FateWorker.java | 3 --- .../apache/accumulo/manager/fate/FateWorkerEnv.java | 12 +++++++++--- 
.../accumulo/manager/upgrade/Upgrader11to12.java | 11 +++++++++++ 8 files changed, 31 insertions(+), 11 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java index 166c2abfa0d..699b98dd368 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java @@ -63,7 +63,10 @@ */ @SuppressFBWarnings(value = "CT_CONSTRUCTOR_THROW", justification = "Constructor validation is required for proper initialization") -public class Fate extends FateClient { // TODO remove extension of FateClient +public class Fate extends FateClient { // FOLLOW_ON remove extension of FateClient. This + // extenstion was added to keep existing test code + // working. Would be cleaner to not extend and refactor + // all code. static final Logger log = LoggerFactory.getLogger(Fate.class); diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java b/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java index 3e29f2135d9..464a6dae5a6 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java +++ b/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java @@ -176,7 +176,6 @@ void initialize(final ServerContext context, final String rootTabletDirName, ZooUtil.NodeExistsPolicy.FAIL); zrwChroot.putPersistentData(Constants.ZCOMPACTIONS, EMPTY_BYTE_ARRAY, ZooUtil.NodeExistsPolicy.FAIL); - // TODO would need to create in upgrade zrwChroot.putPersistentData(Constants.ZMANAGER_ASSISTANT_LOCK, EMPTY_BYTE_ARRAY, ZooUtil.NodeExistsPolicy.FAIL); } diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/adminCommand/ServiceStatus.java b/server/base/src/main/java/org/apache/accumulo/server/util/adminCommand/ServiceStatus.java index 3301b7906ea..2a791313c8d 100644 --- 
a/server/base/src/main/java/org/apache/accumulo/server/util/adminCommand/ServiceStatus.java +++ b/server/base/src/main/java/org/apache/accumulo/server/util/adminCommand/ServiceStatus.java @@ -96,6 +96,9 @@ public void execute(JCommander cl, ServiceStatusCmdOpts options) throws Exceptio final Map services = new TreeMap<>(); + // FOLLOW_ON display information about multiple managers. Could display which is primary. Also, + // could potentially display the additional port that is being listened on by the assistant + // manager. services.put(ServiceStatusReport.ReportKey.MANAGER, getManagerStatus(context)); services.put(ServiceStatusReport.ReportKey.MONITOR, getMonitorStatus(context)); services.put(ServiceStatusReport.ReportKey.T_SERVER, getTServerStatus(context)); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index 66ce34accef..7377dd71667 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -213,6 +213,7 @@ public class Manager extends AbstractServer implements LiveTServerSet.Listener, private final AtomicReference>> fateRefs = new AtomicReference<>(); private volatile FateManager fateManager; + private volatile ManagerAssistant assitantManager; private final ManagerMetrics managerMetrics = new ManagerMetrics(); @@ -958,10 +959,10 @@ public void run() { throw new IllegalStateException("Unable to start server on host " + getBindAddress(), e); } - // TODO eventually stop this // Start manager assistant before getting lock, this allows non primary manager processes to // work on stuff. - new ManagerAssistant(getContext(), getBindAddress()).start(); + assitantManager = new ManagerAssistant(getContext(), getBindAddress()); + assitantManager.start(); // block until we can obtain the ZK lock for the manager. Create the // initial lock using ThriftService.NONE. 
This will allow the lock @@ -1267,6 +1268,7 @@ boolean canSuspendTablets() { log.debug("Shutting down fate."); fate(FateInstanceType.META).close(); fateManager.stop(); + assitantManager.stop(); splitter.stop(); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 201d8942dff..e34f805b190 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -81,7 +81,6 @@ private void managerWorkers() throws TException, InterruptedException { // TODO make configurable Thread.sleep(3_000); - // TODO could support RG... could user ServerId // This map will contain all current workers even their partitions are empty Map currentPartitions = getCurrentAssignments(); Map> currentAssignments = new HashMap<>(); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index cafeb8ee521..6566f3c2e8b 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -67,9 +67,6 @@ public void setLock(ServiceLock lock) { new UserFateStore<>(context, SystemTables.FATE.tableName(), lock.getLockID(), isLockHeld); this.fate = new Fate<>(env, store, false, TraceRepo::toLogString, context.getConfiguration(), context.getScheduledExecutor()); - // TODO where will the 2 fate cleanup task run? Make dead reservation cleaner use partitions... 
- // cleanup can run in manager - } private Long expectedUpdateId = null; diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java index 72331c5c7c4..53c4baf8eea 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java @@ -160,7 +160,10 @@ public EventPublisher getEventPublisher() { @Override public void recordCompactionCompletion(ExternalCompactionId ecid) { - // TODO do something w/ this + // FOLLOW_ON This data is stored in memory on the manager. This entire feature needs to be + // examined and potentially reworked. One solution would be to send an RPC to the manager to + // update it's in memory state. A better solution would be to move away from in memory state + // that is lost when the manager restarts. } @Override @@ -180,12 +183,15 @@ public VolumeManager getVolumeManager() { @Override public void updateBulkImportStatus(String string, BulkImportState bulkImportState) { - // TODO + // FOLLOW_ON This data is stored in memory on the manager. This entire feature needs to be + // examined and potentially reworked. One solution would be to send an RPC to the manager to + // update it's in memory state. A better solution would be to move away from in memory state + // that is lost when the manager restarts. 
} @Override public void removeBulkImportStatus(String sourceDir) { - // TODO + // FOLLOW_ON } @Override diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader11to12.java b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader11to12.java index 2464c9476bf..a230dc7149a 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader11to12.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/upgrade/Upgrader11to12.java @@ -253,6 +253,8 @@ public void upgradeZookeeper(ServerContext context) { addDefaultResourceGroupConfigNode(context); LOG.info("Moving table properties from system to namespaces"); moveTableProperties(context); + LOG.info("Add assistant manager node"); + addAssistantManager(context); } @Override @@ -297,6 +299,15 @@ public void upgradeMetadata(ServerContext context) { removeBulkFileColumnsFromTable(context, SystemTables.METADATA.tableName()); } + private static void addAssistantManager(ServerContext context) { + try { + context.getZooSession().asReaderWriter().putPersistentData(Constants.ZMANAGER_ASSISTANT_LOCK, + new byte[0], ZooUtil.NodeExistsPolicy.SKIP); + } catch (KeeperException | InterruptedException e) { + throw new IllegalStateException(e); + } + } + private static void addCompactionsNode(ServerContext context) { try { context.getZooSession().asReaderWriter().putPersistentData(Constants.ZCOMPACTIONS, From 5f0a04a7a2be859bb570f9391a858805f0c79ac2 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Wed, 18 Feb 2026 17:01:36 +0000 Subject: [PATCH 24/38] WIP --- .../accumulo/manager/ManagerAssistant.java | 1 + .../accumulo/manager/fate/FateWorker.java | 14 +++++-- .../accumulo/manager/fate/FateWorkerEnv.java | 41 ++++++++++++++----- 3 files changed, 42 insertions(+), 14 deletions(-) diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java index 
6ea89e1866a..7929741d8bd 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java @@ -150,6 +150,7 @@ public void start() { public void stop() { thriftServer.server.stop(); + fateWorker.stop(); } public ServiceLock getLock() { diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index 6566f3c2e8b..e317d5144a7 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -19,6 +19,7 @@ package org.apache.accumulo.manager.fate; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -51,6 +52,7 @@ public class FateWorker implements FateWorkerService.Iface { private final ServerContext context; private final AuditedSecurityOperation security; private volatile Fate fate; + private volatile FateWorkerEnv fateWorkerEnv; public FateWorker(ServerContext ctx) { this.context = ctx; @@ -61,12 +63,12 @@ public FateWorker(ServerContext ctx) { } public void setLock(ServiceLock lock) { - FateEnv env = new FateWorkerEnv(context, lock); + fateWorkerEnv = new FateWorkerEnv(context, lock); Predicate isLockHeld = l -> ServiceLock.isLockHeld(context.getZooCache(), l); UserFateStore store = new UserFateStore<>(context, SystemTables.FATE.tableName(), lock.getLockID(), isLockHeld); - this.fate = new Fate<>(env, store, false, TraceRepo::toLogString, context.getConfiguration(), - context.getScheduledExecutor()); + this.fate = new Fate<>(fateWorkerEnv, store, false, TraceRepo::toLogString, + context.getConfiguration(), context.getScheduledExecutor()); } private Long expectedUpdateId = null; @@ -145,4 +147,10 @@ public void seeded(TInfo tinfo, TCredentials 
credentials, List t localFate.seeded(tpartitions.stream().map(FatePartition::from).collect(Collectors.toSet())); } } + + public void stop() { + fate.shutdown(1, TimeUnit.MINUTES); + fate.close(); + fateWorkerEnv.stop(); + } } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java index 53c4baf8eea..124a1326aa0 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java @@ -18,14 +18,17 @@ */ package org.apache.accumulo.manager.fate; +import static org.apache.accumulo.core.util.threads.ThreadPoolNames.IMPORT_TABLE_RENAME_POOL; + import java.util.Collection; import java.util.Set; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.accumulo.core.client.AccumuloException; import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.data.TableId; import org.apache.accumulo.core.dataImpl.KeyExtent; import org.apache.accumulo.core.lock.ServiceLock; @@ -36,6 +39,7 @@ import org.apache.accumulo.core.rpc.ThriftUtil; import org.apache.accumulo.core.rpc.clients.ThriftClientTypes; import org.apache.accumulo.core.trace.TraceUtil; +import org.apache.accumulo.core.util.threads.ThreadPools; import org.apache.accumulo.core.util.threads.Threads; import org.apache.accumulo.core.util.time.SteadyTime; import org.apache.accumulo.manager.EventCoordinator; @@ -64,12 +68,22 @@ public class FateWorkerEnv implements FateEnv { private final EventHandler eventHandler; private final EventQueue queue = new EventQueue(); + private final AtomicBoolean stopped = new AtomicBoolean(false); + private final Thread eventSendThread; + + public 
void stop() { + stopped.set(true); + try { + eventSendThread.join(); + } catch (InterruptedException e) { + throw new IllegalStateException(e); + } + } private class EventSender implements Runnable { @Override public void run() { - // TODO check for stop condition - while (true) { + while (!stopped.get()) { try { var events = queue.poll(100, TimeUnit.MILLISECONDS); if (events.isEmpty()) { @@ -84,8 +98,7 @@ public void run() { client.processEvents(TraceUtil.traceInfo(), ctx.rpcCreds(), tEvents); } } catch (TException e) { - // TODO - e.printStackTrace(); + log.warn("Failed to send events to manager", e); } finally { if (client != null) { ThriftUtil.close(client, ctx); @@ -93,8 +106,7 @@ public void run() { } } catch (InterruptedException e) { - // TODO - e.printStackTrace(); + throw new IllegalStateException(e); } } } @@ -137,15 +149,22 @@ public void event(Collection extents, String msg, Object... args) { FateWorkerEnv(ServerContext ctx, ServiceLock lock) { this.ctx = ctx; - // TODO create the proper way - this.refreshPool = Executors.newFixedThreadPool(2); - this.renamePool = Executors.newFixedThreadPool(2); + this.refreshPool = ThreadPools.getServerThreadPools().getPoolBuilder("Tablet refresh ") + .numCoreThreads(ctx.getConfiguration().getCount(Property.MANAGER_TABLET_REFRESH_MINTHREADS)) + .numMaxThreads(ctx.getConfiguration().getCount(Property.MANAGER_TABLET_REFRESH_MAXTHREADS)) + .build(); + int poolSize = ctx.getConfiguration().getCount(Property.MANAGER_RENAME_THREADS); + // FOLLOW_ON this import table name is not correct for the thread pool name, fix in stand alone + // PR + this.renamePool = ThreadPools.getServerThreadPools() + .getPoolBuilder(IMPORT_TABLE_RENAME_POOL.poolName).numCoreThreads(poolSize).build(); this.serviceLock = lock; this.tservers = new LiveTServerSet(ctx); this.splitCache = new SplitFileCache(ctx); this.eventHandler = new EventHandler(); - Threads.createCriticalThread("Fate Worker Event Sender", new EventSender()).start(); + 
eventSendThread = Threads.createCriticalThread("Fate Worker Event Sender", new EventSender()); + eventSendThread.start(); } @Override From deab1357f17f6732733a4686bc2921c8a2ede58f Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Wed, 18 Feb 2026 19:58:22 +0000 Subject: [PATCH 25/38] WIP --- .../core/fate/FateExecutorMetrics.java | 4 +- .../org/apache/accumulo/manager/Manager.java | 121 +++++++++--------- .../accumulo/manager/ManagerAssistant.java | 21 ++- .../accumulo/manager/fate/FateWorker.java | 55 +++++--- .../accumulo/manager/fate/FateWorkerEnv.java | 8 +- ....java => FateExecutorMetricsProducer.java} | 6 +- .../accumulo/test/fate/FlakyFateManager.java | 5 +- .../test/fate/SlowFateSplitManager.java | 5 +- 8 files changed, 131 insertions(+), 94 deletions(-) rename server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/{FateExecutorMetricsWatcher.java => FateExecutorMetricsProducer.java} (92%) diff --git a/core/src/main/java/org/apache/accumulo/core/fate/FateExecutorMetrics.java b/core/src/main/java/org/apache/accumulo/core/fate/FateExecutorMetrics.java index 4edc70fe7a8..f9753a4ab6e 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/FateExecutorMetrics.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/FateExecutorMetrics.java @@ -22,14 +22,13 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.accumulo.core.metrics.Metric; -import org.apache.accumulo.core.metrics.MetricsProducer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.micrometer.core.instrument.Gauge; import io.micrometer.core.instrument.MeterRegistry; -public class FateExecutorMetrics implements MetricsProducer { +public class FateExecutorMetrics { private static final Logger log = LoggerFactory.getLogger(FateExecutorMetrics.class); private final FateInstanceType type; private final String poolName; @@ -49,7 +48,6 @@ protected FateExecutorMetrics(FateInstanceType type, String poolName, this.idleWorkerCount = idleWorkerCount; } 
- @Override public void registerMetrics(MeterRegistry registry) { // noop if already registered or cleared if (state == State.UNREGISTERED) { diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index 7377dd71667..87c46814592 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -113,7 +113,7 @@ import org.apache.accumulo.manager.fate.FateManager; import org.apache.accumulo.manager.merge.FindMergeableRangeTask; import org.apache.accumulo.manager.metrics.ManagerMetrics; -import org.apache.accumulo.manager.metrics.fate.FateExecutorMetricsWatcher; +import org.apache.accumulo.manager.metrics.fate.FateExecutorMetricsProducer; import org.apache.accumulo.manager.recovery.RecoveryManager; import org.apache.accumulo.manager.split.SplitFileCache; import org.apache.accumulo.manager.split.Splitter; @@ -148,6 +148,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Comparators; import com.google.common.collect.ImmutableSortedMap; @@ -959,10 +960,20 @@ public void run() { throw new IllegalStateException("Unable to start server on host " + getBindAddress(), e); } + tserverSet.startListeningForTabletServerChanges(this); + + MetricsInfo metricsInfo = getContext().getMetricsInfo(); + // Start manager assistant before getting lock, this allows non primary manager processes to // work on stuff. 
- assitantManager = new ManagerAssistant(getContext(), getBindAddress()); + assitantManager = + new ManagerAssistant(getContext(), getBindAddress(), tserverSet, this::createFateInstance); assitantManager.start(); + metricsInfo + .addMetricsProducers(assitantManager.getMetricsProducers().toArray(new MetricsProducer[0])); + + metricsInfo.init(MetricsInfo.serviceTags(getContext().getInstanceName(), getApplicationName(), + getAdvertiseAddress(), getResourceGroup())); // block until we can obtain the ZK lock for the manager. Create the // initial lock using ThriftService.NONE. This will allow the lock @@ -999,7 +1010,6 @@ public void run() { Thread statusThread = Threads.createCriticalThread("Status Thread", new StatusThread()); statusThread.start(); - tserverSet.startListeningForTabletServerChanges(this); try { blockForTservers(); } catch (InterruptedException ex) { @@ -1024,9 +1034,7 @@ public void process(WatchedEvent event) { throw new IllegalStateException("Unable to read " + Constants.ZRECOVERY, e); } - MetricsInfo metricsInfo = getContext().getMetricsInfo(); - List producers = new ArrayList<>(); - producers.add(balanceManager.getMetrics()); + metricsInfo.addMetricsProducers(balanceManager.getMetrics()); final TabletGroupWatcher userTableTGW = new TabletGroupWatcher(this, this.userTabletStore, null, managerMetrics) { @@ -1148,43 +1156,14 @@ boolean canSuspendTablets() { this.splitter.start(); this.splitFileCache = new SplitFileCache(context); - try { - Predicate isLockHeld = - lock -> ServiceLock.isLockHeld(context.getZooCache(), lock); - var metaInstance = initializeFateInstance(context, - new MetaFateStore<>(context.getZooSession(), managerLock.getLockID(), isLockHeld)); - var userFateClient = - new FateClient(new UserFateStore<>(context, SystemTables.FATE.tableName(), - managerLock.getLockID(), isLockHeld), TraceRepo::toLogString); - - if (!fateClients.compareAndSet(null, - Map.of(FateInstanceType.META, metaInstance, FateInstanceType.USER, userFateClient))) { - 
throw new IllegalStateException( - "Unexpected previous fateClient reference map already initialized"); - } - if (!fateRefs.compareAndSet(null, Map.of(FateInstanceType.META, metaInstance))) { - throw new IllegalStateException( - "Unexpected previous fate reference map already initialized"); - } - managerMetrics.configureFateMetrics(getConfiguration(), this); - fateReadyLatch.countDown(); - } catch (KeeperException | InterruptedException e) { - throw new IllegalStateException("Exception setting up FaTE cleanup thread", e); - } + setupFate(context, metricsInfo); fateManager = new FateManager(getContext()); fateManager.start(); fateClient(FateInstanceType.USER).setSeedingConsumer(fateManager::notifySeeded); - var metaFateExecutorMetrics = - new FateExecutorMetricsWatcher(context, fate(FateInstanceType.META).getFateExecutors(), - getConfiguration().getTimeInMillis(Property.MANAGER_FATE_METRICS_MIN_UPDATE_INTERVAL)); - producers.add(metaFateExecutorMetrics); - - producers.addAll(managerMetrics.getProducers(this)); - metricsInfo.addMetricsProducers(producers.toArray(new MetricsProducer[0])); - metricsInfo.init(MetricsInfo.serviceTags(getContext().getInstanceName(), getApplicationName(), - getAdvertiseAddress(), getResourceGroup())); + metricsInfo + .addMetricsProducers(managerMetrics.getProducers(this).toArray(new MetricsProducer[0])); ThreadPools.watchCriticalScheduledTask(context.getScheduledExecutor() .scheduleWithFixedDelay(() -> ScanServerMetadataEntries.clean(context), 10, 10, MINUTES)); @@ -1312,28 +1291,56 @@ public void mainWait() throws InterruptedException { Thread.sleep(500); } - protected Fate initializeFateInstance(ServerContext context, FateStore store) { + /** + * This method exist so test can hook creating a fate instance. 
+ */ + @VisibleForTesting + protected Fate createFateInstance(FateEnv env, FateStore store, + ServerContext context) { + return new Fate<>(env, store, true, TraceRepo::toLogString, getConfiguration(), + context.getScheduledExecutor()); + } - final Fate fateInstance = new Fate<>(this, store, true, TraceRepo::toLogString, - getConfiguration(), context.getScheduledExecutor()); + private void setupFate(ServerContext context, MetricsInfo metricsInfo) { + try { + Predicate isLockHeld = + lock -> ServiceLock.isLockHeld(context.getZooCache(), lock); + var metaStore = + new MetaFateStore(context.getZooSession(), managerLock.getLockID(), isLockHeld); + var metaInstance = createFateInstance(this, metaStore, context); + // configure this instance to process all data + metaInstance.setPartitions(Set.of(FatePartition.all(FateInstanceType.META))); + var userStore = new UserFateStore(context, SystemTables.FATE.tableName(), + managerLock.getLockID(), isLockHeld); + var userFateClient = new FateClient(userStore, TraceRepo::toLogString); + + var metaCleaner = new FateCleaner<>(metaStore, Duration.ofHours(8), this::getSteadyTime); + ThreadPools.watchCriticalScheduledTask(context.getScheduledExecutor() + .scheduleWithFixedDelay(metaCleaner::ageOff, 10, 4 * 60, MINUTES)); + var userCleaner = new FateCleaner<>(userStore, Duration.ofHours(8), this::getSteadyTime); + ThreadPools.watchCriticalScheduledTask(context.getScheduledExecutor() + .scheduleWithFixedDelay(userCleaner::ageOff, 10, 4 * 60, MINUTES)); - var fateCleaner = new FateCleaner<>(store, Duration.ofHours(8), this::getSteadyTime); - ThreadPools.watchCriticalScheduledTask(context.getScheduledExecutor() - .scheduleWithFixedDelay(fateCleaner::ageOff, 10, 4 * 60, MINUTES)); - - if (store.type() == FateInstanceType.META) { - fateInstance.setPartitions(Set.of(FatePartition.all(FateInstanceType.META))); - } else if (store.type() == FateInstanceType.USER) { - // Do not run user transactions for now in the manager... 
it will have an empty set of - // partitions. Ideally the primary manager would not need a fate instance, but it uses to seed - // work and wait for work. Would be best to pull these operations like seeding and waiting for - // work to an independent class. - fateInstance.setPartitions(Set.of()); - } else { - throw new IllegalStateException("Unknown fate type " + store.type()); - } + if (!fateClients.compareAndSet(null, + Map.of(FateInstanceType.META, metaInstance, FateInstanceType.USER, userFateClient))) { + throw new IllegalStateException( + "Unexpected previous fateClient reference map already initialized"); + } + if (!fateRefs.compareAndSet(null, Map.of(FateInstanceType.META, metaInstance))) { + throw new IllegalStateException( + "Unexpected previous fate reference map already initialized"); + } - return fateInstance; + managerMetrics.configureFateMetrics(getConfiguration(), this); + fateReadyLatch.countDown(); + + var metaFateExecutorMetrics = new FateExecutorMetricsProducer(context, + fate(FateInstanceType.META).getFateExecutors(), + getConfiguration().getTimeInMillis(Property.MANAGER_FATE_METRICS_MIN_UPDATE_INTERVAL)); + metricsInfo.addMetricsProducers(metaFateExecutorMetrics); + } catch (KeeperException | InterruptedException e) { + throw new IllegalStateException("Exception setting up FaTE cleanup thread", e); + } } /** diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java index 7929741d8bd..48fc021384c 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java @@ -21,6 +21,7 @@ import static com.google.common.util.concurrent.Uninterruptibles.sleepUninterruptibly; import java.net.UnknownHostException; +import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -33,8 +34,11 @@ import 
org.apache.accumulo.core.lock.ServiceLockData; import org.apache.accumulo.core.lock.ServiceLockPaths; import org.apache.accumulo.core.lock.ServiceLockSupport; +import org.apache.accumulo.core.metrics.MetricsProducer; import org.apache.accumulo.manager.fate.FateWorker; +import org.apache.accumulo.manager.fate.FateWorker.FateFactory; import org.apache.accumulo.server.ServerContext; +import org.apache.accumulo.server.manager.LiveTServerSet; import org.apache.accumulo.server.rpc.ServerAddress; import org.apache.accumulo.server.rpc.TServerUtils; import org.apache.accumulo.server.rpc.ThriftProcessorTypes; @@ -54,16 +58,21 @@ public class ManagerAssistant { private static final Logger log = LoggerFactory.getLogger(ManagerAssistant.class); private final ServerContext context; private final String bindAddress; + private final LiveTServerSet liveTServerSet; + private final FateFactory fateFactory; private volatile ServiceLock managerWorkerLock; private FateWorker fateWorker; - private volatile ServerAddress thriftServer; + private ServerAddress thriftServer; - protected ManagerAssistant(ServerContext context, String bindAddress) { + protected ManagerAssistant(ServerContext context, String bindAddress, + LiveTServerSet liveTServerSet, FateFactory fateFactory) { // create another server context because the server context has the lock... 
// TODO creating another context instance in the process may cause problems, like duplicating // some thread pools this.context = new ServerContext(context.getSiteConfiguration()); this.bindAddress = bindAddress; + this.liveTServerSet = liveTServerSet; + this.fateFactory = fateFactory; } public ServerContext getContext() { @@ -75,7 +84,7 @@ private ResourceGroupId getResourceGroup() { } private HostAndPort startClientService() throws UnknownHostException { - fateWorker = new FateWorker(getContext()); + fateWorker = new FateWorker(getContext(), liveTServerSet, fateFactory); // This class implements TabletClientService.Iface and then delegates calls. Be sure // to set up the ThriftProcessor using this class, not the delegate. @@ -84,7 +93,7 @@ private HostAndPort startClientService() throws UnknownHostException { // TODO should the minthreads and timeout have their own props? Probably, do not expect this to // have lots of RPCs so could be less. - var thriftServer = TServerUtils.createThriftServer(getContext(), bindAddress, + thriftServer = TServerUtils.createThriftServer(getContext(), bindAddress, Property.MANAGER_ASSISTANT_PORT, processor, this.getClass().getSimpleName(), Property.MANAGER_ASSISTANT_PORTSEARCH, Property.MANAGER_MINTHREADS, Property.MANAGER_MINTHREADS_TIMEOUT, Property.MANAGER_THREADCHECK); @@ -156,4 +165,8 @@ public void stop() { public ServiceLock getLock() { return managerWorkerLock; } + + public List getMetricsProducers() { + return fateWorker.getMetricsProducers(); + } } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index e317d5144a7..32ce08d2467 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -27,8 +27,10 @@ import org.apache.accumulo.core.clientImpl.thrift.SecurityErrorCode; import 
org.apache.accumulo.core.clientImpl.thrift.TInfo; import org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException; +import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.fate.Fate; import org.apache.accumulo.core.fate.FatePartition; +import org.apache.accumulo.core.fate.FateStore; import org.apache.accumulo.core.fate.thrift.FateWorkerService; import org.apache.accumulo.core.fate.thrift.TFatePartition; import org.apache.accumulo.core.fate.thrift.TFatePartitions; @@ -36,39 +38,48 @@ import org.apache.accumulo.core.fate.zookeeper.ZooUtil; import org.apache.accumulo.core.lock.ServiceLock; import org.apache.accumulo.core.metadata.SystemTables; +import org.apache.accumulo.core.metrics.MetricsProducer; import org.apache.accumulo.core.securityImpl.thrift.TCredentials; import org.apache.accumulo.core.util.LazySingletons; +import org.apache.accumulo.manager.metrics.fate.FateExecutorMetricsProducer; import org.apache.accumulo.manager.tableOps.FateEnv; -import org.apache.accumulo.manager.tableOps.TraceRepo; import org.apache.accumulo.server.ServerContext; +import org.apache.accumulo.server.manager.LiveTServerSet; import org.apache.accumulo.server.security.AuditedSecurityOperation; import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.base.Preconditions; + public class FateWorker implements FateWorkerService.Iface { private static final Logger log = LoggerFactory.getLogger(FateWorker.class); private final ServerContext context; private final AuditedSecurityOperation security; - private volatile Fate fate; - private volatile FateWorkerEnv fateWorkerEnv; + private final LiveTServerSet liveTserverSet; + private final FateFactory fateFactory; + private Fate fate; + private FateWorkerEnv fateWorkerEnv; + + public interface FateFactory { + Fate create(FateEnv env, FateStore store, ServerContext context); + } - public FateWorker(ServerContext ctx) { + public FateWorker(ServerContext 
ctx, LiveTServerSet liveTServerSet, FateFactory fateFactory) { this.context = ctx; this.security = ctx.getSecurityOperation(); this.fate = null; - // TODO fate metrics... in the manager process it does not setup metrics until after it gets the - // lock... also may want these metrics tagged differently for the server + this.liveTserverSet = liveTServerSet; + this.fateFactory = fateFactory; } - public void setLock(ServiceLock lock) { - fateWorkerEnv = new FateWorkerEnv(context, lock); + public synchronized void setLock(ServiceLock lock) { + fateWorkerEnv = new FateWorkerEnv(context, lock, liveTserverSet); Predicate isLockHeld = l -> ServiceLock.isLockHeld(context.getZooCache(), l); UserFateStore store = new UserFateStore<>(context, SystemTables.FATE.tableName(), lock.getLockID(), isLockHeld); - this.fate = new Fate<>(fateWorkerEnv, store, false, TraceRepo::toLogString, - context.getConfiguration(), context.getScheduledExecutor()); + this.fate = fateFactory.create(fateWorkerEnv, store, context); } private Long expectedUpdateId = null; @@ -81,8 +92,6 @@ public TFatePartitions getPartitions(TInfo tinfo, TCredentials credentials) SecurityErrorCode.PERMISSION_DENIED).asThriftException(); } - var localFate = fate; - // generate a new one time use update id long updateId = LazySingletons.RANDOM.get().nextLong(); @@ -94,11 +103,11 @@ public TFatePartitions getPartitions(TInfo tinfo, TCredentials credentials) // id expectedUpdateId = updateId; - if (localFate == null) { + if (fate == null) { return new TFatePartitions(updateId, List.of()); } else { return new TFatePartitions(updateId, - localFate.getPartitions().stream().map(FatePartition::toThrift).toList()); + fate.getPartitions().stream().map(FatePartition::toThrift).toList()); } } } @@ -112,18 +121,17 @@ public boolean setPartitions(TInfo tinfo, TCredentials credentials, long updateI } synchronized (this) { - var localFate = fate; - if (localFate != null && expectedUpdateId != null && updateId == expectedUpdateId) { + if 
(fate != null && expectedUpdateId != null && updateId == expectedUpdateId) { // Set to null which makes it so that an update id can only be used once. expectedUpdateId = null; var desiredSet = desired.stream().map(FatePartition::from).collect(Collectors.toSet()); - var oldPartitions = localFate.setPartitions(desiredSet); + var oldPartitions = fate.setPartitions(desiredSet); log.info("Changed partitions from {} to {}", oldPartitions, desiredSet); return true; } else { log.debug( "Did not change partitions to {} expectedUpdateId:{} updateId:{} localFate==null:{}", - desired, expectedUpdateId, updateId, localFate == null); + desired, expectedUpdateId, updateId, fate == null); return false; } } @@ -148,9 +156,18 @@ public void seeded(TInfo tinfo, TCredentials credentials, List t } } - public void stop() { + public synchronized void stop() { fate.shutdown(1, TimeUnit.MINUTES); fate.close(); fateWorkerEnv.stop(); + fate = null; + fateWorkerEnv = null; + } + + public synchronized List getMetricsProducers() { + Preconditions.checkState(fate != null, "Not started yet"); + return List.of(new FateExecutorMetricsProducer(context, fate.getFateExecutors(), context + .getConfiguration().getTimeInMillis(Property.MANAGER_FATE_METRICS_MIN_UPDATE_INTERVAL))); + } } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java index 124a1326aa0..d3238c12584 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java @@ -63,9 +63,9 @@ public class FateWorkerEnv implements FateEnv { private final ExecutorService refreshPool; private final ExecutorService renamePool; private final ServiceLock serviceLock; - private final LiveTServerSet tservers; private final SplitFileCache splitCache; private final EventHandler eventHandler; + private final LiveTServerSet 
liveTServerSet; private final EventQueue queue = new EventQueue(); private final AtomicBoolean stopped = new AtomicBoolean(false); @@ -147,7 +147,7 @@ public void event(Collection extents, String msg, Object... args) { } } - FateWorkerEnv(ServerContext ctx, ServiceLock lock) { + FateWorkerEnv(ServerContext ctx, ServiceLock lock, LiveTServerSet liveTserverSet) { this.ctx = ctx; this.refreshPool = ThreadPools.getServerThreadPools().getPoolBuilder("Tablet refresh ") .numCoreThreads(ctx.getConfiguration().getCount(Property.MANAGER_TABLET_REFRESH_MINTHREADS)) @@ -159,9 +159,9 @@ public void event(Collection extents, String msg, Object... args) { this.renamePool = ThreadPools.getServerThreadPools() .getPoolBuilder(IMPORT_TABLE_RENAME_POOL.poolName).numCoreThreads(poolSize).build(); this.serviceLock = lock; - this.tservers = new LiveTServerSet(ctx); this.splitCache = new SplitFileCache(ctx); this.eventHandler = new EventHandler(); + this.liveTServerSet = liveTserverSet; eventSendThread = Threads.createCriticalThread("Fate Worker Event Sender", new EventSender()); eventSendThread.start(); @@ -187,7 +187,7 @@ public void recordCompactionCompletion(ExternalCompactionId ecid) { @Override public Set onlineTabletServers() { - return tservers.getSnapshot().getTservers(); + return liveTServerSet.getSnapshot().getTservers(); } @Override diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetricsWatcher.java b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetricsProducer.java similarity index 92% rename from server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetricsWatcher.java rename to server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetricsProducer.java index 3e5c289c445..43b8508c410 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetricsWatcher.java +++ 
b/server/manager/src/main/java/org/apache/accumulo/manager/metrics/fate/FateExecutorMetricsProducer.java @@ -31,14 +31,14 @@ import io.micrometer.core.instrument.MeterRegistry; -public class FateExecutorMetricsWatcher implements MetricsProducer { +public class FateExecutorMetricsProducer implements MetricsProducer { private final Set> fateExecutors; private final ServerContext context; private final long refreshDelay; private MeterRegistry registry; - public FateExecutorMetricsWatcher(ServerContext context, Set> fateExecutors, - long minimumRefreshDelay) { + public FateExecutorMetricsProducer(ServerContext context, + Set> fateExecutors, long minimumRefreshDelay) { this.context = context; this.fateExecutors = fateExecutors; this.refreshDelay = Math.max(DEFAULT_MIN_REFRESH_DELAY, minimumRefreshDelay); diff --git a/test/src/main/java/org/apache/accumulo/test/fate/FlakyFateManager.java b/test/src/main/java/org/apache/accumulo/test/fate/FlakyFateManager.java index d8ea9578346..df5be7784cf 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/FlakyFateManager.java +++ b/test/src/main/java/org/apache/accumulo/test/fate/FlakyFateManager.java @@ -35,10 +35,11 @@ protected FlakyFateManager(ServerOpts opts, String[] args) throws IOException { } @Override - protected Fate initializeFateInstance(ServerContext context, FateStore store) { + protected Fate createFateInstance(FateEnv env, FateStore store, + ServerContext context) { LoggerFactory.getLogger(FlakyFateManager.class).info("Creating Flaky Fate for {}", store.type()); - return new FlakyFate<>(this, store, TraceRepo::toLogString, getConfiguration()); + return new FlakyFate<>(env, store, TraceRepo::toLogString, getConfiguration()); } public static void main(String[] args) throws Exception { diff --git a/test/src/main/java/org/apache/accumulo/test/fate/SlowFateSplitManager.java b/test/src/main/java/org/apache/accumulo/test/fate/SlowFateSplitManager.java index 1b68fec1c8e..9e69845b27f 100644 --- 
a/test/src/main/java/org/apache/accumulo/test/fate/SlowFateSplitManager.java +++ b/test/src/main/java/org/apache/accumulo/test/fate/SlowFateSplitManager.java @@ -45,9 +45,10 @@ protected SlowFateSplitManager(ServerOpts opts, String[] args) throws IOExceptio } @Override - protected Fate initializeFateInstance(ServerContext context, FateStore store) { + protected Fate createFateInstance(FateEnv env, FateStore store, + ServerContext context) { log.info("Creating Slow Split Fate for {}", store.type()); - return new SlowFateSplit<>(this, store, TraceRepo::toLogString, getConfiguration()); + return new SlowFateSplit<>(env, store, TraceRepo::toLogString, getConfiguration()); } public static void main(String[] args) throws Exception { From 5141239247ab6044639e655636332fb058329fcc Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Wed, 18 Feb 2026 20:39:09 +0000 Subject: [PATCH 26/38] WIP --- .../core/client/admin/servers/ServerId.java | 2 +- .../apache/accumulo/core/conf/Property.java | 6 +++++ .../accumulo/server/AbstractServer.java | 4 ---- .../org/apache/accumulo/manager/Manager.java | 5 ++-- .../accumulo/manager/ManagerAssistant.java | 23 ++++++++++--------- 5 files changed, 22 insertions(+), 18 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/servers/ServerId.java b/core/src/main/java/org/apache/accumulo/core/client/admin/servers/ServerId.java index 63c375d2fac..beef589c07f 100644 --- a/core/src/main/java/org/apache/accumulo/core/client/admin/servers/ServerId.java +++ b/core/src/main/java/org/apache/accumulo/core/client/admin/servers/ServerId.java @@ -38,7 +38,7 @@ public final class ServerId implements Comparable { * @since 4.0.0 */ public enum Type { - MANAGER, MONITOR, GARBAGE_COLLECTOR, COMPACTOR, SCAN_SERVER, TABLET_SERVER, MANAGER_ASSISTANT; + MANAGER, MANAGER_ASSISTANT, MONITOR, GARBAGE_COLLECTOR, COMPACTOR, SCAN_SERVER, TABLET_SERVER; } private final Type type; diff --git 
a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java index 94a9a75257e..89d8ee6d39e 100644 --- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java +++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java @@ -406,6 +406,12 @@ was changed and it now can accept multiple class names. The metrics spi was intr MANAGER_ASSISTANT_PORTSEARCH("manager.assistant.port.search", "true", PropertyType.BOOLEAN, "if the manager.assistant.port ports are in use, search higher ports until one is available.", "4.0.0"), + MANAGER_ASSISTANT_MINTHREADS("manager.assistant.server.threads.minimum", "20", PropertyType.COUNT, + "The minimum number of threads to use to handle incoming requests.", "4.0.0"), + MANAGER_ASSISTANT_MINTHREADS_TIMEOUT("manager.assistant.server.threads.timeout", "0s", + PropertyType.TIMEDURATION, + "The time after which incoming request threads terminate with no work available. Zero (0) will keep the threads alive indefinitely.", + "4.0.0"), MANAGER_TABLET_BALANCER("manager.tablet.balancer", "org.apache.accumulo.core.spi.balancer.TableLoadBalancer", PropertyType.CLASSNAME, "The balancer class that accumulo will use to make tablet assignment and " diff --git a/server/base/src/main/java/org/apache/accumulo/server/AbstractServer.java b/server/base/src/main/java/org/apache/accumulo/server/AbstractServer.java index 844931b2830..6c599468913 100644 --- a/server/base/src/main/java/org/apache/accumulo/server/AbstractServer.java +++ b/server/base/src/main/java/org/apache/accumulo/server/AbstractServer.java @@ -174,10 +174,6 @@ protected AbstractServer(ServerId.Type serverType, ServerOpts opts, case TABLET_SERVER: metricSource = MetricSource.TABLET_SERVER; break; - case MANAGER_ASSISTANT: - // TODO create a new source? 
- metricSource = MetricSource.MANAGER; - break; default: throw new IllegalArgumentException("Unhandled server type: " + serverType); } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index 87c46814592..d6967d8c5de 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -966,8 +966,9 @@ public void run() { // Start manager assistant before getting lock, this allows non primary manager processes to // work on stuff. - assitantManager = - new ManagerAssistant(getContext(), getBindAddress(), tserverSet, this::createFateInstance); + var shutdownComplete = getShutdownComplete(); + assitantManager = new ManagerAssistant(getContext(), getBindAddress(), tserverSet, + this::createFateInstance, shutdownComplete::get); assitantManager.start(); metricsInfo .addMetricsProducers(assitantManager.getMetricsProducers().toArray(new MetricsProducer[0])); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java index 48fc021384c..4db4a39b940 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; import org.apache.accumulo.core.client.admin.servers.ServerId; import org.apache.accumulo.core.conf.Property; @@ -51,7 +52,7 @@ /** * An assistant to the manager */ -// TODO because this does not extend abstract server it does not get some of the benefits like +// FOLLOW_ON because this does not extend abstract server it does not get some of the benefits like // monitoring of lock public class ManagerAssistant { @@ 
-60,19 +61,22 @@ public class ManagerAssistant { private final String bindAddress; private final LiveTServerSet liveTServerSet; private final FateFactory fateFactory; + private final Supplier shutdownComplete; private volatile ServiceLock managerWorkerLock; private FateWorker fateWorker; private ServerAddress thriftServer; protected ManagerAssistant(ServerContext context, String bindAddress, - LiveTServerSet liveTServerSet, FateFactory fateFactory) { - // create another server context because the server context has the lock... - // TODO creating another context instance in the process may cause problems, like duplicating - // some thread pools + LiveTServerSet liveTServerSet, FateFactory fateFactory, Supplier shutdownComplete) { + // Create another server context because the server context has the lock info and this class + // creates another lock separate from the manager lock. + // FOLLOW_ON creating another context instance in the process may cause problems, like + // duplicating some thread pools this.context = new ServerContext(context.getSiteConfiguration()); this.bindAddress = bindAddress; this.liveTServerSet = liveTServerSet; this.fateFactory = fateFactory; + this.shutdownComplete = shutdownComplete; } public ServerContext getContext() { @@ -91,12 +95,10 @@ private HostAndPort startClientService() throws UnknownHostException { TProcessor processor = ThriftProcessorTypes.getManagerWorkerTProcessor(fateWorker, getContext()); - // TODO should the minthreads and timeout have their own props? Probably, do not expect this to - // have lots of RPCs so could be less. 
thriftServer = TServerUtils.createThriftServer(getContext(), bindAddress, Property.MANAGER_ASSISTANT_PORT, processor, this.getClass().getSimpleName(), - Property.MANAGER_ASSISTANT_PORTSEARCH, Property.MANAGER_MINTHREADS, - Property.MANAGER_MINTHREADS_TIMEOUT, Property.MANAGER_THREADCHECK); + Property.MANAGER_ASSISTANT_PORTSEARCH, Property.MANAGER_ASSISTANT_MINTHREADS, + Property.MANAGER_ASSISTANT_MINTHREADS_TIMEOUT, Property.MANAGER_THREADCHECK); thriftServer.startThriftServer("Thrift Manager Assistant Server"); log.info("Starting {} Thrift server, listening on {}", this.getClass().getSimpleName(), thriftServer.address); @@ -113,9 +115,8 @@ private void announceExistence(HostAndPort advertiseAddress) { zLockPath); var serverLockUUID = UUID.randomUUID(); managerWorkerLock = new ServiceLock(getContext().getZooSession(), zLockPath, serverLockUUID); - // TODO shutdown supplier, anything to do here? ServiceLock.LockWatcher lw = new ServiceLockSupport.ServiceLockWatcher( - ServerId.Type.MANAGER_ASSISTANT, () -> false, + ServerId.Type.MANAGER_ASSISTANT, shutdownComplete, (type) -> getContext().getLowMemoryDetector().logGCInfo(getContext().getConfiguration())); for (int i = 0; i < 120 / 5; i++) { From 903eb47a6aa0809bf5d18f3fe0765ce634dd4c50 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Wed, 18 Feb 2026 20:44:15 +0000 Subject: [PATCH 27/38] WIP --- core/src/main/spotbugs/exclude-filter.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/main/spotbugs/exclude-filter.xml b/core/src/main/spotbugs/exclude-filter.xml index cfaac3ea6f0..b8e08762165 100644 --- a/core/src/main/spotbugs/exclude-filter.xml +++ b/core/src/main/spotbugs/exclude-filter.xml @@ -30,6 +30,7 @@ + From 3f14811c7b440661cfa19cf3fffdc18e1c741800 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Wed, 18 Feb 2026 20:57:00 +0000 Subject: [PATCH 28/38] WIP --- .../org/apache/accumulo/manager/fate/FateManager.java | 9 ++++++--- .../org/apache/accumulo/manager/fate/FateWorker.java | 5 +++-- 2 files 
changed, 9 insertions(+), 5 deletions(-) diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index e34f805b190..05ffed14ac6 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -77,9 +77,11 @@ record FateHostPartition(HostAndPort hostPort, FatePartition partition) { private void managerWorkers() throws TException, InterruptedException { log.debug("Started Fate Manager"); + long stableCount = 0; outer: while (!stop.get()) { - // TODO make configurable - Thread.sleep(3_000); + + long sleepTime = Math.min(stableCount * 100, 5_000); + Thread.sleep(sleepTime); // This map will contain all current workers even their partitions are empty Map currentPartitions = getCurrentAssignments(); @@ -99,8 +101,10 @@ private void managerWorkers() throws TException, InterruptedException { }); }); stableAssignments.set(rangeMap); + stableCount++; } else { stableAssignments.set(TreeRangeMap.create()); + stableCount = 0; } // are there any workers with extra partitions? If so need to unload those first. @@ -272,7 +276,6 @@ public void run() { * @param desired The new set of fate partitions this server should start working. It should only * work on these and nothing else. * @return true if the partitions were set false if they were not set. 
- * @throws TException */ private boolean setWorkerPartitions(HostAndPort address, long updateId, Set desired) throws TException { diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java index 32ce08d2467..61e356c7b7b 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorker.java @@ -18,6 +18,8 @@ */ package org.apache.accumulo.manager.fate; +import static org.apache.accumulo.core.util.LazySingletons.RANDOM; + import java.util.List; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; @@ -40,7 +42,6 @@ import org.apache.accumulo.core.metadata.SystemTables; import org.apache.accumulo.core.metrics.MetricsProducer; import org.apache.accumulo.core.securityImpl.thrift.TCredentials; -import org.apache.accumulo.core.util.LazySingletons; import org.apache.accumulo.manager.metrics.fate.FateExecutorMetricsProducer; import org.apache.accumulo.manager.tableOps.FateEnv; import org.apache.accumulo.server.ServerContext; @@ -93,7 +94,7 @@ public TFatePartitions getPartitions(TInfo tinfo, TCredentials credentials) } // generate a new one time use update id - long updateId = LazySingletons.RANDOM.get().nextLong(); + long updateId = RANDOM.get().nextLong(); // Getting the partitions and setting the new update id must be mutually exclusive with any // updates of the partitions concurrently executing. 
This ensures the new update id goes with From 4a1a09017e4b19c902e653c5e65c9eb5b301aaee Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Wed, 18 Feb 2026 21:57:59 +0000 Subject: [PATCH 29/38] WIP --- .../accumulo/manager/fate/FateWorkerEnv.java | 8 +-- .../test/ComprehensiveMultiManagerIT.java | 59 +++++++++++++++++++ .../accumulo/test/MultipleManagerIT.java | 4 +- 3 files changed, 63 insertions(+), 8 deletions(-) create mode 100644 test/src/main/java/org/apache/accumulo/test/ComprehensiveMultiManagerIT.java diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java index d3238c12584..da151f7154f 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java @@ -222,13 +222,9 @@ public ServiceLock getServiceLock() { public SteadyTime getSteadyTime() { try { return SteadyTime.from(ctx.instanceOperations().getManagerTime()); - } catch (AccumuloException e) { - // TODO exceptions, add to to method signature or use a diff type?? - throw new RuntimeException(e); - } catch (AccumuloSecurityException e) { - throw new RuntimeException(e); + } catch (AccumuloException | AccumuloSecurityException e) { + throw new IllegalStateException(e); } - // return ctx.get } @Override diff --git a/test/src/main/java/org/apache/accumulo/test/ComprehensiveMultiManagerIT.java b/test/src/main/java/org/apache/accumulo/test/ComprehensiveMultiManagerIT.java new file mode 100644 index 00000000000..a3d50853cd7 --- /dev/null +++ b/test/src/main/java/org/apache/accumulo/test/ComprehensiveMultiManagerIT.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.accumulo.test; + +import org.apache.accumulo.core.client.Accumulo; +import org.apache.accumulo.core.client.AccumuloClient; +import org.apache.accumulo.core.conf.Property; +import org.apache.accumulo.harness.MiniClusterConfigurationCallback; +import org.apache.accumulo.harness.SharedMiniClusterBase; +import org.apache.accumulo.manager.Manager; +import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +; + +public class ComprehensiveMultiManagerIT extends ComprehensiveITBase { + + private static class ComprehensiveITConfiguration implements MiniClusterConfigurationCallback { + @Override + public void configureMiniCluster(MiniAccumuloConfigImpl cfg, + org.apache.hadoop.conf.Configuration coreSite) { + cfg.setProperty(Property.SSERV_CACHED_TABLET_METADATA_EXPIRATION, "5s"); + } + } + + @BeforeAll + public static void setup() throws Exception { + ComprehensiveITConfiguration c = new ComprehensiveITConfiguration(); + SharedMiniClusterBase.startMiniClusterWithConfig(c); + try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) { + client.securityOperations().changeUserAuthorizations("root", AUTHORIZATIONS); + } + + // Start two more managers + getCluster().exec(Manager.class); + getCluster().exec(Manager.class); + } + + @AfterAll + public static void 
teardown() { + SharedMiniClusterBase.stopMiniCluster(); + } +} diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java index 36d14d4a65a..d0386dfff30 100644 --- a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java +++ b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java @@ -67,7 +67,7 @@ public void test() throws Exception { var splits = IntStream.range(1, 10).mapToObj(i -> String.format("%03d", i)).map(Text::new) .collect(Collectors.toCollection(TreeSet::new)); var tableOpFutures = new ArrayList>(); - for (int i = 0; i < 100; i++) { + for (int i = 0; i < 1; i++) { var table = "t" + i; // TODO seeing in the logs that fate operations for the same table are running on different // processes, however there is a 5 second delay because there is no notification mechanism @@ -116,6 +116,6 @@ public void test() throws Exception { executor.shutdown(); System.out.println("DONE"); - // TODO kill processes + managerWorkers.forEach(Process::destroy); } } From 634ba3f72fe465240b9ba400cd192b1ae5f657e5 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Wed, 18 Feb 2026 23:11:33 +0000 Subject: [PATCH 30/38] WIP --- .../apache/accumulo/test/ComprehensiveMultiManagerIT.java | 1 - .../main/java/org/apache/accumulo/test/fate/FateITBase.java | 5 ++++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/test/src/main/java/org/apache/accumulo/test/ComprehensiveMultiManagerIT.java b/test/src/main/java/org/apache/accumulo/test/ComprehensiveMultiManagerIT.java index a3d50853cd7..278a051ec8a 100644 --- a/test/src/main/java/org/apache/accumulo/test/ComprehensiveMultiManagerIT.java +++ b/test/src/main/java/org/apache/accumulo/test/ComprehensiveMultiManagerIT.java @@ -27,7 +27,6 @@ import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -; public class 
ComprehensiveMultiManagerIT extends ComprehensiveITBase { diff --git a/test/src/main/java/org/apache/accumulo/test/fate/FateITBase.java b/test/src/main/java/org/apache/accumulo/test/fate/FateITBase.java index 278f65d33a2..1777531cbbb 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/FateITBase.java +++ b/test/src/main/java/org/apache/accumulo/test/fate/FateITBase.java @@ -51,6 +51,7 @@ import org.apache.accumulo.core.fate.AbstractFateStore; import org.apache.accumulo.core.fate.Fate; import org.apache.accumulo.core.fate.FateId; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.FateStore; import org.apache.accumulo.core.fate.ReadOnlyFateStore; import org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus; @@ -559,9 +560,11 @@ private void submitDeferred(Fate fate, ServerContext sctx, Set } protected Fate initializeFate(FateStore store) { - return new Fate<>(new TestEnv(), store, false, r -> r + "", + var fate = new Fate<>(new TestEnv(), store, false, r -> r + "", FateTestUtil.updateFateConfig(new ConfigurationCopy(), 1, "AllFateOps"), new ScheduledThreadPoolExecutor(2)); + fate.setPartitions(Set.of(FatePartition.all(store.type()))); + return fate; } protected abstract TStatus getTxStatus(ServerContext sctx, FateId fateId); From e5ef7e673902a523e25365ace40881c24f39412c Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 19 Feb 2026 00:14:44 +0000 Subject: [PATCH 31/38] WIP --- .../main/java/org/apache/accumulo/test/fate/FastFate.java | 3 +++ .../apache/accumulo/test/fate/FateExecutionOrderITBase.java | 5 ++++- .../org/apache/accumulo/test/fate/MultipleStoresITBase.java | 3 +++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/test/src/main/java/org/apache/accumulo/test/fate/FastFate.java b/test/src/main/java/org/apache/accumulo/test/fate/FastFate.java index f23596569f3..c969f17253b 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/FastFate.java +++ 
b/test/src/main/java/org/apache/accumulo/test/fate/FastFate.java @@ -19,11 +19,13 @@ package org.apache.accumulo.test.fate; import java.time.Duration; +import java.util.Set; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.function.Function; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.fate.Fate; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.FateStore; import org.apache.accumulo.core.fate.Repo; @@ -40,6 +42,7 @@ public FastFate(T environment, FateStore store, boolean runDeadResCleaner, Function,String> toLogStrFunc, AccumuloConfiguration conf) { super(environment, store, runDeadResCleaner, toLogStrFunc, conf, new ScheduledThreadPoolExecutor(2)); + setPartitions(Set.of(FatePartition.all(store.type()))); } @Override diff --git a/test/src/main/java/org/apache/accumulo/test/fate/FateExecutionOrderITBase.java b/test/src/main/java/org/apache/accumulo/test/fate/FateExecutionOrderITBase.java index ee3427fc2c2..4bbf06e0a1a 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/FateExecutionOrderITBase.java +++ b/test/src/main/java/org/apache/accumulo/test/fate/FateExecutionOrderITBase.java @@ -54,6 +54,7 @@ import org.apache.accumulo.core.fate.Fate; import org.apache.accumulo.core.fate.Fate.TxInfo; import org.apache.accumulo.core.fate.FateId; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.FateStore; import org.apache.accumulo.core.fate.Repo; import org.apache.accumulo.harness.SharedMiniClusterBase; @@ -194,9 +195,11 @@ private void waitFor(FateStore store, FateId txid) throws Exception } protected Fate initializeFate(AccumuloClient client, FateStore store) { - return new Fate<>(new FeoTestEnv(client), store, false, r -> r + "", + var fate = new Fate<>(new FeoTestEnv(client), store, false, r -> r + "", FateTestUtil.updateFateConfig(new ConfigurationCopy(), 1, "AllFateOps"), new ScheduledThreadPoolExecutor(2)); + 
fate.setPartitions(Set.of(FatePartition.all(store.type()))); + return fate; } private static Entry toIdStep(Entry e) { diff --git a/test/src/main/java/org/apache/accumulo/test/fate/MultipleStoresITBase.java b/test/src/main/java/org/apache/accumulo/test/fate/MultipleStoresITBase.java index cb33e45e056..248ea028032 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/MultipleStoresITBase.java +++ b/test/src/main/java/org/apache/accumulo/test/fate/MultipleStoresITBase.java @@ -255,8 +255,10 @@ private void testMultipleFateInstances(TestStoreFactory testSto Fate fate1 = new Fate<>(testEnv1, store1, true, Object::toString, DefaultConfiguration.getInstance(), new ScheduledThreadPoolExecutor(2)); + fate1.setPartitions(Set.of(FatePartition.all(store1.type()))); Fate fate2 = new Fate<>(testEnv2, store2, false, Object::toString, DefaultConfiguration.getInstance(), new ScheduledThreadPoolExecutor(2)); + fate2.setPartitions(Set.of(FatePartition.all(store2.type()))); try { for (int i = 0; i < numFateIds; i++) { @@ -363,6 +365,7 @@ private void testDeadReservationsCleanup(TestStoreFactory testStor // fate1. fate2 = new Fate<>(testEnv2, store2, false, Object::toString, config, new ScheduledThreadPoolExecutor(2)); + fate2.setPartitions(Set.of(FatePartition.all(store2.type()))); // Wait for the "dead" reservations to be deleted and picked up again (reserved using // fate2/store2/lock2 now). 
From 05cd769cc07cbddea0972ab4f05d633a0052297b Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 19 Feb 2026 01:03:55 +0000 Subject: [PATCH 32/38] WIP --- .../apache/accumulo/test/fate/FateITBase.java | 128 +++++++++++++++++- 1 file changed, 124 insertions(+), 4 deletions(-) diff --git a/test/src/main/java/org/apache/accumulo/test/fate/FateITBase.java b/test/src/main/java/org/apache/accumulo/test/fate/FateITBase.java index 1777531cbbb..1411e258ccb 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/FateITBase.java +++ b/test/src/main/java/org/apache/accumulo/test/fate/FateITBase.java @@ -24,6 +24,7 @@ import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.IN_PROGRESS; import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.NEW; import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.SUBMITTED; +import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.SUCCESSFUL; import static org.apache.accumulo.core.fate.ReadOnlyFateStore.TStatus.UNKNOWN; import static org.apache.accumulo.test.fate.FateTestUtil.TEST_FATE_OP; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; @@ -47,10 +48,10 @@ import java.util.concurrent.atomic.AtomicReference; import org.apache.accumulo.core.conf.ConfigurationCopy; -import org.apache.accumulo.core.conf.Property; import org.apache.accumulo.core.fate.AbstractFateStore; import org.apache.accumulo.core.fate.Fate; import org.apache.accumulo.core.fate.FateId; +import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.FateStore; import org.apache.accumulo.core.fate.ReadOnlyFateStore; @@ -577,9 +578,6 @@ public void testShutdownDoesNotFailTx() throws Exception { protected void testShutdownDoesNotFailTx(FateStore store, ServerContext sctx) throws Exception { - ConfigurationCopy config = new ConfigurationCopy(); - config.set(Property.GENERAL_THREADPOOL_SIZE, "2"); - Fate fate = 
initializeFate(store); // Wait for the transaction runner to be scheduled. @@ -636,6 +634,128 @@ protected void testShutdownDoesNotFailTx(FateStore store, ServerContext assertNull(interruptedException.get()); } + public static class DoNothingRepo implements Repo { + private static final long serialVersionUID = 1L; + + @Override + public Repo call(FateId fateId, TestEnv environment) throws Exception { + return null; + } + + @Override + public void undo(FateId fateId, TestEnv environment) throws Exception { + + } + + @Override + public String getReturn() { + return ""; + } + + @Override + public long isReady(FateId fateId, TestEnv environment) throws Exception { + return 0; + } + + @Override + public String getName() { + return "none"; + } + } + + @Test + @Timeout(60) + public void testPartitions() throws Exception { + executeTest(this::testPartitions); + } + + protected void testPartitions(FateStore store, ServerContext sctx) { + // This test ensures that fate only processes fateids that fall within its assigned partitions + // of fateids. 
+ Fate fate = initializeFate(store); + fate.setPartitions(Set.of()); + + Set fateIds = new HashSet<>(); + + for (int i = 0; i < 100; i++) { + var txid = fate.startTransaction(); + fateIds.add(txid); + + fate.seedTransaction(TEST_FATE_OP, txid, new DoNothingRepo(), false, "no goal"); + } + + for (var fateId : fateIds) { + assertEquals(SUBMITTED, getTxStatus(sctx, fateId)); + } + + // start processing all uuids that start with 1 or 5, but no other ids + fate.setPartitions(Set.of(newPartition(store.type(), "1"), newPartition(store.type(), "5"))); + + Wait.waitFor(() -> fateIds.stream().filter( + fateId -> fateId.getTxUUIDStr().startsWith("1") || fateId.getTxUUIDStr().startsWith("5")) + .map(fateId -> getTxStatus(sctx, fateId)).allMatch(status -> status == SUCCESSFUL)); + + for (var fateId : fateIds) { + var uuid = fateId.getTxUUIDStr(); + if (uuid.startsWith("1") || uuid.startsWith("5")) { + assertEquals(SUCCESSFUL, getTxStatus(sctx, fateId)); + } else { + assertEquals(SUBMITTED, getTxStatus(sctx, fateId)); + } + } + + // start processing uuids that start with e + fate.setPartitions(Set.of(newPartition(store.type(), "e"))); + Wait.waitFor(() -> fateIds.stream().filter(fateId -> fateId.getTxUUIDStr().startsWith("e")) + .map(fateId -> getTxStatus(sctx, fateId)).allMatch(status -> status == SUCCESSFUL)); + + for (var fateId : fateIds) { + var uuid = fateId.getTxUUIDStr(); + if (uuid.startsWith("1") || uuid.startsWith("5") || uuid.startsWith("e")) { + assertEquals(SUCCESSFUL, getTxStatus(sctx, fateId)); + } else { + assertEquals(SUBMITTED, getTxStatus(sctx, fateId)); + } + } + + // add new ids to ensure that uuid prefixes 1 and 5 are no longer processed + Set fateIds2 = new HashSet<>(); + + for (int i = 0; i < 100; i++) { + var txid = fate.startTransaction(); + fateIds2.add(txid); + fate.seedTransaction(TEST_FATE_OP, txid, new DoNothingRepo(), false, "no goal"); + } + Wait.waitFor(() -> fateIds2.stream().filter(fateId -> fateId.getTxUUIDStr().startsWith("e")) + 
.map(fateId -> getTxStatus(sctx, fateId)).allMatch(status -> status == SUCCESSFUL)); + for (var fateId : fateIds2) { + var uuid = fateId.getTxUUIDStr(); + if (uuid.startsWith("e")) { + assertEquals(SUCCESSFUL, getTxStatus(sctx, fateId)); + } else { + assertEquals(SUBMITTED, getTxStatus(sctx, fateId)); + } + } + + // nothing should have changed with the first set of ids + for (var fateId : fateIds) { + var uuid = fateId.getTxUUIDStr(); + if (uuid.startsWith("1") || uuid.startsWith("5") || uuid.startsWith("e")) { + assertEquals(SUCCESSFUL, getTxStatus(sctx, fateId)); + } else { + assertEquals(SUBMITTED, getTxStatus(sctx, fateId)); + } + } + } + + private FatePartition newPartition(FateInstanceType type, String firstNibble) { + // these suffixes have all uuid chars except for the first nibble/4-bits + String zeroSuffix = "0000000-0000-0000-0000-000000000000"; + String ffSuffix = "fffffff-ffff-ffff-ffff-ffffffffffff"; + return new FatePartition(FateId.from(type, firstNibble + zeroSuffix), + FateId.from(type, firstNibble + ffSuffix)); + } + private static void inCall() throws InterruptedException { // signal that call started callStarted.countDown(); From aa5a1ec3f2f1d55e6979166b817d8326cf1ab292 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 19 Feb 2026 01:15:50 +0000 Subject: [PATCH 33/38] WIP --- .../accumulo/core/lock/ServiceLockData.java | 2 +- .../accumulo/test/MultipleManagerIT.java | 121 ------------------ 2 files changed, 1 insertion(+), 122 deletions(-) delete mode 100644 test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java diff --git a/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockData.java b/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockData.java index ec1bfd50b2a..9997d75c11c 100644 --- a/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockData.java +++ b/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockData.java @@ -46,7 +46,7 @@ public static enum ThriftService { COORDINATOR, COMPACTOR, 
FATE, - FATE_WORKER, + MANAGER_ASSISTANT, GC, MANAGER, NONE, diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java deleted file mode 100644 index d0386dfff30..00000000000 --- a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.accumulo.test; - -import static org.junit.jupiter.api.Assertions.assertEquals; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.TreeSet; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import org.apache.accumulo.core.client.Accumulo; -import org.apache.accumulo.core.client.admin.CompactionConfig; -import org.apache.accumulo.core.data.Mutation; -import org.apache.accumulo.manager.Manager; -import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl; -import org.apache.accumulo.test.functional.ConfigurableMacBase; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.Text; -import org.junit.jupiter.api.Test; - -public class MultipleManagerIT extends ConfigurableMacBase { - @Override - protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { - // TODO add a way to start multiple managers to mini - cfg.getClusterServerConfiguration().setNumDefaultCompactors(8); - super.configure(cfg, hadoopCoreSite); - } - - @Test - public void test() throws Exception { - - List managerWorkers = new ArrayList<>(); - for (int i = 0; i < 1; i++) { - managerWorkers.add(exec(Manager.class)); - } - - var executor = Executors.newCachedThreadPool(); - - Thread.sleep(30_000); - // start more manager processes, should be assigned fate work - for (int i = 0; i < 3; i++) { - managerWorkers.add(exec(Manager.class)); - } - - try (var client = Accumulo.newClient().from(getClientProperties()).build()) { - var splits = IntStream.range(1, 10).mapToObj(i -> String.format("%03d", i)).map(Text::new) - .collect(Collectors.toCollection(TreeSet::new)); - var tableOpFutures = new ArrayList>(); - for (int i = 0; i < 1; i++) { - var table = "t" + i; - // TODO seeing in the logs that fate operations for the same table are running on different - // processes, however there is a 5 
second delay because there is no notification mechanism - // currently. - - // TODO its hard to find everything related to a table id in the logs, especially when the - // table id is like "b". Was trying to follow a single table across multiple manager workers - // processes. - var tableOpsFuture = executor.submit(() -> { - client.tableOperations().create(table); - log.info("Created table {}", table); - var expectedRows = new HashSet(); - try (var writer = client.createBatchWriter(table)) { - for (int r = 0; r < 10; r++) { - var row = String.format("%03d", r); - expectedRows.add(row); - Mutation m = new Mutation(row); - m.put("f", "q", "v"); - writer.addMutation(m); - } - } - log.info("Wrote data to table {}", table); - client.tableOperations().addSplits(table, splits); - log.info("Split table {}", table); // TODO split operation does not log table id and fate - // opid anywhere - client.tableOperations().compact(table, new CompactionConfig().setWait(true)); - log.info("Compacted table {}", table); - client.tableOperations().merge(table, null, null); - log.info("Merged table {}", table); - try (var scanner = client.createScanner(table)) { - var rowsSeen = scanner.stream().map(e -> e.getKey().getRowData().toString()) - .collect(Collectors.toSet()); - assertEquals(expectedRows, rowsSeen); - log.info("verified table {}", table); - } - return null; - }); - tableOpFutures.add(tableOpsFuture); - } - - for (var tof : tableOpFutures) { - tof.get(); - } - } - - executor.shutdown(); - - System.out.println("DONE"); - managerWorkers.forEach(Process::destroy); - } -} From 566bce857e0b3faa5235a0ead9d4921515dcd301 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 19 Feb 2026 16:07:51 +0000 Subject: [PATCH 34/38] WIP --- .../java/org/apache/accumulo/manager/ManagerAssistant.java | 3 ++- .../org/apache/accumulo/manager/fate/FateWorkerEnv.java | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git 
a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java index 4db4a39b940..4943599f353 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/ManagerAssistant.java @@ -124,7 +124,8 @@ private void announceExistence(HostAndPort advertiseAddress) { ServiceLockData.ServiceDescriptors descriptors = new ServiceLockData.ServiceDescriptors(); for (ServiceLockData.ThriftService svc : new ServiceLockData.ThriftService[] { - ServiceLockData.ThriftService.CLIENT, ServiceLockData.ThriftService.FATE_WORKER}) { + ServiceLockData.ThriftService.CLIENT, + ServiceLockData.ThriftService.MANAGER_ASSISTANT}) { descriptors.addService(new ServiceLockData.ServiceDescriptor(serverLockUUID, svc, advertiseAddress.toString(), this.getResourceGroup())); } diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java index da151f7154f..f53bd667824 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateWorkerEnv.java @@ -18,7 +18,7 @@ */ package org.apache.accumulo.manager.fate; -import static org.apache.accumulo.core.util.threads.ThreadPoolNames.IMPORT_TABLE_RENAME_POOL; +import static org.apache.accumulo.core.util.threads.ThreadPoolNames.FILE_RENAME_POOL; import java.util.Collection; import java.util.Set; @@ -156,8 +156,8 @@ public void event(Collection extents, String msg, Object... 
args) { int poolSize = ctx.getConfiguration().getCount(Property.MANAGER_RENAME_THREADS); // FOLLOW_ON this import table name is not correct for the thread pool name, fix in stand alone // PR - this.renamePool = ThreadPools.getServerThreadPools() - .getPoolBuilder(IMPORT_TABLE_RENAME_POOL.poolName).numCoreThreads(poolSize).build(); + this.renamePool = ThreadPools.getServerThreadPools().getPoolBuilder(FILE_RENAME_POOL.poolName) + .numCoreThreads(poolSize).build(); this.serviceLock = lock; this.splitCache = new SplitFileCache(ctx); this.eventHandler = new EventHandler(); From 8a64f5b405d9ba99d635795e576d3be0f4e9adea Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 19 Feb 2026 17:16:09 +0000 Subject: [PATCH 35/38] WIP --- .../accumulo/core/fate/AbstractFateStore.java | 7 +- .../org/apache/accumulo/manager/Manager.java | 4 +- .../accumulo/manager/fate/FateManager.java | 69 ++++++++++++++----- 3 files changed, 59 insertions(+), 21 deletions(-) diff --git a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java index 3a7b30c76fb..0c330d1ea40 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java @@ -248,9 +248,10 @@ public ReadOnlyFateTxStore read(FateId fateId) { @Override public Map getActiveReservations(Set partitions) { - return getTransactions(partitions, EnumSet.allOf(TStatus.class)) - .filter(entry -> entry.getFateReservation().isPresent()).collect(Collectors - .toMap(FateIdStatus::getFateId, entry -> entry.getFateReservation().orElseThrow())); + try(var stream = getTransactions(partitions, EnumSet.allOf(TStatus.class))){ + return stream.filter(entry -> entry.getFateReservation().isPresent()).collect(Collectors + .toMap(FateIdStatus::getFateId, entry -> entry.getFateReservation().orElseThrow())); + } } protected boolean isRunnable(TStatus status) { diff --git 
a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java index eec2de01d2e..6ebe0d6298c 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/Manager.java @@ -714,7 +714,7 @@ public void run() { case CLEAN_STOP: switch (getManagerState()) { case NORMAL: - fateManager.stop(); + fateManager.stop(Duration.ofMinutes(1)); setManagerState(ManagerState.SAFE_MODE); break; case SAFE_MODE: { @@ -1247,7 +1247,7 @@ boolean canSuspendTablets() { log.debug("Shutting down fate."); fate(FateInstanceType.META).close(); - fateManager.stop(); + fateManager.stop(Duration.ZERO); assitantManager.stop(); splitter.stop(); diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 05ffed14ac6..12a412940f6 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -20,11 +20,13 @@ import static org.apache.accumulo.core.lock.ServiceLockPaths.ResourceGroupPredicate.DEFAULT_RG_ONLY; +import java.time.Duration; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -33,11 +35,15 @@ import org.apache.accumulo.core.fate.FateInstanceType; import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.thrift.FateWorkerService; +import org.apache.accumulo.core.fate.user.UserFateStore; import org.apache.accumulo.core.lock.ServiceLockPaths.AddressSelector; +import org.apache.accumulo.core.metadata.SystemTables; import 
org.apache.accumulo.core.rpc.ThriftUtil; import org.apache.accumulo.core.rpc.clients.ThriftClientTypes; import org.apache.accumulo.core.trace.TraceUtil; +import org.apache.accumulo.core.util.CountDownTimer; import org.apache.accumulo.core.util.threads.Threads; +import org.apache.accumulo.manager.tableOps.FateEnv; import org.apache.accumulo.server.ServerContext; import org.apache.thrift.TException; import org.slf4j.Logger; @@ -51,9 +57,11 @@ import com.google.common.net.HostAndPort; /** - * Partitions fate across manager assistant processes. This is done by assigning ranges of the fate + * Partitions {@link FateInstanceType#USER} fate across manager assistant processes. This is done by assigning ranges of the fate * uuid key space to different processes. The partitions are logical and do not correspond to the * physical partitioning of the fate table. + * + *

Does not currently manage {@link FateInstanceType#META}

*/ public class FateManager { @@ -152,35 +160,37 @@ private void managerWorkers() throws TException, InterruptedException { } } - private Thread thread = null; + private Thread assignmentThread = null; private Thread ntfyThread = null; public synchronized void start() { - Preconditions.checkState(thread == null); + Preconditions.checkState(assignmentThread == null); Preconditions.checkState(ntfyThread == null); Preconditions.checkState(!stop.get()); - thread = Threads.createCriticalThread("Fate Manager", () -> { + assignmentThread = Threads.createCriticalThread("Fate Manager", () -> { try { managerWorkers(); } catch (Exception e) { throw new IllegalStateException(e); } }); - thread.start(); + assignmentThread.start(); ntfyThread = Threads.createCriticalThread("Fate Notify", new NotifyTask()); ntfyThread.start(); } - public synchronized void stop() { + public synchronized void stop(Duration timeout) { if (!stop.compareAndSet(false, true)) { return; } + var timer = CountDownTimer.startNew(timeout); + try { - if (thread != null) { - thread.join(); + if (assignmentThread != null) { + assignmentThread.join(); } if (ntfyThread != null) { ntfyThread.join(); @@ -188,7 +198,7 @@ public synchronized void stop() { } catch (InterruptedException e) { throw new IllegalStateException(e); } - // Try to set every assistant manager to nothing. + // Try to set every assistant manager to an empty set of partitions. This will cause them all to stop looking for work. 
Map currentAssignments = null; try { currentAssignments = getCurrentAssignments(); @@ -208,10 +218,25 @@ public synchronized void stop() { } } - // TODO could wait for each assitant to finish any current operations - stableAssignments.set(TreeRangeMap.create()); + if(!timer.isExpired()) { + var store = new UserFateStore(context, SystemTables.FATE.tableName(), null, null); + + var reserved = store.getActiveReservations(Set.of(FatePartition.all(FateInstanceType.USER))); + while (!reserved.isEmpty() && !timer.isExpired()) { + if (log.isTraceEnabled()) { + reserved.forEach((fateId, reservation) -> { + log.trace("In stop(), waiting on {} {} ", fateId, reservation); + }); + } + try { + Thread.sleep(Math.min(100, timer.timeLeft(TimeUnit.MILLISECONDS))); + } catch (InterruptedException e) { + throw new IllegalStateException(e); + } + } + } } /** @@ -282,8 +307,10 @@ private boolean setWorkerPartitions(HostAndPort address, long updateId, FateWorkerService.Client client = ThriftUtil.getClient(ThriftClientTypes.FATE_WORKER, address, context); try { - return client.setPartitions(TraceUtil.traceInfo(), context.rpcCreds(), updateId, + log.trace("Setting partitions {} {}", address, desired); + var result = client.setPartitions(TraceUtil.traceInfo(), context.rpcCreds(), updateId, desired.stream().map(FatePartition::toThrift).toList()); + return result; } finally { ThriftUtil.returnClient(client, context); } @@ -319,9 +346,12 @@ private Map> computeDesiredAssignments( } }); - desiredAssignments.forEach((hp, parts) -> { - log.trace(" desired {} {} {}", hp, parts.size(), parts); - }); + if(log.isTraceEnabled()) { + log.trace("Logging desired partitions"); + desiredAssignments.forEach((hp, parts) -> { + log.trace(" desired {} {} {}", hp, parts.size(), parts); + }); + } return desiredAssignments; } @@ -375,7 +405,7 @@ private Map getCurrentAssignments() throws TExcep var workers = context.getServerPaths().getManagerWorker(DEFAULT_RG_ONLY, AddressSelector.all(), true); - 
log.trace("workers : " + workers); + log.trace("getting current assignments from {}", workers); Map currentAssignments = new HashMap<>(); @@ -395,6 +425,13 @@ private Map getCurrentAssignments() throws TExcep } } + if(log.isTraceEnabled()){ + log.trace("Logging current assignments"); + currentAssignments.forEach((hostPort, partitions)->{ + log.trace("current assignment {} {}", hostPort, partitions); + }); + } + return currentAssignments; } } From 812741ba2ccade3d4f39da0437035be76caeb967 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 19 Feb 2026 20:08:10 +0000 Subject: [PATCH 36/38] WIP --- .../accumulo/core/fate/AbstractFateStore.java | 4 +- .../accumulo/core/lock/ServiceLockPaths.java | 5 + .../accumulo/manager/fate/FateManager.java | 33 ++- .../accumulo/test/MultipleManagerIT.java | 276 ++++++++++++++++++ .../apache/accumulo/test/fate/FastFate.java | 3 - .../test/fate/FateOpsCommandsITBase.java | 6 +- .../test/fate/FatePoolsWatcherITBase.java | 7 + .../test/fate/MultipleStoresITBase.java | 1 + 8 files changed, 317 insertions(+), 18 deletions(-) create mode 100644 test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java diff --git a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java index 0c330d1ea40..62fee8ac78e 100644 --- a/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java +++ b/core/src/main/java/org/apache/accumulo/core/fate/AbstractFateStore.java @@ -248,9 +248,9 @@ public ReadOnlyFateTxStore read(FateId fateId) { @Override public Map getActiveReservations(Set partitions) { - try(var stream = getTransactions(partitions, EnumSet.allOf(TStatus.class))){ + try (var stream = getTransactions(partitions, EnumSet.allOf(TStatus.class))) { return stream.filter(entry -> entry.getFateReservation().isPresent()).collect(Collectors - .toMap(FateIdStatus::getFateId, entry -> entry.getFateReservation().orElseThrow())); + 
.toMap(FateIdStatus::getFateId, entry -> entry.getFateReservation().orElseThrow())); } } diff --git a/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java b/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java index d07fe8cb770..4abe201a61f 100644 --- a/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java +++ b/core/src/main/java/org/apache/accumulo/core/lock/ServiceLockPaths.java @@ -331,6 +331,11 @@ public ServiceLockPath getManager(boolean withLock) { } } + public Set getAssistantManagers(AddressSelector address, boolean withLock) { + return get(Constants.ZMANAGER_ASSISTANT_LOCK, ResourceGroupPredicate.DEFAULT_RG_ONLY, address, + withLock); + } + /** * Note that the ServiceLockPath object returned by this method does not populate the server * attribute. To get the location of the Monitor you will need to parse the lock data at the diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java index 12a412940f6..0fd9fb9b64e 100644 --- a/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java +++ b/server/manager/src/main/java/org/apache/accumulo/manager/fate/FateManager.java @@ -57,11 +57,13 @@ import com.google.common.net.HostAndPort; /** - * Partitions {@link FateInstanceType#USER} fate across manager assistant processes. This is done by assigning ranges of the fate - * uuid key space to different processes. The partitions are logical and do not correspond to the - * physical partitioning of the fate table. + * Partitions {@link FateInstanceType#USER} fate across manager assistant processes. This is done by + * assigning ranges of the fate uuid key space to different processes. The partitions are logical + * and do not correspond to the physical partitioning of the fate table. * - *

Does not currently manage {@link FateInstanceType#META}

+ *

+ * Does not currently manage {@link FateInstanceType#META} + *

*/ public class FateManager { @@ -91,8 +93,14 @@ private void managerWorkers() throws TException, InterruptedException { long sleepTime = Math.min(stableCount * 100, 5_000); Thread.sleep(sleepTime); - // This map will contain all current workers even their partitions are empty - Map currentPartitions = getCurrentAssignments(); + // This map will contain all current workers even if their partitions are empty + Map currentPartitions; + try { + currentPartitions = getCurrentAssignments(); + } catch (TException e) { + log.warn("Failed to get current partitions ", e); + continue; + } Map> currentAssignments = new HashMap<>(); currentPartitions.forEach((k, v) -> currentAssignments.put(k, v.partitions())); Set desiredParititions = getDesiredPartitions(currentAssignments.size()); @@ -198,7 +206,8 @@ public synchronized void stop(Duration timeout) { } catch (InterruptedException e) { throw new IllegalStateException(e); } - // Try to set every assistant manager to an empty set of partitions. This will cause them all to stop looking for work. + // Try to set every assistant manager to an empty set of partitions. This will cause them all to + // stop looking for work. 
Map currentAssignments = null; try { currentAssignments = getCurrentAssignments(); @@ -220,7 +229,7 @@ public synchronized void stop(Duration timeout) { stableAssignments.set(TreeRangeMap.create()); - if(!timer.isExpired()) { + if (!timer.isExpired()) { var store = new UserFateStore(context, SystemTables.FATE.tableName(), null, null); var reserved = store.getActiveReservations(Set.of(FatePartition.all(FateInstanceType.USER))); @@ -308,7 +317,7 @@ private boolean setWorkerPartitions(HostAndPort address, long updateId, ThriftUtil.getClient(ThriftClientTypes.FATE_WORKER, address, context); try { log.trace("Setting partitions {} {}", address, desired); - var result = client.setPartitions(TraceUtil.traceInfo(), context.rpcCreds(), updateId, + var result = client.setPartitions(TraceUtil.traceInfo(), context.rpcCreds(), updateId, desired.stream().map(FatePartition::toThrift).toList()); return result; } finally { @@ -346,7 +355,7 @@ private Map> computeDesiredAssignments( } }); - if(log.isTraceEnabled()) { + if (log.isTraceEnabled()) { log.trace("Logging desired partitions"); desiredAssignments.forEach((hp, parts) -> { log.trace(" desired {} {} {}", hp, parts.size(), parts); @@ -425,9 +434,9 @@ private Map getCurrentAssignments() throws TExcep } } - if(log.isTraceEnabled()){ + if (log.isTraceEnabled()) { log.trace("Logging current assignments"); - currentAssignments.forEach((hostPort, partitions)->{ + currentAssignments.forEach((hostPort, partitions) -> { log.trace("current assignment {} {}", hostPort, partitions); }); } diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java new file mode 100644 index 00000000000..5ed65068b44 --- /dev/null +++ b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java @@ -0,0 +1,276 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.accumulo.test; + +import static java.util.stream.Collectors.toSet; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import org.apache.accumulo.core.Constants; +import org.apache.accumulo.core.cli.ServerOpts; +import org.apache.accumulo.core.client.Accumulo; +import org.apache.accumulo.core.client.admin.CompactionConfig; +import org.apache.accumulo.core.clientImpl.ClientContext; +import org.apache.accumulo.core.conf.Property; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.fate.Fate; +import org.apache.accumulo.core.fate.FateInstanceType; +import org.apache.accumulo.core.fate.FatePartition; +import org.apache.accumulo.core.fate.FateStore; +import org.apache.accumulo.core.fate.user.UserFateStore; +import org.apache.accumulo.core.lock.ServiceLock; +import 
org.apache.accumulo.core.lock.ServiceLockPaths; +import org.apache.accumulo.core.lock.ServiceLockPaths.ServiceLockPath; +import org.apache.accumulo.core.metadata.SystemTables; +import org.apache.accumulo.core.util.UtilWaitThread; +import org.apache.accumulo.manager.Manager; +import org.apache.accumulo.manager.tableOps.FateEnv; +import org.apache.accumulo.manager.tableOps.TraceRepo; +import org.apache.accumulo.minicluster.ServerType; +import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl; +import org.apache.accumulo.server.ServerContext; +import org.apache.accumulo.test.fate.FastFate; +import org.apache.accumulo.test.functional.ConfigurableMacBase; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.Text; +import org.junit.jupiter.api.Test; +import org.slf4j.LoggerFactory; + +import com.google.common.net.HostAndPort; + +/** + * {@link ComprehensiveMultiManagerIT} runs multiple managers with lots of Accumulo APIs, however + * that does not actually verify that fate operations actually run on multiple managers. This test + * runs a smaller set of Accumulo API operations and does the following. + * + *
    + *
  • Starts new manager processes and verifies fate operations start running on them
  • + *
  • Kills assistant/non-primary manager processes and verifies the system recovers
  • + *
  • Kills primary manager processes and verifies the system recovers
  • + *
  • Verifies that Accumulo API calls are not impacted by managers starting/stoppping
  • + *
+ * + */ +public class MultipleManagerIT extends ConfigurableMacBase { + + // A manager that will quickly clean up fate reservations held by dead managers + public static class FastFateCleanupManager extends Manager { + protected FastFateCleanupManager(ServerOpts opts, String[] args) throws IOException { + super(opts, ServerContext::new, args); + } + + @Override + protected Fate createFateInstance(FateEnv env, FateStore store, + ServerContext context) { + LoggerFactory.getLogger(FastFateCleanupManager.class) + .info("Creating Fast fate cleanup manager for {}", store.type()); + return new FastFate<>(env, store, true, TraceRepo::toLogString, getConfiguration()); + } + + public static void main(String[] args) throws Exception { + try (FastFateCleanupManager manager = new FastFateCleanupManager(new ServerOpts(), args)) { + manager.runServer(); + } + } + } + + @Override + protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { + // TODO add a way to start multiple managers to mini + cfg.getClusterServerConfiguration().setNumDefaultCompactors(8); + // Set this lower so that locks timeout faster + cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s"); + cfg.setServerClass(ServerType.MANAGER, r -> FastFateCleanupManager.class); + super.configure(cfg, hadoopCoreSite); + } + + @Test + public void testFate() throws Exception { + + List managerWorkers = new ArrayList<>(); + var executor = Executors.newCachedThreadPool(); + + // Start a lot of background threads that should cause fate operations to run. 
+ try (var client = Accumulo.newClient().from(getClientProperties()).build()) { + // Create a table in order to wait for the single manager to become the primary manager + client.tableOperations().create("waitTable"); + + // start more manager processes, should be assigned fate work + managerWorkers.add(exec(FastFateCleanupManager.class)); + managerWorkers.add(exec(FastFateCleanupManager.class)); + + AtomicBoolean stop = new AtomicBoolean(false); + + var splits = IntStream.range(1, 10).mapToObj(i -> String.format("%03d", i)).map(Text::new) + .collect(Collectors.toCollection(TreeSet::new)); + var tableOpFutures = new ArrayList>(); + for (int i = 0; i < 10; i++) { + var table = "t" + i; + + // FOLLOW_ON its hard to find everything related to a table id in the logs across processes, + // especially when the + // table id is like "b". Was trying to follow a single table across multiple manager workers + // processes. + var tableOpsFuture = executor.submit(() -> { + while (!stop.get()) { + client.tableOperations().create(table); + log.info("Created table {}", table); + if (stop.get()) { + break; + } + var expectedRows = new HashSet(); + try (var writer = client.createBatchWriter(table)) { + for (int r = 0; r < 10; r++) { + var row = String.format("%03d", r); + expectedRows.add(row); + Mutation m = new Mutation(row); + m.put("f", "q", "v"); + writer.addMutation(m); + } + } + log.info("Wrote data to table {}", table); + if (stop.get()) { + break; + } + client.tableOperations().addSplits(table, splits); + log.info("Split table {}", table); + if (stop.get()) { + break; + } + client.tableOperations().compact(table, new CompactionConfig().setWait(true)); + log.info("Compacted table {}", table); + if (stop.get()) { + break; + } + client.tableOperations().merge(table, null, null); + log.info("Merged table {}", table); + if (stop.get()) { + break; + } + try (var scanner = client.createScanner(table)) { + var rowsSeen = + scanner.stream().map(e -> 
e.getKey().getRowData().toString()).collect(toSet()); + assertEquals(expectedRows, rowsSeen); + log.info("verified table {}", table); + } + client.tableOperations().delete(table); + log.info("Deleted table {}", table); + } + return null; + }); + tableOpFutures.add(tableOpsFuture); + } + + var ctx = getServerContext(); + + // FOLLOW_ON it seems any user can scan the fate table that is probably not good. Need to + // restrict read/write access to the system user. + var store = new UserFateStore(ctx, SystemTables.FATE.tableName(), null, null); + + // Wait until three different manager are seen running fate operations. + waitToSeeManagers(ctx, 3, store, false); + + // Start two new manager processes and wait until 5 managers are seen running fate operations + managerWorkers.add(exec(FastFateCleanupManager.class)); + managerWorkers.add(exec(FastFateCleanupManager.class)); + waitToSeeManagers(ctx, 5, store, false); + + // Kill two manager processes. Any fate operations they are running should resume elsewhere. + // Should also see three manager running operations after that. + managerWorkers.get(2).destroy(); + managerWorkers.get(3).destroy(); + log.debug("Killed 2 managers"); + waitToSeeManagers(ctx, 3, store, true); + + // Delete the lock of the primary manager which should cause it to halt. Then wait to see two + // assistant managers. + var primaryManager = ctx.getServerPaths().getManager(true); + log.debug("Delete lock of primary manager"); + ServiceLock.deleteLock(ctx.getZooSession().asReaderWriter(), primaryManager); + waitToSeeManagers(ctx, 2, store, true); + + stop.set(true); + // Wait for the background operations to complete and ensure that none had errors. Managers + // stoppping/starting should not cause any problems for Accumulo API operations. 
+ for (var tof : tableOpFutures) { + tof.get(); + } + } + + executor.shutdown(); + + managerWorkers.forEach(Process::destroy); + } + + private static void waitToSeeManagers(ClientContext context, int expectedManagers, + UserFateStore store, boolean managersKilled) { + + var assistants = + context.getServerPaths().getAssistantManagers(ServiceLockPaths.AddressSelector.all(), true); + while (assistants.size() != expectedManagers) { + UtilWaitThread.sleep(1); + assistants = context.getServerPaths() + .getAssistantManagers(ServiceLockPaths.AddressSelector.all(), true); + } + + var expectedServers = assistants.stream().map(ServiceLockPath::getServer) + .map(HostAndPort::fromString).collect(toSet()); + log.debug("managers seen in zookeeper :{}", expectedServers); + + Set reservationsSeen = new HashSet<>(); + Set extraSeen = new HashSet<>(); + while (reservationsSeen.size() < expectedManagers) { + var reservations = + store.getActiveReservations(Set.of(FatePartition.all(FateInstanceType.USER))); + reservations.values().forEach(reservation -> { + var slp = ServiceLockPaths.parse(Optional.empty(), reservation.getLockID().path); + if (slp.getType().equals(Constants.ZMANAGER_ASSISTANT_LOCK)) { + var hostPort = HostAndPort.fromString(slp.getServer()); + if (expectedServers.contains(hostPort)) { + reservationsSeen.add(hostPort); + } else if (!managersKilled) { + fail("Saw unexpected extra manager " + slp); + } else { + extraSeen.add(hostPort); + } + } + }); + UtilWaitThread.sleep(1); + } + + log.debug("managers seen in fate reservations :{}", reservationsSeen); + if (managersKilled) { + log.debug("extra managers seen in fate reservations : {}", extraSeen); + } + assertEquals(expectedManagers, reservationsSeen.size()); + } +} diff --git a/test/src/main/java/org/apache/accumulo/test/fate/FastFate.java b/test/src/main/java/org/apache/accumulo/test/fate/FastFate.java index c969f17253b..f23596569f3 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/FastFate.java +++ 
b/test/src/main/java/org/apache/accumulo/test/fate/FastFate.java @@ -19,13 +19,11 @@ package org.apache.accumulo.test.fate; import java.time.Duration; -import java.util.Set; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.function.Function; import org.apache.accumulo.core.conf.AccumuloConfiguration; import org.apache.accumulo.core.fate.Fate; -import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.FateStore; import org.apache.accumulo.core.fate.Repo; @@ -42,7 +40,6 @@ public FastFate(T environment, FateStore store, boolean runDeadResCleaner, Function,String> toLogStrFunc, AccumuloConfiguration conf) { super(environment, store, runDeadResCleaner, toLogStrFunc, conf, new ScheduledThreadPoolExecutor(2)); - setPartitions(Set.of(FatePartition.all(store.type()))); } @Override diff --git a/test/src/main/java/org/apache/accumulo/test/fate/FateOpsCommandsITBase.java b/test/src/main/java/org/apache/accumulo/test/fate/FateOpsCommandsITBase.java index e0ff93e4a4f..280efe6312b 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/FateOpsCommandsITBase.java +++ b/test/src/main/java/org/apache/accumulo/test/fate/FateOpsCommandsITBase.java @@ -59,6 +59,7 @@ import org.apache.accumulo.core.fate.Fate; import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.FateStore; import org.apache.accumulo.core.fate.ReadOnlyFateStore; import org.apache.accumulo.core.fate.user.UserFateStore; @@ -935,7 +936,10 @@ protected FastFate initFateWithDeadResCleaner(FateStore(env, store, true, Object::toString, DefaultConfiguration.getInstance()); + var fate = + new FastFate<>(env, store, true, Object::toString, DefaultConfiguration.getInstance()); + fate.setPartitions(Set.of(FatePartition.all(store.type()))); + return fate; } protected Fate initFateNoDeadResCleaner(FateStore store) { diff --git 
a/test/src/main/java/org/apache/accumulo/test/fate/FatePoolsWatcherITBase.java b/test/src/main/java/org/apache/accumulo/test/fate/FatePoolsWatcherITBase.java index ae24acbfa02..c09a1ee737f 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/FatePoolsWatcherITBase.java +++ b/test/src/main/java/org/apache/accumulo/test/fate/FatePoolsWatcherITBase.java @@ -32,6 +32,7 @@ import org.apache.accumulo.core.fate.Fate; import org.apache.accumulo.core.fate.FateId; import org.apache.accumulo.core.fate.FateInstanceType; +import org.apache.accumulo.core.fate.FatePartition; import org.apache.accumulo.core.fate.FateStore; import org.apache.accumulo.core.fate.Repo; import org.apache.accumulo.harness.SharedMiniClusterBase; @@ -92,6 +93,7 @@ protected void testIncrease1(FateStore store, ServerContext s final ConfigurationCopy config = initConfigIncTest1(); final var env = new PoolResizeTestEnv(); final Fate fate = new FastFate<>(env, store, false, r -> r + "", config); + fate.setPartitions(Set.of(FatePartition.all(store.type()))); boolean isUserStore = store.type() == FateInstanceType.USER; final Set set1 = isUserStore ? USER_FATE_OPS_SET1 : META_FATE_OPS_SET1; final Set set2 = isUserStore ? 
USER_FATE_OPS_SET2 : META_FATE_OPS_SET2; @@ -224,6 +226,7 @@ protected void testIncrease2(FateStore store, ServerContext s FateTestUtil.updateFateConfig(new ConfigurationCopy(), 2, fateExecName); final var env = new PoolResizeTestEnv(); final Fate fate = new FastFate<>(env, store, false, r -> r + "", config); + fate.setPartitions(Set.of(FatePartition.all(store.type()))); final int numWorkers = 2; final int newNumWorkers = 3; final Set allFateOps = @@ -305,6 +308,7 @@ protected void testDecrease(FateStore store, ServerContext sc final ConfigurationCopy config = initConfigDecTest(); final var env = new PoolResizeTestEnv(); final Fate fate = new FastFate<>(env, store, false, r -> r + "", config); + fate.setPartitions(Set.of(FatePartition.all(store.type()))); boolean isUserStore = store.type() == FateInstanceType.USER; final Set set1 = isUserStore ? USER_FATE_OPS_SET1 : META_FATE_OPS_SET1; final Set set2 = isUserStore ? USER_FATE_OPS_SET2 : META_FATE_OPS_SET2; @@ -435,6 +439,7 @@ protected void testIdleCountHistory(FateStore store, ServerCo final var env = new PoolResizeTestEnv(); final Fate fate = new FastFate<>(env, store, false, r -> r + "", config); + fate.setPartitions(Set.of(FatePartition.all(store.type()))); try { // We have two worker threads. 
Submit 3 transactions that won't complete yet so we can check // for a warning @@ -548,6 +553,7 @@ protected void testFatePoolsPartitioning(FateStore store, Ser final var env = new PoolResizeTestEnv(); final Fate fate = new FastFate<>(env, store, false, r -> r + "", config); + fate.setPartitions(Set.of(FatePartition.all(store.type()))); try { // seeding pool1/FateExecutor1 @@ -650,6 +656,7 @@ protected void testFateExecutorRename(FateStore store, Server final var config = FateTestUtil.updateFateConfig(new ConfigurationCopy(), poolSize, "AllFateOps"); final Fate fate = new FastFate<>(env, store, false, r -> r + "", config); + fate.setPartitions(Set.of(FatePartition.all(store.type()))); try { // start a single transaction diff --git a/test/src/main/java/org/apache/accumulo/test/fate/MultipleStoresITBase.java b/test/src/main/java/org/apache/accumulo/test/fate/MultipleStoresITBase.java index 248ea028032..5a5a7734508 100644 --- a/test/src/main/java/org/apache/accumulo/test/fate/MultipleStoresITBase.java +++ b/test/src/main/java/org/apache/accumulo/test/fate/MultipleStoresITBase.java @@ -323,6 +323,7 @@ private void testDeadReservationsCleanup(TestStoreFactory testStor try { fate1 = new FastFate<>(testEnv1, store1, true, Object::toString, config); + fate1.setPartitions(Set.of(FatePartition.all(store1.type()))); // Ensure nothing is reserved yet assertTrue( store1.getActiveReservations(Set.of(FatePartition.all(store1.type()))).isEmpty()); From 1c82a68e12bf14e16bc4492bc319328e9ec1fe03 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 19 Feb 2026 20:31:37 +0000 Subject: [PATCH 37/38] WIP --- .../java/org/apache/accumulo/test/MultipleManagerIT.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java index 5ed65068b44..0e5f716f803 100644 --- a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java +++ 
b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java @@ -75,8 +75,8 @@ *
    *
  • Starts new manager processes and verifies fate operations start running on them
  • *
  • Kills assistant/non-primary manager processes and verifies the system recovers
  • - *
  • Kills primary manager processes and verifies the system recovers
  • - *
  • Verifies that Accumulo API calls are not impacted by managers starting/stoppping
  • + *
  • Kills primary manager process and verifies the system recovers
  • + *
  • Verifies that Accumulo API calls are not impacted by managers starting/stopping
  • *
* */ @@ -204,8 +204,8 @@ public void testFate() throws Exception { managerWorkers.add(exec(FastFateCleanupManager.class)); waitToSeeManagers(ctx, 5, store, false); - // Kill two manager processes. Any fate operations they are running should resume elsewhere. - // Should also see three manager running operations after that. + // Kill two assistant manager processes. Any fate operations that were running should resume + // elsewhere. Should see three manager running operations after that. managerWorkers.get(2).destroy(); managerWorkers.get(3).destroy(); log.debug("Killed 2 managers"); From 28ca16ad08c0f0047b45e481a6564a2097dcda54 Mon Sep 17 00:00:00 2001 From: Keith Turner Date: Thu, 19 Feb 2026 23:40:41 +0000 Subject: [PATCH 38/38] WIP --- .../accumulo/test/MultipleManagerIT.java | 61 ++++++++++++++----- 1 file changed, 45 insertions(+), 16 deletions(-) diff --git a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java index 0e5f716f803..e4bd311155d 100644 --- a/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java +++ b/test/src/main/java/org/apache/accumulo/test/MultipleManagerIT.java @@ -20,6 +20,7 @@ import static java.util.stream.Collectors.toSet; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import java.io.IOException; @@ -62,6 +63,7 @@ import org.apache.accumulo.test.functional.ConfigurableMacBase; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; +import org.apache.hadoop.util.Sets; import org.junit.jupiter.api.Test; import org.slf4j.LoggerFactory; @@ -132,7 +134,7 @@ public void testFate() throws Exception { var splits = IntStream.range(1, 10).mapToObj(i -> String.format("%03d", i)).map(Text::new) .collect(Collectors.toCollection(TreeSet::new)); - var tableOpFutures = new ArrayList>(); + var tableOpFutures = 
new ArrayList>(); for (int i = 0; i < 10; i++) { var table = "t" + i; @@ -141,10 +143,11 @@ public void testFate() throws Exception { // table id is like "b". Was trying to follow a single table across multiple manager workers // processes. var tableOpsFuture = executor.submit(() -> { - while (!stop.get()) { + int loops = 0; + while (!stop.get() || loops == 0) { client.tableOperations().create(table); log.info("Created table {}", table); - if (stop.get()) { + if (stop.get() && loops > 0) { break; } var expectedRows = new HashSet(); @@ -158,22 +161,22 @@ public void testFate() throws Exception { } } log.info("Wrote data to table {}", table); - if (stop.get()) { + if (stop.get() && loops > 0) { break; } client.tableOperations().addSplits(table, splits); log.info("Split table {}", table); - if (stop.get()) { + if (stop.get() && loops > 0) { break; } client.tableOperations().compact(table, new CompactionConfig().setWait(true)); log.info("Compacted table {}", table); - if (stop.get()) { + if (stop.get() && loops > 0) { break; } client.tableOperations().merge(table, null, null); log.info("Merged table {}", table); - if (stop.get()) { + if (stop.get() && loops > 0) { break; } try (var scanner = client.createScanner(table)) { @@ -184,16 +187,15 @@ public void testFate() throws Exception { } client.tableOperations().delete(table); log.info("Deleted table {}", table); + loops++; } - return null; + return loops; }); tableOpFutures.add(tableOpsFuture); } var ctx = getServerContext(); - // FOLLOW_ON it seems any user can scan the fate table that is probably not good. Need to - // restrict read/write access to the system user. var store = new UserFateStore(ctx, SystemTables.FATE.tableName(), null, null); // Wait until three different manager are seen running fate operations. @@ -214,15 +216,18 @@ public void testFate() throws Exception { // Delete the lock of the primary manager which should cause it to halt. Then wait to see two // assistant managers. 
var primaryManager = ctx.getServerPaths().getManager(true); - log.debug("Delete lock of primary manager"); ServiceLock.deleteLock(ctx.getZooSession().asReaderWriter(), primaryManager); + log.debug("Deleted lock of primary manager"); waitToSeeManagers(ctx, 2, store, true); stop.set(true); // Wait for the background operations to complete and ensure that none had errors. Managers // stoppping/starting should not cause any problems for Accumulo API operations. for (var tof : tableOpFutures) { - tof.get(); + int loops = tof.get(); + log.debug("Background thread loops {}", loops); + // Check that each background thread made a least one loop over all its table operations. + assertTrue(loops > 0); } } @@ -234,8 +239,17 @@ public void testFate() throws Exception { private static void waitToSeeManagers(ClientContext context, int expectedManagers, UserFateStore store, boolean managersKilled) { + // Track what reservations exist when entering, want to see new reservations created during this + // function call. + var existingReservationUUIDs = + store.getActiveReservations(Set.of(FatePartition.all(FateInstanceType.USER))).values() + .stream().map(FateStore.FateReservation::getReservationUUID).collect(toSet()); + log.debug("existingReservationUUIDs {}", existingReservationUUIDs); + var assistants = context.getServerPaths().getAssistantManagers(ServiceLockPaths.AddressSelector.all(), true); + // Wait for there to be the expected number of managers in zookeeper. After manager processes + // are kill these entries in zookeeper may persist for a bit. 
while (assistants.size() != expectedManagers) { UtilWaitThread.sleep(1); assistants = context.getServerPaths() @@ -248,15 +262,30 @@ private static void waitToSeeManagers(ClientContext context, int expectedManager Set reservationsSeen = new HashSet<>(); Set extraSeen = new HashSet<>(); - while (reservationsSeen.size() < expectedManagers) { + Set expectedPrefixes = + Set.of('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'); + // Track fate uuid prefixes seen. This is done because fate is partitioned across managers by + // uuid ranges. If all uuid prefixes are seen then it is an indication that fate ids are being + // processed. After new manager processes are started or stopped the partitions should be + // reassigned. + Set seenPrefixes = new HashSet<>(); + + while (reservationsSeen.size() < expectedManagers || !seenPrefixes.equals(expectedPrefixes)) { var reservations = store.getActiveReservations(Set.of(FatePartition.all(FateInstanceType.USER))); - reservations.values().forEach(reservation -> { + reservations.forEach((fateId, reservation) -> { var slp = ServiceLockPaths.parse(Optional.empty(), reservation.getLockID().path); if (slp.getType().equals(Constants.ZMANAGER_ASSISTANT_LOCK)) { var hostPort = HostAndPort.fromString(slp.getServer()); if (expectedServers.contains(hostPort)) { - reservationsSeen.add(hostPort); + if (!existingReservationUUIDs.contains(reservation.getReservationUUID())) { + reservationsSeen.add(hostPort); + Character prefix = fateId.getTxUUIDStr().charAt(0); + if (seenPrefixes.add(prefix)) { + log.debug("Saw fate uuid prefix {} in id {} still waiting for {}", prefix, fateId, + Sets.difference(expectedPrefixes, seenPrefixes)); + } + } } else if (!managersKilled) { fail("Saw unexpected extra manager " + slp); } else { @@ -269,7 +298,7 @@ private static void waitToSeeManagers(ClientContext context, int expectedManager log.debug("managers seen in fate reservations :{}", reservationsSeen); if (managersKilled) { - 
log.debug("extra managers seen in fate reservations : {}", extraSeen); + log.debug("killed managers seen in fate reservations : {}", extraSeen); } assertEquals(expectedManagers, reservationsSeen.size()); }