From db778e02caca581b5fe99177b18ca370f7c8e1d0 Mon Sep 17 00:00:00 2001 From: "klaus.freitas.scclouds" Date: Fri, 16 Feb 2024 16:08:35 -0300 Subject: [PATCH 1/6] Refactor cloud-engine-orchestration module logs for Log4j2 --- .../com/cloud/agent/manager/AgentAttache.java | 57 +-- .../cloud/agent/manager/AgentManagerImpl.java | 107 ++---- .../agent/manager/ClusteredAgentAttache.java | 24 +- .../manager/ClusteredAgentManagerImpl.java | 162 +++------ .../agent/manager/DirectAgentAttache.java | 32 +- .../agent/manager/SynchronousListener.java | 9 +- .../cloud/vm/VirtualMachineManagerImpl.java | 328 ++++++------------ .../vm/VirtualMachinePowerStateSyncImpl.java | 51 +-- .../com/cloud/vm/VmWorkJobDispatcher.java | 6 +- .../cloud/entity/api/VMEntityManagerImpl.java | 4 +- .../orchestration/DataMigrationUtility.java | 6 +- .../orchestration/NetworkOrchestrator.java | 114 ++---- .../orchestration/StorageOrchestrator.java | 6 +- .../orchestration/VolumeOrchestrator.java | 150 ++++---- 14 files changed, 328 insertions(+), 728 deletions(-) diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java index 1b7069c6e73a..d9dce6a00c22 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java @@ -197,9 +197,7 @@ protected void cancel(final Request req) { } protected synchronized void cancel(final long seq) { - if (logger.isDebugEnabled()) { - logger.debug(log(seq, "Cancelling.")); - } + logger.debug(log(seq, "Cancelling.")); final Listener listener = _waitForList.remove(seq); if (listener != null) { listener.processDisconnect(_id, Status.Disconnected); @@ -223,9 +221,7 @@ protected String log(final long seq, final String msg) { } protected void registerListener(final long seq, final Listener listener) { - if (logger.isTraceEnabled()) { - logger.trace(log(seq, 
"Registering listener")); - } + logger.trace(log(seq, "Registering listener")); if (listener.getTimeout() != -1) { s_listenerExecutor.schedule(new Alarm(seq), listener.getTimeout(), TimeUnit.SECONDS); } @@ -233,9 +229,7 @@ protected void registerListener(final long seq, final Listener listener) { } protected Listener unregisterListener(final long sequence) { - if (logger.isTraceEnabled()) { - logger.trace(log(sequence, "Unregistering listener")); - } + logger.trace(log(sequence, "Unregistering listener")); return _waitForList.remove(sequence); } @@ -290,15 +284,10 @@ public boolean processAnswers(final long seq, final Response resp) { if (answers[0] != null && answers[0].getResult()) { processed = true; } - if (logger.isDebugEnabled()) { - logger.debug(log(seq, "Unable to find listener.")); - } + logger.debug(log(seq, "Unable to find listener.")); } else { processed = monitor.processAnswers(_id, seq, answers); - if (logger.isTraceEnabled()) { - logger.trace(log(seq, (processed ? "" : " did not ") + " processed ")); - } - + logger.trace(log(seq, (processed ? 
"" : " did not ") + " processed ")); if (!monitor.isRecurring()) { unregisterListener(seq); } @@ -324,9 +313,7 @@ protected void cancelAllCommands(final Status state, final boolean cancelActive) final Map.Entry entry = it.next(); it.remove(); final Listener monitor = entry.getValue(); - if (logger.isDebugEnabled()) { - logger.debug(log(entry.getKey(), "Sending disconnect to " + monitor.getClass())); - } + logger.debug(log(entry.getKey(), "Sending disconnect to " + monitor.getClass())); monitor.processDisconnect(_id, state); } } @@ -357,9 +344,9 @@ public void send(final Request req, final Listener listener) throws AgentUnavailableException { long seq = req.getSequence(); if (listener != null) { registerListener(seq, listener); - } else if (logger.isDebugEnabled()) { + } else { logger.debug(log(seq, "Routed from " + req.getManagementServerId())); } synchronized (this) { try { @@ -412,17 +398,13 @@ public Answer[] send(final Request req, final int wait) throws AgentUnavailableE logger.debug(log(seq, "Interrupted")); } if (answers != null) { - if (logger.isDebugEnabled()) { - new Response(req, answers).logD("Received: ", false); - } + new Response(req, answers).logD("Received: ", false); return answers; } answers = sl.getAnswers(); // Try it again. 
if (answers != null) { - if (logger.isDebugEnabled()) { - new Response(req, answers).logD("Received after timeout: ", true); - } + new Response(req, answers).logD("Received after timeout: ", true); _agentMgr.notifyAnswersToMonitors(_id, seq, answers); return answers; @@ -430,16 +412,11 @@ public Answer[] send(final Request req, final int wait) throws AgentUnavailableE final Long current = _currentSequence; if (current != null && seq != current) { - if (logger.isDebugEnabled()) { - logger.debug(log(seq, "Waited too long.")); - } + logger.debug(log(seq, "Waited too long.")); throw new OperationTimedoutException(req.getCommands(), _id, seq, wait, false); } - - if (logger.isDebugEnabled()) { - logger.debug(log(seq, "Waiting some more time because this is the current command")); - } + logger.debug(log(seq, "Waiting some more time because this is the current command")); } throw new OperationTimedoutException(req.getCommands(), _id, seq, wait * 2, true); @@ -467,22 +444,16 @@ public Answer[] send(final Request req, final int wait) throws AgentUnavailableE protected synchronized void sendNext(final long seq) { _currentSequence = null; if (_requests.isEmpty()) { - if (logger.isDebugEnabled()) { - logger.debug(log(seq, "No more commands found")); - } + logger.debug(log(seq, "No more commands found")); return; } Request req = _requests.pop(); - if (logger.isDebugEnabled()) { - logger.debug(log(req.getSequence(), "Sending now. is current sequence.")); - } + logger.debug(log(req.getSequence(), "Sending now. 
is current sequence.")); try { send(req); } catch (AgentUnavailableException e) { - if (logger.isDebugEnabled()) { - logger.debug(log(req.getSequence(), "Unable to send the next sequence")); - } + logger.debug(log(req.getSequence(), "Unable to send the next sequence")); cancel(req.getSequence()); } _currentSequence = req.getSequence(); diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 50c7b151d77e..b276a26dd002 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -266,9 +266,7 @@ public int registerForHostEvents(final Listener listener, final boolean connecti _cmdMonitors.add(new Pair(_monitorId, listener)); } } - if (logger.isDebugEnabled()) { - logger.debug("Registering listener " + listener.getClass().getSimpleName() + " with id " + _monitorId); - } + logger.debug("Registering listener " + listener.getClass().getSimpleName() + " with id " + _monitorId); return _monitorId; } } @@ -310,9 +308,7 @@ private AgentControlAnswer handleControlCommand(final AgentAttache attache, fina public void handleCommands(final AgentAttache attache, final long sequence, final Command[] cmds) { for (final Pair listener : _cmdMonitors) { final boolean processed = listener.second().processCommands(attache.getId(), sequence, cmds); - if (logger.isTraceEnabled()) { - logger.trace("SeqA " + attache.getId() + "-" + sequence + ": " + (processed ? "processed" : "not processed") + " by " + listener.getClass()); - } + logger.trace("SeqA " + attache.getId() + "-" + sequence + ": " + (processed ? 
"processed" : "not processed") + " by " + listener.getClass()); } } @@ -349,9 +345,7 @@ public Answer sendTo(final Long dcId, final HypervisorType type, final Command c String errorMsg = String.format("Error sending command %s to host %s, due to %s", cmd.getClass().getName(), host.getUuid(), e.getLocalizedMessage()); logger.error(errorMsg); - if (logger.isDebugEnabled()) { - logger.debug(errorMsg, e); - } + logger.debug(errorMsg, e); } if (answer != null) { return answer; @@ -469,15 +463,11 @@ protected Status investigate(final AgentAttache agent) { final Long hostId = agent.getId(); final HostVO host = _hostDao.findById(hostId); if (host != null && host.getType() != null && !host.getType().isVirtual()) { - if (logger.isDebugEnabled()) { - logger.debug("checking if agent (" + hostId + ") is alive"); - } + logger.debug("checking if agent (" + hostId + ") is alive"); final Answer answer = easySend(hostId, new CheckHealthCommand()); if (answer != null && answer.getResult()) { final Status status = Status.Up; - if (logger.isDebugEnabled()) { - logger.debug("agent (" + hostId + ") responded to checkHeathCommand, reporting that agent is " + status); - } + logger.debug("agent (" + hostId + ") responded to checkHeathCommand, reporting that agent is " + status); return status; } return _haMgr.investigate(hostId); @@ -519,9 +509,7 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { return; } final long hostId = attache.getId(); - if (logger.isDebugEnabled()) { - logger.debug("Remove Agent : " + hostId); - } + logger.debug("Remove Agent : " + hostId); AgentAttache removed = null; boolean conflict = false; synchronized (_agents) { @@ -540,9 +528,7 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { } for (final Pair monitor : _hostMonitors) { - if (logger.isDebugEnabled()) { - logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName()); - } + logger.debug("Sending Disconnect to listener: " + 
monitor.second().getClass().getName()); monitor.second().processDisconnect(hostId, nextState); } } @@ -550,9 +536,7 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { @Override public void notifyMonitorsOfNewlyAddedHost(long hostId) { for (final Pair monitor : _hostMonitors) { - if (logger.isDebugEnabled()) { - logger.debug("Sending host added to listener: " + monitor.second().getClass().getSimpleName()); - } + logger.debug("Sending host added to listener: " + monitor.second().getClass().getSimpleName()); monitor.second().processHostAdded(hostId); } @@ -562,9 +546,7 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi final long hostId = attache.getId(); final HostVO host = _hostDao.findById(hostId); for (final Pair monitor : _hostMonitors) { - if (logger.isDebugEnabled()) { - logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName()); - } + logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName()); for (int i = 0; i < cmd.length; i++) { try { monitor.second().processConnect(host, cmd[i], forRebalance); @@ -607,7 +589,7 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi Map detailsMap = readyAnswer.getDetailsMap(); if (detailsMap != null) { String uefiEnabled = detailsMap.get(Host.HOST_UEFI_ENABLE); - logger.debug(String.format("Got HOST_UEFI_ENABLE [%s] for hostId [%s]:", uefiEnabled, host.getUuid())); + logger.debug("Got HOST_UEFI_ENABLE [{}] for hostId [{}]:", uefiEnabled, host.getUuid()); if (uefiEnabled != null) { _hostDao.loadDetails(host); if (!uefiEnabled.equals(host.getDetails().get(Host.HOST_UEFI_ENABLE))) { @@ -803,9 +785,7 @@ protected Status getNextStatusOnDisconnection(Host host, final Status.Event even final Status currentStatus = host.getStatus(); Status nextStatus; if (currentStatus == Status.Down || currentStatus == Status.Alert || currentStatus == Status.Removed) { - if 
(logger.isDebugEnabled()) { - logger.debug(String.format("Host %s is already %s", host.getUuid(), currentStatus)); - } + logger.debug("Host {} is already {}", host.getUuid(), currentStatus); nextStatus = currentStatus; } else { try { @@ -815,10 +795,7 @@ protected Status getNextStatusOnDisconnection(Host host, final Status.Event even logger.debug(err); throw new CloudRuntimeException(err); } - - if (logger.isDebugEnabled()) { - logger.debug(String.format("The next status of agent %s is %s, current status is %s", host.getUuid(), nextStatus, currentStatus)); - } + logger.debug("The next status of agent {} is {}, current status is {}", host.getUuid(), nextStatus, currentStatus); } return nextStatus; } @@ -830,20 +807,17 @@ protected boolean handleDisconnectWithoutInvestigation(final AgentAttache attach GlobalLock joinLock = getHostJoinLock(hostId); if (joinLock.lock(60)) { try { - logger.info(String.format("Host %d is disconnecting with event %s", hostId, event)); + logger.info("Host {} is disconnecting with event {}", hostId, event); Status nextStatus = null; final HostVO host = _hostDao.findById(hostId); if (host == null) { - logger.warn(String.format("Can't find host with %d", hostId)); + logger.warn("Can't find host with {}", hostId); nextStatus = Status.Removed; } else { nextStatus = getNextStatusOnDisconnection(host, event); caService.purgeHostCertificate(host); } - - if (logger.isDebugEnabled()) { - logger.debug(String.format("Deregistering link for %d with state %s", hostId, nextStatus)); - } + logger.debug("Deregistering link for {} with state {}", hostId, nextStatus); removeAgent(attache, nextStatus); @@ -1053,9 +1027,7 @@ public void reconnect(final long hostId) throws AgentUnavailableException { @Override public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) { for (final Pair monitor : _hostMonitors) { - if (logger.isDebugEnabled()) { - logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName()); - } 
+ logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName()); monitor.second().processHostAboutToBeRemoved(hostId); } @@ -1064,9 +1036,7 @@ public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) { @Override public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) { for (final Pair monitor : _hostMonitors) { - if (logger.isDebugEnabled()) { - logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName()); - } + logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName()); monitor.second().processHostRemoved(hostId, clusterId); } @@ -1074,9 +1044,7 @@ public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) { public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - if (logger.isDebugEnabled()) { - logger.debug("Received agent disconnect event for host " + hostId); - } + logger.debug("Received agent disconnect event for host " + hostId); AgentAttache attache = null; attache = findAttache(hostId); if (attache != null) { @@ -1202,9 +1170,7 @@ public SimulateStartTask(final long id, final ServerResource resource, final Map @Override protected void runInContext() { try { - if (logger.isDebugEnabled()) { - logger.debug("Simulating start for resource " + resource.getName() + " id " + id); - } + logger.debug("Simulating start for resource " + resource.getName() + " id " + id); if (tapLoadingAgents(id, TapAgentsAction.Add)) { try { @@ -1288,25 +1254,25 @@ private void processHostHealthCheckResult(Boolean hostHealthCheckResult, long ho } HostVO host = _hostDao.findById(hostId); if (host == null) { - logger.error(String.format("Unable to find host with ID: %s", hostId)); + logger.error("Unable to find host with ID: {}", hostId); return; } if (!BooleanUtils.toBoolean(EnableKVMAutoEnableDisable.valueIn(host.getClusterId()))) { - 
logger.debug(String.format("%s is disabled for the cluster %s, cannot process the health check result " + - "received for the host %s", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host.getName())); + logger.debug("{} is disabled for the cluster {}, cannot process the health check result " + + "received for the host {}", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host.getName()); return; } ResourceState.Event resourceEvent = hostHealthCheckResult ? ResourceState.Event.Enable : ResourceState.Event.Disable; try { - logger.info(String.format("Host health check %s, auto %s KVM host: %s", + logger.info("Host health check {}, auto {} KVM host: {}", hostHealthCheckResult ? "succeeds" : "fails", hostHealthCheckResult ? "enabling" : "disabling", - host.getName())); + host.getName()); _resourceMgr.autoUpdateHostAllocationState(hostId, resourceEvent); } catch (NoTransitionException e) { - logger.error(String.format("Cannot Auto %s host: %s", resourceEvent, host.getName()), e); + logger.error("Cannot Auto {} host: {}", resourceEvent, host.getName(), e); } } @@ -1433,9 +1399,7 @@ protected void processRequest(final Link link, final Request request) { } else if (cmd instanceof ReadyAnswer) { final HostVO host = _hostDao.findById(attache.getId()); if (host == null) { - if (logger.isDebugEnabled()) { - logger.debug("Cant not find host " + attache.getId()); - } + logger.debug("Cant not find host " + attache.getId()); } answer = new Answer(cmd); } else { @@ -1450,12 +1414,10 @@ protected void processRequest(final Link link, final Request request) { } final Response response = new Response(request, answers, _nodeId, attache.getId()); - if (logger.isDebugEnabled()) { - if (logD) { - logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); - } else { - logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); - } + if (logD) { + logger.debug("SeqA " + attache.getId() + "-" + 
response.getSequence() + ": Sending " + response); + } else { + logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); } try { link.send(response.toBytes()); } @@ -1539,14 +1501,7 @@ public boolean tapLoadingAgents(final Long hostId, final TapAgentsAction action) public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) { try { _agentStatusLock.lock(); - if (logger.isDebugEnabled()) { - final ResourceState state = host.getResourceState(); - final StringBuilder msg = new StringBuilder("Transition:"); - msg.append("[Resource state = ").append(state); - msg.append(", Agent event = ").append(e.toString()); - msg.append(", Host id = ").append(host.getId()).append(", name = " + host.getName()).append("]"); - logger.debug(msg); - } + logger.debug("Transition:[Resource state = {}, Agent event = {}, Host id = {}, name = {}]", host.getResourceState(), e.toString(), host.getId(), host.getName()); host.setManagementServerId(msId); try { diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java index beafb4da8eb4..0f4abeb6e7c3 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java @@ -105,29 +105,21 @@ public void cancel(final long seq) { @Override public void routeToAgent(final byte[] data) throws AgentUnavailableException { - if (logger.isDebugEnabled()) { - logger.debug(log(Request.getSequence(data), "Routing from " + Request.getManagementServerId(data))); - } + logger.debug(log(Request.getSequence(data), "Routing from " + Request.getManagementServerId(data))); if (_link == null) { - if (logger.isDebugEnabled()) { - logger.debug(log(Request.getSequence(data), "Link is closed")); - } + logger.debug(log(Request.getSequence(data), "Link is closed")); 
throw new AgentUnavailableException("Link is closed", _id); } try { _link.send(data); } catch (ClosedChannelException e) { - if (logger.isDebugEnabled()) { - logger.debug(log(Request.getSequence(data), "Channel is closed")); - } + logger.debug(log(Request.getSequence(data), "Channel is closed")); throw new AgentUnavailableException("Channel to agent is closed", _id); } catch (NullPointerException e) { - if (logger.isDebugEnabled()) { - logger.debug(log(Request.getSequence(data), "Link is closed")); - } + logger.debug(log(Request.getSequence(data), "Link is closed")); // Note: since this block is not in synchronized. It is possible for _link to become null. throw new AgentUnavailableException("Channel to agent is null", _id); } @@ -148,9 +140,7 @@ public void send(final Request req, final Listener listener) throws AgentUnavail if (_transferMode) { - if (logger.isDebugEnabled()) { - logger.debug(log(seq, "Holding request as the corresponding agent is in transfer mode: ")); - } + logger.debug(log(seq, "Holding request as the corresponding agent is in transfer mode: ")); synchronized (this) { addRequestToTransfer(req); @@ -174,9 +164,7 @@ public void send(final Request req, final Listener listener) throws AgentUnavail ch = s_clusteredAgentMgr.connectToPeer(peerName, ch); if (ch == null) { - if (logger.isDebugEnabled()) { - logger.debug(log(seq, "Unable to forward " + req.toString())); - } + logger.debug(log(seq, "Unable to forward " + req.toString())); continue; } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index 1fe6b19ab681..05df9c6b15ff 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -170,9 +170,7 @@ public boolean start() { return false; } _timer.schedule(new 
DirectAgentScanTimerTask(), STARTUP_DELAY, ScanInterval.value()); - if (logger.isDebugEnabled()) { - logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds"); - } + logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds"); // Schedule tasks for agent rebalancing if (isAgentRebalanceEnabled()) { @@ -186,9 +184,7 @@ public boolean start() { public void scheduleHostScanTask() { _timer.schedule(new DirectAgentScanTimerTask(), 0); - if (logger.isDebugEnabled()) { - logger.debug("Scheduled a direct agent scan task"); - } + logger.debug("Scheduled a direct agent scan task"); } private void runDirectAgentScanTimerTask() { @@ -196,9 +192,7 @@ private void runDirectAgentScanTimerTask() { } private void scanDirectAgentToLoad() { - if (logger.isTraceEnabled()) { - logger.trace("Begin scanning directly connected hosts"); - } + logger.trace("Begin scanning directly connected hosts"); // for agents that are self-managed, threshold to be considered as disconnected after pingtimeout final long cutSeconds = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout(); @@ -234,9 +228,7 @@ private void scanDirectAgentToLoad() { } } } - if (logger.isTraceEnabled()) { - logger.trace("End scanning directly connected hosts"); - } + logger.trace("End scanning directly connected hosts"); } private class DirectAgentScanTimerTask extends ManagedContextTimerTask { @@ -265,9 +257,7 @@ protected AgentAttache createAttache(final long id) { _agents.put(id, attache); } if (old != null) { - if (logger.isDebugEnabled()) { - logger.debug("Remove stale agent attache from current management server"); - } + logger.debug("Remove stale agent attache from current management server"); removeAgent(old, Status.Removed); } return attache; @@ -291,7 +281,7 @@ protected AgentAttache createAttacheForConnect(final HostVO host, final Link lin @Override protected AgentAttache 
createAttacheForDirectConnect(final Host host, final ServerResource resource) { - logger.debug(String.format("Create ClusteredDirectAgentAttache for %s.", host)); + logger.debug("Create ClusteredDirectAgentAttache for {}.", host); final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getName(), _nodeId, resource, host.isInMaintenanceStates()); AgentAttache old = null; synchronized (_agents) { @@ -335,9 +325,7 @@ protected boolean handleDisconnect(final AgentAttache agent, final Status.Event @Override public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - if (logger.isDebugEnabled()) { - logger.debug("Received agent disconnect event for host " + hostId); - } + logger.debug("Received agent disconnect event for host " + hostId); final AgentAttache attache = findAttache(hostId); if (attache != null) { // don't process disconnect if the host is being rebalanced @@ -387,9 +375,7 @@ public void notifyNodesInCluster(final AgentAttache attache) { // notifies MS peers to schedule a host scan task immediately, triggered during addHost operation public void notifyNodesInClusterToScheduleHostScanTask() { - if (logger.isDebugEnabled()) { - logger.debug("Notifying other MS nodes to run host scan task"); - } + logger.debug("Notifying other MS nodes to run host scan task"); final Command[] cmds = new Command[] {new ScheduleHostScanTaskCommand()}; _clusterMgr.broadcast(0, _gson.toJson(cmds)); } @@ -430,9 +416,7 @@ public boolean routeToPeer(final String peer, final byte[] bytes) { return false; } try { - if (logger.isDebugEnabled()) { - logD(bytes, "Routing to peer"); - } + logD(bytes, "Routing to peer"); Link.write(ch, new ByteBuffer[] {ByteBuffer.wrap(bytes)}, sslEngine); return true; } catch (final IOException e) { @@ -518,14 +502,12 @@ public SocketChannel connectToPeer(final String peerName, final SocketChannel pr ch1.close(); throw new 
IOException(String.format("SSL: Handshake failed with peer management server '%s' on %s:%d ", peerName, ip, port)); } - logger.info(String.format("SSL: Handshake done with peer management server '%s' on %s:%d ", peerName, ip, port)); + logger.info("SSL: Handshake done with peer management server '{}' on {}:{} ", peerName, ip, port); } catch (final Exception e) { ch1.close(); throw new IOException("SSL: Fail to init SSL! " + e); } - if (logger.isDebugEnabled()) { - logger.debug("Connection to peer opened: " + peerName + ", ip: " + ip); - } + logger.debug("Connection to peer opened: " + peerName + ", ip: " + ip); _peers.put(peerName, ch1); _sslEngines.put(peerName, sslEngine); return ch1; @@ -542,9 +524,7 @@ public SocketChannel connectToPeer(final String peerName, final SocketChannel pr } } - if (logger.isTraceEnabled()) { - logger.trace("Found open channel for peer: " + peerName); - } + logger.trace("Found open channel for peer: " + peerName); return ch; } } @@ -569,9 +549,7 @@ protected AgentAttache getAttache(final Long hostId) throws AgentUnavailableExce AgentAttache agent = findAttache(hostId); if (agent == null || !agent.forForward()) { if (isHostOwnerSwitched(host)) { - if (logger.isDebugEnabled()) { - logger.debug("Host " + hostId + " has switched to another management server, need to update agent map with a forwarding agent attache"); - } + logger.debug("Host " + hostId + " has switched to another management server, need to update agent map with a forwarding agent attache"); agent = createAttache(hostId); } } @@ -649,9 +627,7 @@ protected void doTask(final Task task) throws TaskExecutionException { final Request req = Request.parse(data); final Command[] cmds = req.getCommands(); final CancelCommand cancel = (CancelCommand)cmds[0]; - if (logger.isDebugEnabled()) { - logD(data, "Cancel request received"); - } + logD(data, "Cancel request received"); agent.cancel(cancel.getSequence()); final Long current = agent._currentSequence; // if the request is the 
current request, always have to trigger sending next request in @@ -697,9 +673,9 @@ protected void doTask(final Task task) throws TaskExecutionException { final AgentAttache attache = (AgentAttache)link.attachment(); if (attache != null) { attache.sendNext(Request.getSequence(data)); - } else if (logger.isDebugEnabled()) { + } else { logD(data, "No attache to process " + Request.parse(data).toString()); } } return; } else { @@ -806,9 +781,7 @@ protected synchronized void runInContext() { try { if (!cancelled) { startRebalanceAgents(); - if (logger.isInfoEnabled()) { - logger.info("The agent load balancer task is now being cancelled"); - } + logger.info("The agent load balancer task is now being cancelled"); cancelled = true; } } catch (final Throwable e) { @@ -830,17 +803,13 @@ public void startRebalanceAgents() { if (!allManagedAgents.isEmpty() && !allMS.isEmpty()) { avLoad = allManagedAgents.size() / allMS.size(); } else { - if (logger.isDebugEnabled()) { - logger.debug("There are no hosts to rebalance in the system. Current number of active management server nodes in the system is " + allMS.size() + "; number of managed agents is " + logger.debug("There are no hosts to rebalance in the system. 
Current number of active management server nodes in the system is " + allMS.size() + "; number of managed agents is " + allManagedAgents.size()); - } return; } if (avLoad == 0L) { - if (logger.isDebugEnabled()) { - logger.debug("As calculated average load is less than 1, rounding it to 1"); - } + logger.debug("As calculated average load is less than 1, rounding it to 1"); avLoad = 1; } @@ -852,9 +821,8 @@ public void startRebalanceAgents() { hostsToRebalance = lbPlanner.getHostsToRebalance(node.getMsid(), avLoad); if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { break; - } else { - logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid()); } + logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid()); } if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { @@ -884,9 +852,7 @@ public void startRebalanceAgents() { if (transfer != null) { final HostTransferMapVO transferState = _hostTransferDao.findByIdAndFutureOwnerId(transfer.getId(), _nodeId); if (!result && transferState != null && transferState.getState() == HostTransferState.TransferRequested) { - if (logger.isDebugEnabled()) { - logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode"); - } + logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode"); // just remove the mapping (if exists) as nothing was done on the peer management // server yet _hostTransferDao.remove(transfer.getId()); @@ -909,9 +875,7 @@ private Answer[] sendRebalanceCommand(final long peer, final long agentId, final final Command[] cmds = commands.toCommands(); try { - if (logger.isDebugEnabled()) { - logger.debug("Forwarding " + cmds[0].toString() + " to " + peer); - } + logger.debug("Forwarding " + cmds[0].toString() + " to " + peer); final String peerName = 
Long.toString(peer); final String cmdStr = _gson.toJson(cmds); final String ansStr = _clusterMgr.execute(peerName, agentId, cmdStr, true); @@ -942,9 +906,7 @@ public Boolean propagateAgentEvent(final long agentId, final Event event) throws return null; } - if (logger.isDebugEnabled()) { - logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId); - } + logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId); final Command[] cmds = new Command[1]; cmds[0] = new ChangeAgentCommand(agentId, event); @@ -955,9 +917,7 @@ public Boolean propagateAgentEvent(final long agentId, final Event event) throws final Answer[] answers = _gson.fromJson(ansStr, Answer[].class); - if (logger.isDebugEnabled()) { - logger.debug("Result for agent change is " + answers[0].getResult()); - } + logger.debug("Result for agent change is " + answers[0].getResult()); return answers[0].getResult(); } @@ -967,9 +927,7 @@ private Runnable getTransferScanTask() { @Override protected void runInContext() { try { - if (logger.isTraceEnabled()) { - logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId); - } + logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId); synchronized (_agentToTransferIds) { if (_agentToTransferIds.size() > 0) { logger.debug("Found " + _agentToTransferIds.size() + " agents to transfer"); @@ -1024,9 +982,7 @@ protected void runInContext() { } } } else { - if (logger.isTraceEnabled()) { - logger.trace("Found no agents to be transferred by the management server " + _nodeId); - } + logger.trace("Found no agents to be transferred by the management server " + _nodeId); } } @@ -1075,9 +1031,7 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi } else if (futureOwnerId == _nodeId) { final HostVO host = _hostDao.findById(hostId); try { - if (logger.isDebugEnabled()) { - logger.debug("Disconnecting host " + 
host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); - } + logger.debug("Disconnecting host " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); final AgentAttache attache = findAttache(hostId); if (attache != null) { @@ -1085,9 +1039,7 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi } if (result) { - if (logger.isDebugEnabled()) { - logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); - } + logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); result = loadDirectlyConnectedHost(host, true); } else { logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); @@ -1112,9 +1064,7 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi protected void finishRebalance(final long hostId, final long futureOwnerId, final Event event) { final boolean success = event == Event.RebalanceCompleted ? 
true : false; - if (logger.isDebugEnabled()) { - logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event); - } + logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event); final AgentAttache attache = findAttache(hostId); if (attache == null || !(attache instanceof ClusteredAgentAttache)) { @@ -1223,9 +1173,7 @@ public RebalanceTask(final long hostId, final long currentOwnerId, final long fu @Override protected void runInContext() { try { - if (logger.isDebugEnabled()) { - logger.debug("Rebalancing host id=" + hostId); - } + logger.debug("Rebalancing host id=" + hostId); rebalanceHost(hostId, currentOwnerId, futureOwnerId); } catch (final Exception e) { logger.warn("Unable to rebalance host id=" + hostId, e); @@ -1234,9 +1182,7 @@ protected void runInContext() { } private String handleScheduleHostScanTaskCommand(final ScheduleHostScanTaskCommand cmd) { - if (logger.isDebugEnabled()) { - logger.debug("Intercepting resource manager command: " + _gson.toJson(cmd)); - } + logger.debug("Intercepting resource manager command: " + _gson.toJson(cmd)); try { scheduleHostScanTask(); @@ -1271,9 +1217,7 @@ public String getName() { @Override public String dispatch(final ClusterServicePdu pdu) { - if (logger.isDebugEnabled()) { - logger.debug("Dispatch ->" + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); - } + logger.debug("Dispatch ->" + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); Command[] cmds = null; try { @@ -1286,15 +1230,11 @@ public String dispatch(final ClusterServicePdu pdu) { if (cmds.length == 1 && cmds[0] instanceof ChangeAgentCommand) { // intercepted final ChangeAgentCommand cmd = (ChangeAgentCommand)cmds[0]; - if (logger.isDebugEnabled()) { - logger.debug("Intercepting command for agent change: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); - } + logger.debug("Intercepting command for agent change: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); boolean result = 
false; try { result = executeAgentUserRequest(cmd.getAgentId(), cmd.getEvent()); - if (logger.isDebugEnabled()) { - logger.debug("Result is " + result); - } + logger.debug("Result is " + result); } catch (final AgentUnavailableException e) { logger.warn("Agent is unavailable", e); @@ -1307,15 +1247,11 @@ public String dispatch(final ClusterServicePdu pdu) { } else if (cmds.length == 1 && cmds[0] instanceof TransferAgentCommand) { final TransferAgentCommand cmd = (TransferAgentCommand)cmds[0]; - if (logger.isDebugEnabled()) { - logger.debug("Intercepting command for agent rebalancing: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); - } + logger.debug("Intercepting command for agent rebalancing: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); boolean result = false; try { result = rebalanceAgent(cmd.getAgentId(), cmd.getEvent(), cmd.getCurrentOwner(), cmd.getFutureOwner()); - if (logger.isDebugEnabled()) { - logger.debug("Result is " + result); - } + logger.debug("Result is " + result); } catch (final AgentUnavailableException e) { logger.warn("Agent is unavailable", e); @@ -1354,25 +1290,19 @@ public String dispatch(final ClusterServicePdu pdu) { try { final long startTick = System.currentTimeMillis(); - if (logger.isDebugEnabled()) { - logger.debug("Dispatch -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); - } + logger.debug("Dispatch -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); final Answer[] answers = sendToAgent(pdu.getAgentId(), cmds, pdu.isStopOnError()); if (answers != null) { final String jsonReturn = _gson.toJson(answers); - if (logger.isDebugEnabled()) { - logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return result: " - + jsonReturn); - } + logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, 
return result: " + + jsonReturn); return jsonReturn; } else { - if (logger.isDebugEnabled()) { - logger.debug( - "Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return null result"); - } + logger.debug( + "Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return null result"); } } catch (final AgentUnavailableException e) { logger.warn("Agent is unavailable", e); @@ -1432,9 +1362,7 @@ private Runnable getAgentRebalanceScanTask() { @Override protected void runInContext() { try { - if (logger.isTraceEnabled()) { - logger.trace("Agent rebalance task check, management server id:" + _nodeId); - } + logger.trace("Agent rebalance task check, management server id:" + _nodeId); // initiate agent lb task will be scheduled and executed only once, and only when number of agents // loaded exceeds _connectedAgentsThreshold if (!_agentLbHappened) { @@ -1469,14 +1397,10 @@ protected void runInContext() { @Override public void rescan() { // schedule a scan task immediately - if (logger.isDebugEnabled()) { - logger.debug("Scheduling a host scan task"); - } + logger.debug("Scheduling a host scan task"); // schedule host scan task on current MS scheduleHostScanTask(); - if (logger.isDebugEnabled()) { - logger.debug("Notifying all peer MS to schedule host scan task"); - } + logger.debug("Notifying all peer MS to schedule host scan task"); } @Override diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java index 969af3d1ba61..1e51e9ef55dd 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java @@ -60,9 +60,7 @@ public DirectAgentAttache(AgentManagerImpl 
agentMgr, long id, String name, Serve @Override public void disconnect(Status state) { - if (logger.isDebugEnabled()) { - logger.debug("Processing disconnect " + _id + "(" + _name + ")"); - } + logger.debug("Processing disconnect " + _id + "(" + _name + ")"); for (ScheduledFuture future : _futures) { future.cancel(false); @@ -142,9 +140,7 @@ private synchronized void queueTask(Task task) { } private synchronized void scheduleFromQueue() { - if (logger.isTraceEnabled()) { - logger.trace("Agent attache=" + _id + ", task queue size=" + tasks.size() + ", outstanding tasks=" + _outstandingTaskCount.get()); - } + logger.trace("Agent attache=" + _id + ", task queue size=" + tasks.size() + ", outstanding tasks=" + _outstandingTaskCount.get()); while (!tasks.isEmpty() && _outstandingTaskCount.get() < _agentMgr.getDirectAgentThreadCap()) { _outstandingTaskCount.incrementAndGet(); _agentMgr.getDirectAgentPool().execute(tasks.remove()); @@ -178,14 +174,10 @@ protected synchronized void runInContext() { if (cmd.getContextParam("logid") != null) { ThreadContext.put("logcontextid", cmd.getContextParam("logid")); } - if (logger.isDebugEnabled()) { - logger.debug("Ping from " + _id + "(" + _name + ")"); - } + logger.debug("Ping from " + _id + "(" + _name + ")"); long seq = _seq++; - if (logger.isTraceEnabled()) { - logger.trace("SeqA " + _id + "-" + seq + ": " + new Request(_id, -1, cmd, false).toString()); - } + logger.trace("SeqA " + _id + "-" + seq + ": " + new Request(_id, -1, cmd, false).toString()); _agentMgr.handleCommands(DirectAgentAttache.this, seq, new Command[] {cmd}); } else { @@ -236,9 +228,7 @@ protected void runInContext() { Command[] cmds = _req.getCommands(); boolean stopOnError = _req.stopOnError(); - if (logger.isDebugEnabled()) { - logger.debug(log(seq, "Executing request")); - } + logger.debug(log(seq, "Executing request")); ArrayList answers = new ArrayList(cmds.length); for (int i = 0; i < cmds.length; i++) { Answer answer = null; @@ -270,9 +260,7 @@ 
protected void runInContext() { } Response resp = new Response(_req, answers.toArray(new Answer[answers.size()])); - if (logger.isDebugEnabled()) { - logger.debug(log(seq, "Response Received: ")); - } + logger.debug(log(seq, "Response Received: ")); processAnswers(seq, resp); } catch (Exception e) { @@ -298,9 +286,7 @@ protected void runInContext() { Command[] cmds = _req.getCommands(); boolean stopOnError = _req.stopOnError(); - if (logger.isDebugEnabled()) { - logger.debug(log(seq, "Executing request")); - } + logger.debug(log(seq, "Executing request")); ArrayList answers = new ArrayList(cmds.length); for (int i = 0; i < cmds.length; i++) { Answer answer = null; @@ -333,9 +319,7 @@ protected void runInContext() { } Response resp = new Response(_req, answers.toArray(new Answer[answers.size()])); - if (logger.isDebugEnabled()) { - logger.debug(log(seq, "Response Received: ")); - } + logger.debug(log(seq, "Response Received: ")); processAnswers(seq, resp); } catch (Throwable t) { diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java index b5687e2636a1..d97aed2f3fbe 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java @@ -71,8 +71,7 @@ public synchronized boolean processAnswers(long agentId, long seq, Answer[] resp @Override public synchronized boolean processDisconnect(long agentId, Status state) { - if (logger.isTraceEnabled()) - logger.trace("Agent disconnected, agent id: " + agentId + ", state: " + state + ". Will notify waiters"); + logger.trace("Agent disconnected, agent id: " + agentId + ", state: " + state + ". 
Will notify waiters"); _disconnected = true; notifyAll(); @@ -128,10 +127,8 @@ public synchronized Answer[] waitFor(int s) throws InterruptedException { } profiler.stop(); - if (logger.isTraceEnabled()) { - logger.trace("Synchronized command - sending completed, time: " + profiler.getDurationInMillis() + ", answer: " + - (_answers != null ? _answers[0].toString() : "null")); - } + logger.trace("Synchronized command - sending completed, time: " + profiler.getDurationInMillis() + ", answer: " + + (_answers != null ? _answers[0].toString() : "null")); return _answers; } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 5e7be6d448ac..2a0b7e700e48 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -457,15 +457,13 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t final LinkedHashMap> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map> extraDhcpOptions, final Map datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException { - logger.info(String.format("allocating virtual machine from template:%s with hostname:%s and %d networks", template.getUuid(), vmInstanceName, auxiliaryNetworks.size())); + logger.info("allocating virtual machine from template:{} with hostname:{} and {} networks", template.getUuid(), vmInstanceName, auxiliaryNetworks.size()); VMInstanceVO persistedVm = null; try { final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName); final Account owner = _entityMgr.findById(Account.class, vm.getAccountId()); - if (logger.isDebugEnabled()) { - logger.debug("Allocating entries for VM: " + vm); - } + logger.debug("Allocating entries for VM: " + vm); vm.setDataCenterId(plan.getDataCenterId()); if (plan.getPodId() != null) 
{ @@ -482,9 +480,7 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t } final Long rootDiskSizeFinal = rootDiskSize; - if (logger.isDebugEnabled()) { - logger.debug("Allocating nics for " + persistedVm); - } + logger.debug("Allocating nics for " + persistedVm); try { if (!vmProfile.getBootArgs().contains("ExternalLoadBalancerVm")) { @@ -494,9 +490,7 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t throw new CloudRuntimeException("Concurrent operation while trying to allocate resources for the VM", e); } - if (logger.isDebugEnabled()) { - logger.debug("Allocating disks for " + persistedVm); - } + logger.debug("Allocating disks for " + persistedVm); allocateRootVolume(persistedVm, template, rootDiskOfferingInfo, owner, rootDiskSizeFinal); @@ -525,9 +519,7 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t CallContext.unregister(); } - if (logger.isDebugEnabled()) { - logger.debug("Allocation completed for VM: " + persistedVm); - } + logger.debug("Allocation completed for VM: " + persistedVm); } catch (InsufficientCapacityException | CloudRuntimeException e) { // Failed VM will be in Stopped. 
Transition it to Error, so it can be expunged by ExpungeTask or similar try { @@ -535,7 +527,7 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t stateTransitTo(persistedVm, VirtualMachine.Event.OperationFailedToError, null); } } catch (NoTransitionException nte) { - logger.error(String.format("Failed to transition %s in %s state to Error state", persistedVm, persistedVm.getState().toString())); + logger.error("Failed to transition {} in {} state to Error state", persistedVm, persistedVm.getState().toString()); } throw e; } @@ -550,7 +542,7 @@ private void allocateRootVolume(VMInstanceVO vm, VirtualMachineTemplate template volumeMgr.allocateRawVolume(Type.ROOT, rootVolumeName, rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(), rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vm, template, owner, null); } else if (template.getFormat() == ImageFormat.BAREMETAL) { - logger.debug(String.format("%s has format [%s]. Skipping ROOT volume [%s] allocation.", template.toString(), ImageFormat.BAREMETAL, rootVolumeName)); + logger.debug("{} has format [{}]. 
Skipping ROOT volume [{}] allocation.", template.toString(), ImageFormat.BAREMETAL, rootVolumeName); } else { volumeMgr.allocateTemplatedVolumes(Type.ROOT, rootVolumeName, rootDiskOfferingInfo.getDiskOffering(), rootDiskSizeFinal, rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), template, vm, owner); @@ -599,9 +591,7 @@ private boolean isValidSystemVMType(VirtualMachine vm) { protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableException, OperationTimedoutException, ConcurrentOperationException { if (vm == null || vm.getRemoved() != null) { - if (logger.isDebugEnabled()) { - logger.debug("Unable to find vm or vm is expunged: " + vm); - } + logger.debug("Unable to find vm or vm is expunged: " + vm); return; } @@ -619,20 +609,18 @@ protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableExcepti throw new CloudRuntimeException("Unable to expunge " + vm, e); } - if (logger.isDebugEnabled()) { - logger.debug("Expunging vm " + vm); - } + logger.debug("Expunging vm " + vm); final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType()); List vmNics = profile.getNics(); - logger.debug(String.format("Cleaning up NICS [%s] of %s.", vmNics.stream().map(nic -> nic.toString()).collect(Collectors.joining(", ")),vm.toString())); + logger.debug("Cleaning up NICS [{}] of {}.", vmNics.stream().map(nic -> nic.toString()).collect(Collectors.joining(", ")),vm.toString()); final List nicExpungeCommands = hvGuru.finalizeExpungeNics(vm, profile.getNics()); _networkMgr.cleanupNics(profile); - logger.debug(String.format("Cleaning up hypervisor data structures (ex. SRs in XenServer) for managed storage. Data from %s.", vm.toString())); + logger.debug("Cleaning up hypervisor data structures (ex. SRs in XenServer) for managed storage. 
Data from {}.", vm.toString()); final List volumeExpungeCommands = hvGuru.finalizeExpungeVolumes(vm); @@ -699,15 +687,15 @@ protected void handleUnsuccessfulCommands(Commands cmds, VMInstanceVO vm) throws String vmToString = vm.toString(); if (cmds.isSuccessful()) { - logger.debug(String.format("The commands [%s] to %s were successful.", cmdsStr, vmToString)); + logger.debug("The commands [{}] to {} were successful.", cmdsStr, vmToString); return; } - logger.info(String.format("The commands [%s] to %s were unsuccessful. Handling answers.", cmdsStr, vmToString)); + logger.info("The commands [{}] to {} were unsuccessful. Handling answers.", cmdsStr, vmToString); Answer[] answers = cmds.getAnswers(); if (answers == null) { - logger.debug(String.format("There are no answers to commands [%s] to %s.", cmdsStr, vmToString)); + logger.debug("There are no answers to commands [{}] to {}.", cmdsStr, vmToString); return; } @@ -719,7 +707,7 @@ protected void handleUnsuccessfulCommands(Commands cmds, VMInstanceVO vm) throws throw new CloudRuntimeException(message); } - logger.debug(String.format("Commands [%s] to %s got answer [%s].", cmdsStr, vmToString, details)); + logger.debug("Commands [{}] to {} got answer [{}].", cmdsStr, vmToString, details); } } @@ -729,9 +717,7 @@ private void addAllExpungeCommandsFromList(List cmdList, Commands cmds, } for (final Command command : cmdList) { command.setBypassHostMaintenance(isValidSystemVMType(vm)); - if (logger.isTraceEnabled()) { - logger.trace(String.format("Adding expunge command [%s] for VM [%s]", command.toString(), vm.toString())); - } + logger.trace("Adding expunge command [{}] for VM [{}]", command.toString(), vm.toString()); cmds.addCommand(command); } } @@ -783,12 +769,12 @@ private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { Answer answer = _agentMgr.easySend(hostId, cmd); if (answer == null) { - logger.warn(String.format("Unable to get an answer to the modify targets command. 
Targets [%s].", cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", ")))); + logger.warn("Unable to get an answer to the modify targets command. Targets [{}].", () -> cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", "))); return; } if (!answer.getResult()) { - logger.warn(String.format("Unable to modify targets [%s] on the host [%s].", cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", ")), hostId)); + logger.warn("Unable to modify targets [{}] on the host [{}].", () -> cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", ")), () -> hostId); } } @@ -851,24 +837,18 @@ protected boolean checkWorkItems(final VMInstanceVO vm, final State state) throw while (true) { final ItWorkVO vo = _workDao.findByOutstandingWork(vm.getId(), state); if (vo == null) { - if (logger.isDebugEnabled()) { - logger.debug("Unable to find work for VM: " + vm + " and state: " + state); - } + logger.debug("Unable to find work for VM: " + vm + " and state: " + state); return true; } if (vo.getStep() == Step.Done) { - if (logger.isDebugEnabled()) { - logger.debug("Work for " + vm + " is " + vo.getStep()); - } + logger.debug("Work for " + vm + " is " + vo.getStep()); return true; } final VMInstanceVO instance = _vmDao.findById(vm.getId()); if (instance != null && instance.getState() == State.Running) { - if (logger.isDebugEnabled()) { - logger.debug("VM is already started in DB: " + vm); - } + logger.debug("VM is already started in DB: " + vm); return true; } @@ -907,9 +887,7 @@ public Ternary doInTransaction(final final ReservationContextImpl context = new ReservationContextImpl(work.getId(), journal, caller, account); if (stateTransitTo(vm, Event.StartRequested, null, work.getId())) { - if (logger.isDebugEnabled()) { - logger.debug("Successfully transitioned to start state for " + vm + " reservation id = " + work.getId()); - } + 
logger.debug("Successfully transitioned to start state for " + vm + " reservation id = " + work.getId()); return new Ternary<>(vm, context, work); } @@ -922,9 +900,7 @@ public Ternary doInTransaction(final return result; } } catch (final NoTransitionException e) { - if (logger.isDebugEnabled()) { - logger.debug("Unable to transition into Starting state due to " + e.getMessage()); - } + logger.debug("Unable to transition into Starting state due to " + e.getMessage()); } final VMInstanceVO instance = _vmDao.findById(vmId); @@ -932,15 +908,11 @@ public Ternary doInTransaction(final throw new ConcurrentOperationException("Unable to acquire lock on " + vm); } - if (logger.isDebugEnabled()) { - logger.debug("Determining why we're unable to update the state to Starting for " + instance + ". Retry=" + retry); - } + logger.debug("Determining why we're unable to update the state to Starting for " + instance + ". Retry=" + retry); final State state = instance.getState(); if (state == State.Running) { - if (logger.isDebugEnabled()) { - logger.debug("VM is already started: " + vm); - } + logger.debug("VM is already started: " + vm); return null; } @@ -998,11 +970,9 @@ public void advanceStart(final String vmUuid, final Map":params.get(VirtualMachineProfile.Param.BootIntoSetup)))); - } + logger.debug("start parameter value of {} == {} during dispatching", + VirtualMachineProfile.Param.BootIntoSetup.getName(), + (params == null?"":params.get(VirtualMachineProfile.Param.BootIntoSetup))); final VirtualMachine vm = _vmDao.findByUuid(vmUuid); VmWorkJobVO placeHolder = createPlaceHolderWork(vm.getId()); @@ -1014,11 +984,10 @@ public void advanceStart(final String vmUuid, final Map":params.get(VirtualMachineProfile.Param.BootIntoSetup)))); - } + logger.debug("start parameter value of {} == {} during processing of queued job", + VirtualMachineProfile.Param.BootIntoSetup.getName(), + (params == null?"":params.get(VirtualMachineProfile.Param.BootIntoSetup))); + final Outcome outcome = 
startVmThroughJobQueue(vmUuid, params, planToDeploy, planner); retrieveVmFromJobOutcome(outcome, vmUuid, "startVm"); @@ -1058,13 +1027,13 @@ protected void checkIfTemplateNeededForCreatingVmVolumes(VMInstanceVO vm) { final VMTemplateVO template = _templateDao.findById(vm.getTemplateId()); if (template == null) { String msg = "Template for the VM instance can not be found, VM instance configuration needs to be updated"; - logger.error(String.format("%s. Template ID: %d seems to be removed", msg, vm.getTemplateId())); + logger.error("{}. Template ID: {} seems to be removed", msg, vm.getTemplateId()); throw new CloudRuntimeException(msg); } final VMTemplateZoneVO templateZoneVO = templateZoneDao.findByZoneTemplate(vm.getDataCenterId(), template.getId()); if (templateZoneVO == null) { String msg = "Template for the VM instance can not be found in the zone ID: %s, VM instance configuration needs to be updated"; - logger.error(String.format("%s. %s", msg, template)); + logger.error("{}. {}", msg, template); throw new CloudRuntimeException(msg); } } @@ -1116,10 +1085,8 @@ public void orchestrateStart(final String vmUuid, final Map vlanToPersistenceMap = getVlanToPersistenceMapForVM(vm.getId()); @@ -1691,9 +1644,7 @@ public boolean unmanage(String vmUuid) { @Override public Boolean doInTransaction(TransactionStatus status) { - if (logger.isDebugEnabled()) { - logger.debug("Unmanaging vm " + vm); - } + logger.debug("Unmanaging vm " + vm); final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); final VirtualMachineGuru guru = getVmGuru(vm); @@ -1745,7 +1696,7 @@ private void unmanageVMVolumes(VMInstanceVO vm) { * - If 'unmanage.vm.preserve.nics' = false: then the NICs are removed while unmanaging */ private void unmanageVMNics(VirtualMachineProfile profile, VMInstanceVO vm) { - logger.debug(String.format("Cleaning up NICs of %s.", vm.toString())); + logger.debug("Cleaning up NICs of {}.", vm.toString()); Boolean preserveNics = 
UnmanagedVMsManager.UnmanageVMPreserveNic.valueIn(vm.getDataCenterId()); if (BooleanUtils.isTrue(preserveNics)) { logger.debug("Preserve NICs configuration enabled"); @@ -1833,7 +1784,7 @@ protected boolean sendStop(final VirtualMachineGuru guru, final VirtualMachinePr } } catch (final AgentUnavailableException | OperationTimedoutException e) { - logger.warn(String.format("Unable to stop %s due to [%s].", vm.toString(), e.getMessage()), e); + logger.warn("Unable to stop {} due to [{}].", vm.toString(), e.getMessage(), e); if (!force) { return false; } @@ -1915,21 +1866,21 @@ protected void releaseVmResources(final VirtualMachineProfile profile, final boo final State state = vm.getState(); try { _networkMgr.release(profile, forced); - logger.debug(String.format("Successfully released network resources for the VM %s in %s state", vm, state)); + logger.debug("Successfully released network resources for the VM {} in {} state", vm, state); } catch (final Exception e) { - logger.warn(String.format("Unable to release some network resources for the VM %s in %s state", vm, state), e); + logger.warn("Unable to release some network resources for the VM {} in {} state", vm, state, e); } try { if (vm.getHypervisorType() != HypervisorType.BareMetal) { volumeMgr.release(profile); - logger.debug(String.format("Successfully released storage resources for the VM %s in %s state", vm, state)); + logger.debug("Successfully released storage resources for the VM {} in {} state", vm, state); } } catch (final Exception e) { - logger.warn(String.format("Unable to release storage resources for the VM %s in %s state", vm, state), e); + logger.warn("Unable to release storage resources for the VM {} in {} state", vm, state, e); } - logger.debug(String.format("Successfully cleaned up resources for the VM %s in %s state", vm, state)); + logger.debug("Successfully cleaned up resources for the VM {} in {} state", vm, state); } @Override @@ -2031,31 +1982,23 @@ private void advanceStop(final 
VMInstanceVO vm, final boolean cleanUpEvenIfUnabl ConcurrentOperationException { final State state = vm.getState(); if (state == State.Stopped) { - if (logger.isDebugEnabled()) { - logger.debug("VM is already stopped: " + vm); - } + logger.debug("VM is already stopped: " + vm); return; } if (state == State.Destroyed || state == State.Expunging || state == State.Error) { - if (logger.isDebugEnabled()) { - logger.debug("Stopped called on " + vm + " but the state is " + state); - } + logger.debug("Stopped called on " + vm + " but the state is " + state); return; } final ItWorkVO work = _workDao.findByOutstandingWork(vm.getId(), vm.getState()); if (work != null) { - if (logger.isDebugEnabled()) { - logger.debug("Found an outstanding work item for this vm " + vm + " with state:" + vm.getState() + ", work id:" + work.getId()); - } + logger.debug("Found an outstanding work item for this vm " + vm + " with state:" + vm.getState() + ", work id:" + work.getId()); } final Long hostId = vm.getHostId(); if (hostId == null) { if (!cleanUpEvenIfUnableToStop) { - if (logger.isDebugEnabled()) { - logger.debug("HostId is null but this is not a forced stop, cannot stop vm " + vm + " with state:" + vm.getState()); - } + logger.debug("HostId is null but this is not a forced stop, cannot stop vm " + vm + " with state:" + vm.getState()); throw new CloudRuntimeException("Unable to stop " + vm); } try { @@ -2065,9 +2008,7 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl } if (work != null) { - if (logger.isDebugEnabled()) { - logger.debug("Updating work item to Done, id:" + work.getId()); - } + logger.debug("Updating work item to Done, id:" + work.getId()); work.setStep(Step.Done); _workDao.update(work.getId(), work); } @@ -2092,14 +2033,12 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl throw new CloudRuntimeException("We cannot stop " + vm + " when it is in state " + vm.getState()); } final boolean doCleanup = true; - 
if (logger.isDebugEnabled()) { - logger.warn("Unable to transition the state but we're moving on because it's forced stop", e1); - } + logger.warn("Unable to transition the state but we're moving on because it's forced stop", e1); if (doCleanup) { if (cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.StopRequested, cleanUpEvenIfUnableToStop)) { try { - if (logger.isDebugEnabled() && work != null) { + if (work != null) { logger.debug("Updating work item to Done, id:" + work.getId()); } if (!changeState(vm, Event.AgentReportStopped, null, work, Step.Done)) { @@ -2111,9 +2050,7 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl throw new CloudRuntimeException("Unable to stop " + vm, e); } } else { - if (logger.isDebugEnabled()) { - logger.debug("Failed to cleanup VM: " + vm); - } + logger.debug("Failed to cleanup VM: " + vm); throw new CloudRuntimeException("Failed to cleanup " + vm + " , current state " + vm.getState()); } } @@ -2163,7 +2100,7 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl } } catch (AgentUnavailableException | OperationTimedoutException e) { - logger.warn(String.format("Unable to stop %s due to [%s].", profile.toString(), e.toString()), e); + logger.warn("Unable to stop {} due to [{}].", profile.toString(), e.toString(), e); } finally { if (!stopped) { if (!cleanUpEvenIfUnableToStop) { @@ -2189,17 +2126,13 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl } } - if (logger.isDebugEnabled()) { - logger.debug(vm + " is stopped on the host. Proceeding to release resource held."); - } + logger.debug(vm + " is stopped on the host. 
Proceeding to release resource held."); releaseVmResources(profile, cleanUpEvenIfUnableToStop); try { if (work != null) { - if (logger.isDebugEnabled()) { - logger.debug("Updating the outstanding work item to Done, id:" + work.getId()); - } + logger.debug("Updating the outstanding work item to Done, id:" + work.getId()); work.setStep(Step.Done); _workDao.update(work.getId(), work); } @@ -2250,15 +2183,11 @@ public boolean stateTransitTo(final VirtualMachine vm1, final VirtualMachine.Eve public void destroy(final String vmUuid, final boolean expunge) throws AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); if (vm == null || vm.getState() == State.Destroyed || vm.getState() == State.Expunging || vm.getRemoved() != null) { - if (logger.isDebugEnabled()) { - logger.debug("Unable to find vm or vm is destroyed: " + vm); - } + logger.debug("Unable to find vm or vm is destroyed: " + vm); return; } - if (logger.isDebugEnabled()) { - logger.debug("Destroying vm " + vm + ", expunge flag " + (expunge ? "on" : "off")); - } + logger.debug("Destroying vm " + vm + ", expunge flag " + (expunge ? "on" : "off")); advanceStop(vmUuid, VmDestroyForcestop.value()); @@ -2370,11 +2299,9 @@ private void orchestrateStorageMigration(final String vmUuid, final Map volumeToPoolMap = prepareVmStorageMigration(vm, volumeToPool); try { - if(logger.isDebugEnabled()) { - logger.debug(String.format("Offline migration of %s vm %s with volumes", - vm.getHypervisorType().toString(), - vm.getInstanceName())); - } + logger.debug(String.format("Offline migration of %s vm %s with volumes", + vm.getHypervisorType().toString(), + vm.getInstanceName())); migrateThroughHypervisorOrStorage(vm, volumeToPoolMap); @@ -2452,9 +2379,7 @@ private void markVolumesInPool(VMInstanceVO vm, Answer[] hypervisorMigrationResu throw new CloudRuntimeException(String.format("VM ID: %s migration failed. 
%s", vm.getUuid(), hypervisorMigrationResults[0].getDetails())); } for (Answer answer : hypervisorMigrationResults) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Received an %s: %s", answer.getClass().getSimpleName(), answer)); - } + logger.debug(String.format("Received an %s: %s", answer.getClass().getSimpleName(), answer)); if (answer instanceof MigrateVmToPoolAnswer) { relevantAnswer = (MigrateVmToPoolAnswer) answer; } @@ -2467,14 +2392,9 @@ private void markVolumesInPool(VMInstanceVO vm, Answer[] hypervisorMigrationResu results = new ArrayList<>(); } List volumes = _volsDao.findUsableVolumesForInstance(vm.getId()); - if(logger.isDebugEnabled()) { - String msg = String.format("Found %d volumes for VM %s(uuid:%s, id:%d)", results.size(), vm.getInstanceName(), vm.getUuid(), vm.getId()); - logger.debug(msg); - } + logger.debug("Found {} volumes for VM {}(uuid:{}, id:{})", results.size(), vm.getInstanceName(), vm.getUuid(), vm.getId()); for (VolumeObjectTO result : results ) { - if(logger.isDebugEnabled()) { - logger.debug(String.format("Updating volume (%d) with path '%s' on pool '%s'", result.getId(), result.getPath(), result.getDataStoreUuid())); - } + logger.debug("Updating volume ({}) with path '{}' on pool '{}'", result.getId(), result.getPath(), result.getDataStoreUuid()); VolumeVO volume = _volsDao.findById(result.getId()); StoragePool pool = _storagePoolDao.findPoolByUUID(result.getDataStoreUuid()); if (volume == null || pool == null) { @@ -2605,10 +2525,7 @@ private void postStorageMigrationCleanup(VMInstanceVO vm, Map vlanToPersistenceMap = getVlanToPersistenceMapForVM(vmInstance.getId()); if (MapUtils.isNotEmpty(vlanToPersistenceMap)) { - logger.debug(String.format("Setting VLAN persistence to [%s] as part of migrate command for VM [%s].", new Gson().toJson(vlanToPersistenceMap), virtualMachineTO)); + logger.debug("Setting VLAN persistence to [{}] as part of migrate command for VM [{}].", new Gson().toJson(vlanToPersistenceMap), 
virtualMachineTO); migrateCommand.setVlanToPersistenceMap(vlanToPersistenceMap); } @@ -2885,15 +2798,15 @@ protected MigrateCommand buildMigrateCommand(VMInstanceVO vmInstance, VirtualMac Map answerDpdkInterfaceMapping = prepareForMigrationAnswer.getDpdkInterfaceMapping(); if (MapUtils.isNotEmpty(answerDpdkInterfaceMapping) && dpdkInterfaceMapping != null) { - logger.debug(String.format("Setting DPDK interface mapping to [%s] as part of migrate command for VM [%s].", new Gson().toJson(vlanToPersistenceMap), - virtualMachineTO)); + logger.debug("Setting DPDK interface mapping to [{}] as part of migrate command for VM [{}].", new Gson().toJson(vlanToPersistenceMap), + virtualMachineTO); dpdkInterfaceMapping.putAll(answerDpdkInterfaceMapping); migrateCommand.setDpdkInterfaceMapping(dpdkInterfaceMapping); } Integer newVmCpuShares = prepareForMigrationAnswer.getNewVmCpuShares(); if (newVmCpuShares != null) { - logger.debug(String.format("Setting CPU shares to [%d] as part of migrate command for VM [%s].", newVmCpuShares, virtualMachineTO)); + logger.debug("Setting CPU shares to [{}] as part of migrate command for VM [{}].", newVmCpuShares, virtualMachineTO); migrateCommand.setNewVmCpuShares(newVmCpuShares); } @@ -2967,7 +2880,7 @@ protected Map buildMapUsingUserInformation(VirtualMachinePr volume.getUuid(), targetPool.getUuid(), profile.getUuid(), targetHost.getUuid())); } if (currentPool.getId() == targetPool.getId()) { - logger.info(String.format("The volume [%s] is already allocated in storage pool [%s].", volume.getUuid(), targetPool.getUuid())); + logger.info("The volume [{}] is already allocated in storage pool [{}].", volume.getUuid(), targetPool.getUuid()); } volumeToPoolObjectMap.put(volume, targetPool); } @@ -3435,23 +3348,21 @@ private void orchestrateMigrateAway(final String vmUuid, final long srcHostId, f logger.warn("Unable to find destination for migrating the vm " + profile); throw new InsufficientServerCapacityException("Unable to find a server to 
migrate to.", DataCenter.class, host.getDataCenterId()); } - if (logger.isDebugEnabled()) { - logger.debug("Found destination " + dest + " for migrating to."); - } + logger.debug("Found destination " + dest + " for migrating to."); excludes.addHost(dest.getHost().getId()); try { migrate(vm, srcHostId, dest); return; } catch (ResourceUnavailableException | ConcurrentOperationException e) { - logger.warn(String.format("Unable to migrate %s to %s due to [%s]", vm.toString(), dest.getHost().toString(), e.getMessage()), e); + logger.warn("Unable to migrate {} to {} due to [{}]", vm.toString(), dest.getHost().toString(), e.getMessage(), e); } try { advanceStop(vmUuid, true); throw new CloudRuntimeException("Unable to migrate " + vm); } catch (final ResourceUnavailableException | ConcurrentOperationException | OperationTimedoutException e) { - logger.error(String.format("Unable to stop %s due to [%s].", vm.toString(), e.getMessage()), e); + logger.error("Unable to stop {} due to [{}].", vm.toString(), e.getMessage(), e); throw new CloudRuntimeException("Unable to migrate " + vm); } } @@ -3537,10 +3448,8 @@ public void advanceReboot(final String vmUuid, final Map":params.get(VirtualMachineProfile.Param.BootIntoSetup)))); - } + logger.debug("reboot parameter value of {} == {} at orchestration", VirtualMachineProfile.Param.BootIntoSetup.getName(), + (params == null? "":params.get(VirtualMachineProfile.Param.BootIntoSetup))); orchestrateReboot(vmUuid, params); } finally { if (placeHolder != null) { @@ -3548,10 +3457,8 @@ public void advanceReboot(final String vmUuid, final Map":params.get(VirtualMachineProfile.Param.BootIntoSetup)))); - } + logger.debug("reboot parameter value of {} == {} through job-queue", VirtualMachineProfile.Param.BootIntoSetup.getName(), + (params == null? 
"":params.get(VirtualMachineProfile.Param.BootIntoSetup))); final Outcome outcome = rebootVmThroughJobQueue(vmUuid, params); retrieveVmFromJobOutcome(outcome, vmUuid, "rebootVm"); @@ -3610,9 +3517,7 @@ private void checkAndSetEnterSetupMode(VirtualMachineTO vmTo, Map listStalledVMInTransitionStateOnUpHost(final long hostId, fin l.add(rs.getLong(1)); } } catch (SQLException e) { - logger.error(String.format("Unable to execute SQL [%s] with params {\"h.id\": %s, \"i.power_state_update_time\": \"%s\"} due to [%s].", sql, hostId, cutTimeStr, e.getMessage()), e); + logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}, \"i.power_state_update_time\": \"{}\"} due to [{}].", sql, hostId, cutTimeStr, e.getMessage(), e); } } return l; @@ -5023,7 +4915,7 @@ private List listVMInTransitionStateWithRecentReportOnUpHost(final long ho l.add(rs.getLong(1)); } } catch (final SQLException e) { - logger.error(String.format("Unable to execute SQL [%s] with params {\"h.id\": %s, \"i.power_state_update_time\": \"%s\", \"j.job_status\": %s} due to [%s].", sql, hostId, cutTimeStr, jobStatusInProgress, e.getMessage()), e); + logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}, \"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, hostId, cutTimeStr, jobStatusInProgress, e.getMessage(), e); } return l; } @@ -5051,7 +4943,7 @@ private List listStalledVMInTransitionStateOnDisconnectedHosts(final Date l.add(rs.getLong(1)); } } catch (final SQLException e) { - logger.error(String.format("Unable to execute SQL [%s] with params {\"i.power_state_update_time\": \"%s\", \"j.job_status\": %s} due to [%s].", sql, cutTimeStr, jobStatusInProgress, e.getMessage()), e); + logger.error("Unable to execute SQL [{}] with params {\"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, cutTimeStr, jobStatusInProgress, e.getMessage(), e); } return l; } @@ -5329,9 +5221,7 @@ public Outcome addVmToNetworkThroughJobQueue( } workJob = 
pendingWorkJobs.get(0); } else { - if (logger.isTraceEnabled()) { - logger.trace(String.format("no jobs to add network %s for vm %s yet", network, vm)); - } + logger.trace(String.format("no jobs to add network %s for vm %s yet", network, vm)); workJob = createVmWorkJobToAddNetwork(vm, network, requested, context, user, account); } @@ -5447,9 +5337,7 @@ private Pair orchestrateStart(final VmWorkStart work) th VMInstanceVO vm = findVmById(work.getVmId()); Boolean enterSetup = (Boolean)work.getParams().get(VirtualMachineProfile.Param.BootIntoSetup); - if (logger.isDebugEnabled()) { - logger.debug(String.format("orchestrating VM start for '%s' %s set to %s", vm.getInstanceName(), VirtualMachineProfile.Param.BootIntoSetup, enterSetup)); - } + logger.debug("orchestrating VM start for '{}' {} set to {}", vm.getInstanceName(), VirtualMachineProfile.Param.BootIntoSetup, enterSetup); try { orchestrateStart(vm.getUuid(), work.getParams(), work.getPlan(), _dpMgr.getDeploymentPlannerByName(work.getDeploymentPlanner())); @@ -5800,9 +5688,7 @@ public Pair findClusterAndHostIdForVm(VirtualMachine vm, boolean ski Long clusterId = null; if(hostId == null) { hostId = vm.getLastHostId(); - if (logger.isDebugEnabled()) { - logger.debug(String.format("host id is null, using last host id %d", hostId) ); - } + logger.debug("host id is null, using last host id {}", hostId); } if (hostId == null) { return findClusterAndHostIdForVmFromVolumes(vm.getId()); @@ -5944,7 +5830,7 @@ protected VMInstanceVO findVmById(Long vmId) { VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, vmId); if (vm == null) { - logger.warn(String.format("Could not find VM [%s].", vmId)); + logger.warn("Could not find VM [{}].", vmId); } assert vm != null; diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java index bbd4510f6f59..779d62eed47e 100644 --- 
a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java @@ -61,8 +61,7 @@ public void processHostVmStateReport(long hostId, Map report, boolean force) { - if (logger.isDebugEnabled()) - logger.debug("Process host VM state report from ping process. host: " + hostId); + logger.debug("Process host VM state report from ping process. host: " + hostId); Map translatedInfo = convertVmStateReport(report); processReport(hostId, translatedInfo, force); @@ -70,25 +69,18 @@ public void processHostVmStatePingReport(long hostId, Map translatedInfo, boolean force) { - if (logger.isDebugEnabled()) { - logger.debug("Process VM state report. host: " + hostId + ", number of records in report: " + translatedInfo.size()); - } + logger.debug("Process VM state report. host: " + hostId + ", number of records in report: " + translatedInfo.size()); for (Map.Entry entry : translatedInfo.entrySet()) { - if (logger.isDebugEnabled()) - logger.debug("VM state report. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue()); + logger.debug("VM state report. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue()); if (_instanceDao.updatePowerState(entry.getKey(), hostId, entry.getValue(), DateUtil.currentGMTTime())) { - if (logger.isInfoEnabled()) { - logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue()); - } + logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue()); _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey()); } else { - if (logger.isTraceEnabled()) { - logger.trace("VM power state does not change, skip DB writing. 
vm id: " + entry.getKey()); - } + logger.trace("VM power state does not change, skip DB writing. vm id: " + entry.getKey()); } } @@ -107,9 +99,7 @@ private void processReport(long hostId, Map tra // here we need to be wary of out of band migration as opposed to other, more unexpected state changes if (vmsThatAreMissingReport.size() > 0) { Date currentTime = DateUtil.currentGMTTime(); - if (logger.isDebugEnabled()) { - logger.debug("Run missing VM report. current time: " + currentTime.getTime()); - } + logger.debug("Run missing VM report. current time: " + currentTime.getTime()); // 2 times of sync-update interval for graceful period long milliSecondsGracefullPeriod = mgmtServiceConf.getPingInterval() * 2000L; @@ -138,16 +128,14 @@ private void processReport(long hostId, Map tra } } - if (logger.isInfoEnabled()) { - String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime); - logger.debug( - String.format("Detected missing VM. host: %d, vm id: %d(%s), power state: %s, last state update: %s" - , hostId - , instance.getId() - , instance.getUuid() - , VirtualMachine.PowerState.PowerReportMissing - , lastTime)); - } + String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime); + logger.debug( + String.format("Detected missing VM. 
host: %d, vm id: %d(%s), power state: %s, last state update: %s" + , hostId + , instance.getId() + , instance.getUuid() + , VirtualMachine.PowerState.PowerReportMissing + , lastTime)); long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); @@ -158,15 +146,11 @@ private void processReport(long hostId, Map tra // between the startime of this job and the currentTime of this missing-branch // an update might have occurred that we should not override in case of out of band migration if (_instanceDao.updatePowerState(instance.getId(), hostId, VirtualMachine.PowerState.PowerReportMissing, startTime)) { - if (logger.isDebugEnabled()) { - logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + instance.getId() + ", power state: PowerReportMissing "); - } + logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + instance.getId() + ", power state: PowerReportMissing "); _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId()); } else { - if (logger.isDebugEnabled()) { - logger.debug("VM power state does not change, skip DB writing. vm id: " + instance.getId()); - } + logger.debug("VM power state does not change, skip DB writing. vm id: " + instance.getId()); } } else { logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has not passed graceful period yet"); @@ -174,8 +158,7 @@ private void processReport(long hostId, Map tra } } - if (logger.isDebugEnabled()) - logger.debug("Done with process of VM state report. host: " + hostId); + logger.debug("Done with process of VM state report. 
host: " + hostId); } @Override diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobDispatcher.java b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobDispatcher.java index c7e30e366a94..ea037ac3e761 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobDispatcher.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkJobDispatcher.java @@ -75,8 +75,7 @@ public void runJob(AsyncJob job) { return; } - if (logger.isDebugEnabled()) - logger.debug("Run VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated()); + logger.debug("Run VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated()); try { if (_handlers == null || _handlers.isEmpty()) { logger.error("Invalid startup configuration, no work job handler is found. cmd: " + job.getCmd() + ", job info: " + job.getCmdInfo() @@ -103,8 +102,7 @@ public void runJob(AsyncJob job) { CallContext.unregister(); } } finally { - if (logger.isDebugEnabled()) - logger.debug("Done with run of VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated()); + logger.debug("Done with run of VM work job: " + cmd + " for VM " + work.getVmId() + ", job origin: " + job.getRelated()); } } catch(InvalidParameterValueException e) { logger.error("Unable to complete " + job + ", job origin:" + job.getRelated()); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java index 8d4fa21754cb..a94cbb2bf181 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java @@ -214,9 +214,7 @@ public String reserveVirtualMachine(VMEntityVO vmEntityVO, DeploymentPlanner pla if 
(reservationId != null) { return reservationId; } else { - if (logger.isDebugEnabled()) { - logger.debug("Cannot finalize the VM reservation for this destination found, retrying"); - } + logger.debug("Cannot finalize the VM reservation for this destination found, retrying"); exclude.addHost(dest.getHost().getId()); continue; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java index 31230442f170..c260f48dcf8c 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java @@ -97,15 +97,15 @@ public boolean filesReadyToMigrate(Long srcDataStoreId, List setupNetwork(final Account owner, final NetworkOf .getBroadcastDomainType() == BroadcastDomainType.Vxlan)) { final List configs = _networksDao.listBy(owner.getId(), offering.getId(), plan.getDataCenterId()); if (configs.size() > 0) { - if (logger.isDebugEnabled()) { - logger.debug("Found existing network configuration for offering " + offering + ": " + configs.get(0)); - } + logger.debug("Found existing network configuration for offering " + offering + ": " + configs.get(0)); if (errorIfAlreadySetup) { final InvalidParameterValueException ex = new InvalidParameterValueException( @@ -829,9 +827,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { public void allocate(final VirtualMachineProfile vm, final LinkedHashMap> networks, final Map> extraDhcpOptions) throws InsufficientCapacityException, ConcurrentOperationException { - if (logger.isTraceEnabled()) { - logger.trace(String.format("allocating networks for %s(template %s); %d networks", vm.getInstanceName(), vm.getTemplate().getUuid(), networks.size())); - } + logger.trace("allocating networks for {}(template 
{}); {} networks", vm.getInstanceName(), vm.getTemplate().getUuid(), networks.size()); int deviceId = 0; int size; size = determineNumberOfNicsRequired(vm, networks); @@ -1419,9 +1415,7 @@ public Pair implementNetwork(final long networkId, final throw ex; } - if (logger.isDebugEnabled()) { - logger.debug("Lock is acquired for network id " + networkId + " as a part of network implement"); - } + logger.debug("Lock is acquired for network id " + networkId + " as a part of network implement"); try { if (isNetworkImplemented(network)) { @@ -1430,9 +1424,7 @@ public Pair implementNetwork(final long networkId, final return implemented; } - if (logger.isDebugEnabled()) { - logger.debug("Asking " + guru.getName() + " to implement " + network); - } + logger.debug("Asking " + guru.getName() + " to implement " + network); final NetworkOfferingVO offering = _networkOfferingDao.findById(network.getNetworkOfferingId()); @@ -1497,9 +1489,7 @@ public Pair implementNetwork(final long networkId, final } _networksDao.releaseFromLockTable(networkId); - if (logger.isDebugEnabled()) { - logger.debug("Lock is released for network id " + networkId + " as a part of network implement"); - } + logger.debug("Lock is released for network id " + networkId + " as a part of network implement"); } } @@ -1598,9 +1588,7 @@ private void implementNetworkElements(final DeployDestination dest, final Reserv + network.getPhysicalNetworkId()); } - if (logger.isDebugEnabled()) { - logger.debug("Asking " + element.getName() + " to implement " + network); - } + logger.debug("Asking " + element.getName() + " to implement " + network); if (!element.implement(network, offering, dest, context)) { CloudRuntimeException ex = new CloudRuntimeException("Failed to implement provider " + element.getProvider().getName() + " for network with specified id"); @@ -1943,7 +1931,7 @@ private void setHypervisorHostnameInNetwork(VirtualMachineProfile vm, DeployDest logger.error(errorMsg); } } catch (ResourceUnavailableException 
e) { - logger.error(String.format("%s, error states %s", errorMsg, e)); + logger.error("{}, error states {}", errorMsg, e); } } } @@ -2065,9 +2053,7 @@ public NicProfile prepareNic(final VirtualMachineProfile vmProfile, final Deploy throw new CloudRuntimeException("Service provider " + element.getProvider().getName() + " either doesn't exist or is not enabled in physical network id: " + network.getPhysicalNetworkId()); } - if (logger.isDebugEnabled()) { - logger.debug("Asking " + element.getName() + " to prepare for " + nic); - } + logger.debug("Asking " + element.getName() + " to prepare for " + nic); if (!prepareElement(element, network, profile, vmProfile, dest, context)) { throw new InsufficientAddressCapacityException("unable to configure the dhcp service, due to insufficiant address capacity", Network.class, network.getId()); } @@ -2365,9 +2351,7 @@ public Pair doInTransaction(final TransactionStatus status) final List providersToImplement = getNetworkProviders(network.getId()); for (final NetworkElement element : networkElements) { if (providersToImplement.contains(element.getProvider())) { - if (logger.isDebugEnabled()) { - logger.debug("Asking " + element.getName() + " to release " + profile); - } + logger.debug("Asking " + element.getName() + " to release " + profile); //NOTE: Context appear to never be used in release method //implementations. 
Consider removing it from interface Element element.release(network, profile, vmProfile, null); @@ -2378,9 +2362,7 @@ public Pair doInTransaction(final TransactionStatus status) @Override public void cleanupNics(final VirtualMachineProfile vm) { - if (logger.isDebugEnabled()) { - logger.debug("Cleaning network for vm: " + vm.getId()); - } + logger.debug("Cleaning network for vm: " + vm.getId()); final List nics = _nicDao.listByVmId(vm.getId()); for (final NicVO nic : nics) { @@ -2432,9 +2414,7 @@ protected void removeNic(final VirtualMachineProfile vm, final NicVO nic) { final List providersToImplement = getNetworkProviders(network.getId()); for (final NetworkElement element : networkElements) { if (providersToImplement.contains(element.getProvider())) { - if (logger.isDebugEnabled()) { - logger.debug("Asking " + element.getName() + " to release " + nic); - } + logger.debug("Asking " + element.getName() + " to release " + nic); try { element.release(network, profile, vm, null); } catch (final ConcurrentOperationException ex) { @@ -3068,9 +3048,7 @@ public Boolean doInTransaction(final TransactionStatus status) { boolean result = false; if (success) { - if (logger.isDebugEnabled()) { - logger.debug("Network id=" + networkId + " is shutdown successfully, cleaning up corresponding resources now."); - } + logger.debug("Network id=" + networkId + " is shutdown successfully, cleaning up corresponding resources now."); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, networkFinal.getGuruName()); final NetworkProfile profile = convertNetworkToNetworkProfile(networkFinal.getId()); guru.shutdown(profile, _networkOfferingDao.findById(networkFinal.getNetworkOfferingId())); @@ -3108,9 +3086,7 @@ public Boolean doInTransaction(final TransactionStatus status) { } finally { if (network != null) { _networksDao.releaseFromLockTable(network.getId()); - if (logger.isDebugEnabled()) { - logger.debug("Lock is released for network " + network + " as a part of network 
shutdown"); - } + logger.debug("Lock is released for network " + network + " as a part of network shutdown"); } } } @@ -3149,9 +3125,7 @@ public boolean shutdownNetworkElementsAndResources(final ReservationContext cont for (final NetworkElement element : networkElements) { if (providersToShutdown.contains(element.getProvider())) { try { - if (logger.isDebugEnabled()) { - logger.debug("Sending network shutdown to " + element.getName()); - } + logger.debug("Sending network shutdown to " + element.getName()); if (!element.shutdown(network, context, cleanupElements)) { logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName()); success = false; @@ -3261,9 +3235,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con for (final NetworkElement element : networkElements) { if (providersToDestroy.contains(element.getProvider())) { try { - if (logger.isDebugEnabled()) { - logger.debug("Sending destroy to " + element); - } + logger.debug("Sending destroy to " + element); if (!element.destroy(network, context)) { success = false; @@ -3283,9 +3255,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con } if (success) { - if (logger.isDebugEnabled()) { - logger.debug("Network id=" + networkId + " is destroyed successfully, cleaning up corresponding resources now."); - } + logger.debug("Network id=" + networkId + " is destroyed successfully, cleaning up corresponding resources now."); final NetworkVO networkFinal = network; try { @@ -3420,20 +3390,16 @@ public void reallyRun() { } if (!networkDetailsDao.findDetails(Network.AssociatedNetworkId, String.valueOf(networkId), null).isEmpty()) { - logger.debug(String.format("Network %s is associated to a shared network, skipping", networkId)); + logger.debug("Network {} is associated to a shared network, skipping", networkId); continue; } final Long time = _lastNetworkIdsToFree.remove(networkId); if (time == null) { - if 
(logger.isDebugEnabled()) { - logger.debug("We found network " + networkId + " to be free for the first time. Adding it to the list: " + currentTime); - } + logger.debug("We found network " + networkId + " to be free for the first time. Adding it to the list: " + currentTime); stillFree.put(networkId, currentTime); } else if (time > currentTime - netGcWait) { - if (logger.isDebugEnabled()) { - logger.debug("Network " + networkId + " is still free but it's not time to shutdown yet: " + time); - } + logger.debug("Network " + networkId + " is still free but it's not time to shutdown yet: " + time); stillFree.put(networkId, time); } else { shutdownList.add(networkId); @@ -3933,9 +3899,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal // Mark all PF rules as revoked and apply them on the backend (not in the DB) final List pfRules = _portForwardingRulesDao.listByNetwork(networkId); - if (logger.isDebugEnabled()) { - logger.debug("Releasing " + pfRules.size() + " port forwarding rules for network id=" + networkId + " as a part of shutdownNetworkRules"); - } + logger.debug("Releasing " + pfRules.size() + " port forwarding rules for network id=" + networkId + " as a part of shutdownNetworkRules"); for (final PortForwardingRuleVO pfRule : pfRules) { logger.trace("Marking pf rule " + pfRule + " with Revoke state"); @@ -3955,9 +3919,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal // Mark all static rules as revoked and apply them on the backend (not in the DB) final List firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.StaticNat); final List staticNatRules = new ArrayList(); - if (logger.isDebugEnabled()) { - logger.debug("Releasing " + firewallStaticNatRules.size() + " static nat rules for network id=" + networkId + " as a part of shutdownNetworkRules"); - } + logger.debug("Releasing " + firewallStaticNatRules.size() + " static nat rules for network id=" + networkId + " 
as a part of shutdownNetworkRules"); for (final FirewallRuleVO firewallStaticNatRule : firewallStaticNatRules) { logger.trace("Marking static nat rule " + firewallStaticNatRule + " with Revoke state"); @@ -4005,9 +3967,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal // revoke all firewall rules for the network w/o applying them on the DB final List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); - if (logger.isDebugEnabled()) { - logger.debug("Releasing " + firewallRules.size() + " firewall ingress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); - } + logger.debug("Releasing " + firewallRules.size() + " firewall ingress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); for (final FirewallRuleVO firewallRule : firewallRules) { logger.trace("Marking firewall ingress rule " + firewallRule + " with Revoke state"); @@ -4025,9 +3985,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal } final List firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Egress); - if (logger.isDebugEnabled()) { - logger.debug("Releasing " + firewallEgressRules.size() + " firewall egress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); - } + logger.debug("Releasing " + firewallEgressRules.size() + " firewall egress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); try { // delete default egress rule @@ -4059,9 +4017,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal } if (network.getVpcId() != null) { - if (logger.isDebugEnabled()) { - logger.debug("Releasing Network ACL Items for network id=" + networkId + " as a part of shutdownNetworkRules"); - } + logger.debug("Releasing Network ACL Items for network id=" + networkId + " as a part of 
shutdownNetworkRules"); try { //revoke all Network ACLs for the network w/o applying them in the DB @@ -4148,9 +4104,7 @@ public void processConnect(final Host host, final StartupCommand cmd, final bool dcId = dc.getId(); final HypervisorType hypervisorType = startup.getHypervisorType(); - if (logger.isDebugEnabled()) { - logger.debug("Host's hypervisorType is: " + hypervisorType); - } + logger.debug("Host's hypervisorType is: " + hypervisorType); final List networkInfoList = new ArrayList(); @@ -4177,9 +4131,7 @@ public void processConnect(final Host host, final StartupCommand cmd, final bool } // send the names to the agent - if (logger.isDebugEnabled()) { - logger.debug("Sending CheckNetworkCommand to check the Network is setup correctly on Agent"); - } + logger.debug("Sending CheckNetworkCommand to check the Network is setup correctly on Agent"); final CheckNetworkCommand nwCmd = new CheckNetworkCommand(networkInfoList); final CheckNetworkAnswer answer = (CheckNetworkAnswer) _agentMgr.easySend(hostId, nwCmd); @@ -4198,9 +4150,7 @@ public void processConnect(final Host host, final StartupCommand cmd, final bool if (answer.needReconnect()) { throw new ConnectionException(false, "Reinitialize agent after network setup."); } - if (logger.isDebugEnabled()) { - logger.debug("Network setup is correct on Agent"); - } + logger.debug("Network setup is correct on Agent"); return; } } @@ -4360,7 +4310,7 @@ public NicProfile createNicForVm(final Network network, final NicProfile request private boolean getNicProfileDefaultNic(NicProfile nicProfile) { if (nicProfile != null) { - logger.debug(String.format("Using requested nic profile isDefaultNic value [%s].", nicProfile.isDefaultNic())); + logger.debug("Using requested nic profile isDefaultNic value [{}].", nicProfile.isDefaultNic()); return nicProfile.isDefaultNic(); } @@ -4677,9 +4627,9 @@ private String generateNewMacAddressIfForced(Network network, String macAddress, " and forced flag is disabled"); } try { - 
logger.debug(String.format("Generating a new mac address on network %s as the mac address %s already exists", network.getName(), macAddress)); + logger.debug("Generating a new mac address on network {} as the mac address {} already exists", network.getName(), macAddress); String newMacAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId()); - logger.debug(String.format("Successfully generated the mac address %s, using it instead of the conflicting address %s", newMacAddress, macAddress)); + logger.debug("Successfully generated the mac address {}, using it instead of the conflicting address {}", newMacAddress, macAddress); return newMacAddress; } catch (InsufficientAddressCapacityException e) { String msg = String.format("Could not generate a new mac address on network %s", network.getName()); @@ -4690,9 +4640,7 @@ private String generateNewMacAddressIfForced(Network network, String macAddress, @Override public void unmanageNics(VirtualMachineProfile vm) { - if (logger.isDebugEnabled()) { - logger.debug("Unmanaging NICs for VM: " + vm.getId()); - } + logger.debug("Unmanaging NICs for VM: " + vm.getId()); VirtualMachine virtualMachine = vm.getVirtualMachine(); final List nics = _nicDao.listByVmId(vm.getId()); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 1faf46361c64..db2cdedb466a 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -159,7 +159,7 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto } storageCapacities.put(srcDataStoreId, new Pair<>(null, null)); if (migrationPolicy == MigrationPolicy.COMPLETE) { - logger.debug(String.format("Setting source image store: %s 
to read-only", srcDatastore.getId())); + logger.debug("Setting source image store: {} to read-only", srcDatastore.getId()); storageService.updateImageStoreStatus(srcDataStoreId, true); } @@ -200,7 +200,7 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto } if (chosenFileForMigration.getPhysicalSize() > storageCapacities.get(destDatastoreId).first()) { - logger.debug(String.format("%s: %s too large to be migrated to %s", chosenFileForMigration.getType().name() , chosenFileForMigration.getUuid(), destDatastoreId)); + logger.debug("{}: {} too large to be migrated to {}", chosenFileForMigration.getType().name() , chosenFileForMigration.getUuid(), destDatastoreId); skipped += 1; continue; } @@ -267,7 +267,7 @@ public MigrationResponse migrateResources(Long srcImgStoreId, Long destImgStoreI } if (chosenFileForMigration.getPhysicalSize() > storageCapacities.get(destImgStoreId).first()) { - logger.debug(String.format("%s: %s too large to be migrated to %s", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid(), destImgStoreId)); + logger.debug("{}: {} too large to be migrated to {}", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid(), destImgStoreId); continue; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 52fd962ba0a9..32f53c6f7bb1 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -346,13 +346,13 @@ private Optional getPreferredStoragePool(List poolList if (storagePool.isPresent()) { String storagePoolToString = getReflectOnlySelectedFields(storagePool.get()); - logger.debug(String.format("The storage pool [%s] was specified for this account [%s] and 
will be used for allocation.", storagePoolToString, vm.getAccountId())); + logger.debug("The storage pool [{}] was specified for this account [{}] and will be used for allocation.", storagePoolToString, vm.getAccountId()); } else { String globalStoragePoolUuid = StorageManager.PreferredStoragePool.value(); storagePool = getMatchingStoragePool(globalStoragePoolUuid, poolList); - storagePool.ifPresent(pool -> logger.debug(String.format("The storage pool [%s] was specified in the Global Settings and will be used for allocation.", - getReflectOnlySelectedFields(pool)))); + storagePool.ifPresent(pool -> logger.debug("The storage pool [{}] was specified in the Global Settings and will be used for allocation.", + getReflectOnlySelectedFields(pool))); } return storagePool; } @@ -375,23 +375,21 @@ public StoragePool findStoragePool(DiskProfile dskCh, DataCenter dc, Pod pod, Lo StorageUtil.traceLogStoragePools(poolList, logger, "pools to choose from: "); // Check if the preferred storage pool can be used. If yes, use it. Optional storagePool = getPreferredStoragePool(poolList, vm); - if (logger.isTraceEnabled()) { - logger.trace(String.format("we have a preferred pool: %b", storagePool.isPresent())); - } + logger.trace("we have a preferred pool: {}", storagePool.isPresent()); StoragePool storage; if (storagePool.isPresent()) { storage = (StoragePool)this.dataStoreMgr.getDataStore(storagePool.get().getId(), DataStoreRole.Primary); - logger.debug(String.format("VM [%s] has a preferred storage pool [%s]. Volume Orchestrator found this storage using Storage Pool Allocator [%s] and will" - + " use it.", vm, storage, allocator.getClass().getSimpleName())); + logger.debug("VM [{}] has a preferred storage pool [{}]. 
Volume Orchestrator found this storage using Storage Pool Allocator [{}] and will" + + " use it.", vm, storage, allocator.getClass().getSimpleName()); } else { storage = (StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary); - logger.debug(String.format("VM [%s] does not have a preferred storage pool or it cannot be used. Volume Orchestrator will use the available Storage Pool" - + " [%s], which was discovered using Storage Pool Allocator [%s].", vm, storage, allocator.getClass().getSimpleName())); + logger.debug("VM [{}] does not have a preferred storage pool or it cannot be used. Volume Orchestrator will use the available Storage Pool" + + " [{}], which was discovered using Storage Pool Allocator [{}].", vm, storage, allocator.getClass().getSimpleName()); } return storage; } - logger.debug(String.format("Could not find any available Storage Pool using Storage Pool Allocator [%s].", allocator.getClass().getSimpleName())); + logger.debug("Could not find any available Storage Pool using Storage Pool Allocator [{}].", allocator.getClass().getSimpleName()); } logger.info("Volume Orchestrator could not find any available Storage Pool."); return null; @@ -540,9 +538,7 @@ public VolumeInfo createVolumeFromSnapshot(Volume volume, Snapshot snapshot, Use if (pool != null) { String poolToString = getReflectOnlySelectedFields(pool); - if (logger.isDebugEnabled()) { - logger.debug(String.format("Found a suitable pool [%s] to create the volume [%s] in.", poolToString, volumeToString)); - } + logger.debug("Found a suitable pool [{}] to create the volume [{}] in.", poolToString, volumeToString); break; } } @@ -703,10 +699,8 @@ public VolumeInfo createVolume(VolumeInfo volumeInfo, VirtualMachine vm, Virtual String poolToString = getReflectOnlySelectedFields(pool); - if (logger.isDebugEnabled()) { - logger.debug(String.format("Trying to create volume [%s] on storage pool [%s].", - volumeToString, poolToString)); - } + logger.debug("Trying to create 
volume [{}] on storage pool [{}].", + volumeToString, poolToString); DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); for (int i = 0; i < 2; i++) { // retry one more time in case of template reload is required for Vmware case @@ -722,7 +716,7 @@ public VolumeInfo createVolume(VolumeInfo volumeInfo, VirtualMachine vm, Virtual VolumeApiResult result = future.get(); if (result.isFailed()) { if (result.getResult().contains(REQUEST_TEMPLATE_RELOAD) && (i == 0)) { - logger.debug(String.format("Retrying to deploy template [%s] for VMware, attempt 2/2. ", templateToString)); + logger.debug("Retrying to deploy template [{}] for VMware, attempt 2/2. ", templateToString); continue; } else { String msg = String.format("Failed to create volume [%s] due to [%s].", volumeToString, result.getResult()); @@ -901,11 +895,11 @@ private DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering } else { rootDisksize = rootDisksize * 1024 * 1024 * 1024; if (rootDisksize > size) { - logger.debug(String.format("Using root disk size of [%s] bytes for the volume [%s].", toHumanReadableSize(rootDisksize), name)); + logger.debug("Using root disk size of [{}] bytes for the volume [{}].", toHumanReadableSize(rootDisksize), name); size = rootDisksize; } else { - logger.debug(String.format("The specified root disk size of [%s] bytes is smaller than the template. Using root disk size of [%s] bytes for the volume [%s].", - toHumanReadableSize(rootDisksize), size, name)); + logger.debug("The specified root disk size of [{}] bytes is smaller than the template. 
Using root disk size of [{}] bytes for the volume [{}].", + toHumanReadableSize(rootDisksize), size, name); } } } @@ -981,8 +975,8 @@ public List allocateTemplatedVolumes(Type type, String name, DiskOf if (template.isDeployAsIs() && vm.getType() != VirtualMachine.Type.SecondaryStorageVm) { List runningSSVMs = secondaryStorageVmDao.getSecStorageVmListInStates(null, vm.getDataCenterId(), State.Running); if (CollectionUtils.isEmpty(runningSSVMs)) { - logger.info(String.format("Could not find a running SSVM in datacenter [%s] for deploying VM as is. Not deploying VM [%s] as is.", - vm.getDataCenterId(), vm)); + logger.info("Could not find a running SSVM in datacenter [{}] for deploying VM as is. Not deploying VM [{}] as is.", + vm.getDataCenterId(), vm); } else { UserVmDetailVO configurationDetail = userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.DEPLOY_AS_IS_CONFIGURATION); if (configurationDetail != null) { @@ -1016,7 +1010,7 @@ public List allocateTemplatedVolumes(Type type, String name, DiskOf volumeSize = templateAsIsDisks.get(number).getVirtualSize(); deviceId = templateAsIsDisks.get(number).getDiskNumber(); } - logger.info(String.format("Adding disk object [%s] to VM [%s]", volumeName, vm)); + logger.info("Adding disk object [{}] to VM [{}]", volumeName, vm); DiskProfile diskProfile = allocateTemplatedVolume(type, volumeName, offering, volumeSize, minIops, maxIops, template, vm, owner, deviceId, configurationId); profiles.add(diskProfile); @@ -1113,18 +1107,14 @@ public VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo vol VirtualMachineTemplate rootDiskTmplt = _entityMgr.findById(VirtualMachineTemplate.class, vm.getTemplateId()); DataCenter dcVO = _entityMgr.findById(DataCenter.class, vm.getDataCenterId()); - if (logger.isTraceEnabled()) { - logger.trace(String.format("storage-pool %s/%s is associated with pod %d",storagePool.getName(), storagePool.getUuid(), storagePool.getPodId())); - } + logger.trace("storage-pool {}/{} is 
associated with pod {}",storagePool.getName(), storagePool.getUuid(), storagePool.getPodId()); Long podId = storagePool.getPodId() != null ? storagePool.getPodId() : vm.getPodIdToDeployIn(); Pod pod = _entityMgr.findById(Pod.class, podId); ServiceOffering svo = _entityMgr.findById(ServiceOffering.class, vm.getServiceOfferingId()); DiskOffering diskVO = _entityMgr.findById(DiskOffering.class, volumeInfo.getDiskOfferingId()); Long clusterId = storagePool.getClusterId(); - if (logger.isTraceEnabled()) { - logger.trace(String.format("storage-pool %s/%s is associated with cluster %d",storagePool.getName(), storagePool.getUuid(), clusterId)); - } + logger.trace("storage-pool {}/{} is associated with cluster {}",storagePool.getName(), storagePool.getUuid(), clusterId); Long hostId = vm.getHostId(); if (hostId == null && storagePool.isLocal()) { List poolHosts = storagePoolHostDao.listByPoolId(storagePool.getId()); @@ -1164,9 +1154,7 @@ protected VolumeVO switchVolume(final VolumeVO existingVolume, final VirtualMach Long volTemplateId = existingVolume.getTemplateId(); long vmTemplateId = vm.getTemplateId(); if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("switchVolume: Old volume's templateId [%s] does not match the VM's templateId [%s]. Updating templateId in the new volume.", volTemplateId, vmTemplateId)); - } + logger.debug("switchVolume: Old volume's templateId [{}] does not match the VM's templateId [{}]. 
Updating templateId in the new volume.", volTemplateId, vmTemplateId); templateIdToUse = vmTemplateId; } @@ -1178,16 +1166,16 @@ public VolumeVO doInTransaction(TransactionStatus status) { try { stateTransitTo(existingVolume, Volume.Event.DestroyRequested); } catch (NoTransitionException e) { - logger.error(String.format("Unable to destroy existing volume [%s] due to [%s].", volumeToString, e.getMessage())); + logger.error("Unable to destroy existing volume [{}] due to [{}].", volumeToString, e.getMessage()); } // In case of VMware VM will continue to use the old root disk until expunged, so force expunge old root disk if (vm.getHypervisorType() == HypervisorType.VMware) { - logger.info(String.format("Trying to expunge volume [%s] from primary data storage.", volumeToString)); + logger.info("Trying to expunge volume [{}] from primary data storage.", volumeToString); AsyncCallFuture future = volService.expungeVolumeAsync(volFactory.getVolume(existingVolume.getId())); try { future.get(); } catch (Exception e) { - logger.error(String.format("Failed to expunge volume [%s] from primary data storage due to [%s].", volumeToString, e.getMessage())); + logger.error("Failed to expunge volume [{}] from primary data storage due to [{}].", volumeToString, e.getMessage()); logger.debug("Exception: ", e); } } @@ -1214,9 +1202,7 @@ public void release(long vmId, long hostId) { HostVO host = _hostDao.findById(hostId); - if (logger.isDebugEnabled()) { - logger.debug(String.format("Releasing [%s] volumes for VM [%s] from host [%s].", volumesForVm.size(), _userVmDao.findById(vmId), host)); - } + logger.debug("Releasing [{}] volumes for VM [{}] from host [{}].", volumesForVm.size(), _userVmDao.findById(vmId), host); for (VolumeVO volumeForVm : volumesForVm) { VolumeInfo volumeInfo = volFactory.getVolume(volumeForVm.getId()); @@ -1239,9 +1225,7 @@ public void release(long vmId, long hostId) { public void cleanupVolumes(long vmId) throws ConcurrentOperationException { VMInstanceVO vm = 
_userVmDao.findById(vmId); - if (logger.isDebugEnabled()) { - logger.debug(String.format("Cleaning storage for VM [%s].", vm)); - } + logger.debug("Cleaning storage for VM [{}].", vm); final List volumesForVm = _volsDao.findByInstance(vmId); final List toBeExpunged = new ArrayList(); @@ -1257,13 +1241,11 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (!volumeAlreadyDestroyed) { destroyVolumeInContext(vol); } else { - logger.debug(String.format("Skipping destroy for the volume [%s] as it is in [%s] state.", volumeToString, vol.getState().toString())); + logger.debug("Skipping destroy for the volume [{}] as it is in [{}] state.", volumeToString, vol.getState().toString()); } toBeExpunged.add(vol); } else { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Detaching volume [%s].", volumeToString)); - } + logger.debug("Detaching volume [{}].", volumeToString); if (vm.getHypervisorType().equals(HypervisorType.VMware)) { _volumeApiService.detachVolumeViaDestroyVM(vmId, vol.getId()); } @@ -1281,7 +1263,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { future.get(); } catch (InterruptedException | ExecutionException e) { - logger.error(String.format("Failed to expunge volume [%s] due to [%s].", expungeToString, e.getMessage())); + logger.error("Failed to expunge volume [{}] due to [{}].", expungeToString, e.getMessage()); logger.debug("Exception: ", e); } } @@ -1399,12 +1381,12 @@ public Volume liveMigrateVolume(Volume volume, StoragePool destPool) { try { VolumeApiResult result = future.get(); if (result.isFailed()) { - logger.error(String.format("Volume [%s] migration failed due to [%s].", volToString, result.getResult())); + logger.error("Volume [{}] migration failed due to [{}].", volToString, result.getResult()); return null; } return result.getVolume(); } catch (InterruptedException | ExecutionException e) { - logger.error(String.format("Volume [%s] migration failed due to [%s].", volToString, 
e.getMessage())); + logger.error("Volume [{}] migration failed due to [{}].", volToString, e.getMessage()); logger.debug("Exception: ", e); return null; } @@ -1445,7 +1427,7 @@ public void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHos throw new CloudRuntimeException(msg); } } catch (InterruptedException | ExecutionException e) { - logger.error(String.format("Failed to migrate VM [%s] along with its volumes due to [%s].", vm, e.getMessage())); + logger.error("Failed to migrate VM [{}] along with its volumes due to [{}].", vm, e.getMessage()); logger.debug("Exception: ", e); } } @@ -1467,7 +1449,7 @@ public boolean storageMigration(VirtualMachineProfile vm, Map entry : volumeStoragePoolMap.entrySet()) { Volume result = migrateVolume(entry.getKey(), entry.getValue()); if (result == null) { @@ -1492,9 +1472,7 @@ public boolean storageMigration(VirtualMachineProfile vm, Map vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - if (logger.isDebugEnabled()) { - logger.debug(String.format("Preparing to migrate [%s] volumes for VM [%s].", vols.size(), vm.getVirtualMachine())); - } + logger.debug("Preparing to migrate [{}] volumes for VM [{}].", vols.size(), vm.getVirtualMachine()); for (VolumeVO vol : vols) { VolumeInfo volumeInfo = volFactory.getVolume(vol.getId()); @@ -1616,17 +1594,13 @@ private List getTasks(List vols, Map tasks.add(task); } else { if (vol.isRecreatable()) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Volume [%s] will be recreated on storage pool [%s], assigned by deploymentPlanner.", volToString, assignedPoolToString)); - } + logger.debug("Volume [{}] will be recreated on storage pool [{}], assigned by deploymentPlanner.", volToString, assignedPoolToString); VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null); tasks.add(task); } else { if (assignedPool.getId() != vol.getPoolId()) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Mismatch with the storage pool [%s] assigned 
by deploymentPlanner and the one associated with the volume [%s].", - assignedPoolToString, volToString)); - } + logger.debug("Mismatch with the storage pool [{}] assigned by deploymentPlanner and the one associated with the volume [{}].", + assignedPoolToString, volToString); DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, vol.getDiskOfferingId()); if (diskOffering.isUseLocalStorage()) { // Currently migration of local volume is not supported so bail out @@ -1645,10 +1619,8 @@ private List getTasks(List vols, Map storageMigrationEnabled = StorageMigrationEnabled.value(); } if (storageMigrationEnabled) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Shared volume [%s] will be migrated to the storage pool [%s], assigned by deploymentPlanner.", - volToString, assignedPoolToString)); - } + logger.debug("Shared volume [{}] will be migrated to the storage pool [{}], assigned by deploymentPlanner.", + volToString, assignedPoolToString); VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, assignedPool); tasks.add(task); } else { @@ -1671,10 +1643,8 @@ private List getTasks(List vols, Map StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId()); - if (logger.isDebugEnabled()) { - logger.debug(String.format("No need to recreate the volume [%s] since it already has an assigned pool: [%s]. Adding disk to the VM.", - volToString, pool.getUuid())); - } + logger.debug("No need to recreate the volume [{}] since it already has an assigned pool: [{}]. Adding disk to the VM.", + volToString, pool.getUuid()); VolumeTask task = new VolumeTask(VolumeTaskType.NOP, vol, pool); tasks.add(task); } @@ -1687,7 +1657,7 @@ protected void checkAndUpdateVolumeAccountResourceCount(VolumeVO originalEntry, if (Objects.equals(originalEntry.getSize(), updateEntry.getSize())) { return; } - logger.debug(String.format("Size mismatch found for %s after creation, old size: %d, new size: %d. 
Updating resource count", updateEntry, originalEntry.getSize(), updateEntry.getSize())); + logger.debug("Size mismatch found for {} after creation, old size: {}, new size: {}. Updating resource count", updateEntry, originalEntry.getSize(), updateEntry.getSize()); if (ObjectUtils.anyNull(originalEntry.getSize(), updateEntry.getSize())) { _resourceLimitMgr.recalculateResourceCount(updateEntry.getAccountId(), updateEntry.getDomainId(), ResourceType.primary_storage.getOrdinal()); @@ -1709,7 +1679,7 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) { destPool = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary); String destPoolToString = getReflectOnlySelectedFields(destPool); - logger.debug(String.format("Existing pool: [%s].", destPoolToString)); + logger.debug("Existing pool: [{}].", destPoolToString); } else { StoragePool pool = dest.getStorageForDisks().get(vol); destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); @@ -1731,9 +1701,7 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro dest.getStorageForDisks().put(newVol, poolWithOldVol); dest.getStorageForDisks().remove(vol); } - if (logger.isDebugEnabled()) { - logger.debug(String.format("Created new volume [%s] from old volume [%s].", newVolToString, volToString)); - } + logger.debug("Created new volume [{}] from old volume [{}].", newVolToString, volToString); } VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool); Long templateId = newVol.getTemplateId(); @@ -1754,7 +1722,7 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro } else { final VirtualMachineTemplate template = _entityMgr.findById(VirtualMachineTemplate.class, templateId); if (template == null) { - logger.error(String.format("Failed to find template: %d for %s", templateId, volume)); + logger.error("Failed to find template: {} for {}", templateId, volume); throw new 
CloudRuntimeException(String.format("Failed to find template for volume ID: %s", volume.getUuid())); } TemplateInfo templ = tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId()); @@ -1766,8 +1734,8 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro if (!primaryDataStore.isManaged()) { templ = tmplFactory.getReadyBypassedTemplateOnPrimaryStore(templateId, destPool.getId(), dest.getHost().getId()); } else { - logger.debug(String.format("Directly downloading template [%s] on host [%s] and copying it to the managed storage pool [%s].", - templateId, dest.getHost().getUuid(), destPool.getUuid())); + logger.debug("Directly downloading template [{}] on host [{}] and copying it to the managed storage pool [{}].", + templateId, dest.getHost().getUuid(), destPool.getUuid()); templ = volService.createManagedStorageTemplate(templateId, destPool.getId(), dest.getHost().getId()); } @@ -2007,12 +1975,12 @@ private void cleanupVolumeDuringAttachFailure(Long volumeId, Long vmId) { String volumeToString = getReflectOnlySelectedFields(volume); if (volume.getState().equals(Volume.State.Creating)) { - logger.debug(String.format("Removing volume [%s], as it was leftover from the last management server stop.", volumeToString)); + logger.debug("Removing volume [{}], as it was leftover from the last management server stop.", volumeToString); _volsDao.remove(volume.getId()); } if (volume.getState().equals(Volume.State.Attaching)) { - logger.warn(String.format("Volume [%s] failed to attach to the VM [%s] on the last management server stop, changing state back to Ready.", volumeToString, _userVmDao.findById(vmId))); + logger.warn("Volume [{}] failed to attach to the VM [{}] on the last management server stop, changing state back to Ready.", volumeToString, _userVmDao.findById(vmId)); volume.setState(Volume.State.Ready); _volsDao.update(volumeId, volume); } @@ -2035,11 +2003,11 @@ private void cleanupVolumeDuringMigrationFailure(Long volumeId, Long 
destPoolId) if (duplicateVol != null) { String duplicateVolToString = getReflectOnlySelectedFields(duplicateVol); - logger.debug(String.format("Removing volume [%s] from storage pool [%s] because it's duplicated.", duplicateVolToString, destPoolToString)); + logger.debug("Removing volume [{}] from storage pool [{}] because it's duplicated.", duplicateVolToString, destPoolToString); _volsDao.remove(duplicateVol.getId()); } - logger.debug(String.format("Changing volume [%s] state from Migrating to Ready in case of migration failure.", volumeToString)); + logger.debug("Changing volume [{}] state from Migrating to Ready in case of migration failure.", volumeToString); volume.setState(Volume.State.Ready); _volsDao.update(volumeId, volume); } @@ -2053,7 +2021,7 @@ private void cleanupVolumeDuringSnapshotFailure(Long volumeId, Long snapshotId) String volumeToString = getReflectOnlySelectedFields(volume); if (volume.getState() == Volume.State.Snapshotting) { - logger.debug(String.format("Changing volume [%s] state back to Ready.", volumeToString)); + logger.debug("Changing volume [{}] state back to Ready.", volumeToString); volume.setState(Volume.State.Ready); _volsDao.update(volume.getId(), volume); } @@ -2077,7 +2045,7 @@ public void cleanupStorageJobs() { cleanupVolumeDuringSnapshotFailure(work.getVolumeId(), work.getSnapshotId()); } } catch (Exception e) { - logger.error(String.format("Clean up job failed due to [%s]. Will continue with other clean up jobs.", e.getMessage())); + logger.error("Clean up job failed due to [{}]. Will continue with other clean up jobs.", e.getMessage()); logger.debug("Exception: ", e); } } @@ -2155,8 +2123,8 @@ public void updateVolumeDiskChain(long volumeId, String path, String chainInfo, } if (needUpdate) { - logger.info(String.format("Updating volume's disk chain info. Volume: [%s]. 
Path: [%s] -> [%s], Disk Chain Info: [%s] -> [%s].", - volToString, vol.getPath(), path, vol.getChainInfo(), chainInfo)); + logger.info("Updating volume's disk chain info. Volume: [{}]. Path: [{}] -> [{}], Disk Chain Info: [{}] -> [{}].", + volToString, vol.getPath(), path, vol.getChainInfo(), chainInfo); vol.setPath(path); vol.setChainInfo(chainInfo); if (updatedDataStoreUUID != null) { @@ -2268,7 +2236,7 @@ public DiskProfile updateImportedVolume(Type type, DiskOffering offering, Virtua @Override public void unmanageVolumes(long vmId) { if (logger.isDebugEnabled()) { - logger.debug(String.format("Unmanaging storage for VM [%s].", _userVmDao.findById(vmId))); + logger.debug("Unmanaging storage for VM [{}].", _userVmDao.findById(vmId)); } final List volumesForVm = _volsDao.findByInstance(vmId); @@ -2281,7 +2249,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { boolean volumeAlreadyDestroyed = (vol.getState() == Volume.State.Destroy || vol.getState() == Volume.State.Expunged || vol.getState() == Volume.State.Expunging); if (volumeAlreadyDestroyed) { - logger.debug(String.format("Skipping Destroy for the volume [%s] as it is in [%s] state.", volToString, vol.getState().toString())); + logger.debug("Skipping Destroy for the volume [{}] as it is in [{}] state.", volToString, vol.getState().toString()); } else { volService.unmanageVolume(vol.getId()); } From f0231b8a3a21560c5ebd2318d24a0b29afd35ecd Mon Sep 17 00:00:00 2001 From: "klaus.freitas.scclouds" Date: Fri, 1 Mar 2024 10:20:17 -0300 Subject: [PATCH 2/6] adding some more log refactor --- .../com/cloud/agent/manager/AgentAttache.java | 47 ++-- .../cloud/agent/manager/AgentManagerImpl.java | 150 +++++++------ .../agent/manager/ClusteredAgentAttache.java | 18 +- .../manager/ClusteredAgentManagerImpl.java | 202 +++++++++--------- .../agent/manager/ConnectedAgentAttache.java | 2 +- .../agent/manager/DirectAgentAttache.java | 40 ++-- 6 files changed, 224 insertions(+), 235 deletions(-) diff --git 
a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java index d9dce6a00c22..22c0b3fd71a9 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java @@ -105,6 +105,8 @@ public int compare(final Object o1, final Object o2) { } }; + protected String LOG_SEQ_FORMATTED_STRING; + protected final long _id; protected String _name = null; protected final ConcurrentHashMap _waitForList; @@ -137,6 +139,7 @@ protected AgentAttache(final AgentManagerImpl agentMgr, final long id, final Str _requests = new LinkedList(); _agentMgr = agentMgr; _nextSequence = new Long(s_rand.nextInt(Short.MAX_VALUE)).longValue() << 48; + LOG_SEQ_FORMATTED_STRING = String.format("Seq %d-{}: {}", _id); } public synchronized long getNextSequence() { @@ -197,7 +200,7 @@ protected void cancel(final Request req) { } protected synchronized void cancel(final long seq) { - logger.debug(log(seq, "Cancelling.")); + logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling."); final Listener listener = _waitForList.remove(seq); if (listener != null) { listener.processDisconnect(_id, Status.Disconnected); @@ -216,12 +219,8 @@ protected synchronized int findRequest(final long seq) { return Collections.binarySearch(_requests, seq, s_seqComparator); } - protected String log(final long seq, final String msg) { - return "Seq " + _id + "-" + seq + ": " + msg; - } - protected void registerListener(final long seq, final Listener listener) { - logger.trace(log(seq, "Registering listener")); + logger.trace(LOG_SEQ_FORMATTED_STRING, seq, "Registering listener"); if (listener.getTimeout() != -1) { s_listenerExecutor.schedule(new Alarm(seq), listener.getTimeout(), TimeUnit.SECONDS); } @@ -229,7 +228,7 @@ protected void registerListener(final long seq, final Listener listener) { } protected Listener 
unregisterListener(final long sequence) { - logger.trace(log(sequence, "Unregistering listener")); + logger.trace(LOG_SEQ_FORMATTED_STRING, sequence, "Unregistering listener"); return _waitForList.remove(sequence); } @@ -261,7 +260,7 @@ public int getNonRecurringListenersSize() { final Listener monitor = entry.getValue(); if (!monitor.isRecurring()) { //TODO - remove this debug statement later - logger.debug("Listener is " + entry.getValue() + " waiting on " + entry.getKey()); + logger.debug("Listener is {} waiting on {}", entry.getValue(), entry.getKey()); nonRecurringListenersList.add(monitor); } } @@ -284,10 +283,10 @@ public boolean processAnswers(final long seq, final Response resp) { if (answers[0] != null && answers[0].getResult()) { processed = true; } - logger.debug(log(seq, "Unable to find listener.")); + logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Unable to find listener."); } else { processed = monitor.processAnswers(_id, seq, answers); - logger.trace(log(seq, (processed ? "" : " did not ") + " processed ")); + logger.trace(LOG_SEQ_FORMATTED_STRING, seq, (processed ? 
"" : " did not ") + " processed "); if (!monitor.isRecurring()) { unregisterListener(seq); } @@ -313,7 +312,7 @@ protected void cancelAllCommands(final Status state, final boolean cancelActive) final Map.Entry entry = it.next(); it.remove(); final Listener monitor = entry.getValue(); - logger.debug(log(entry.getKey(), "Sending disconnect to " + monitor.getClass())); + logger.debug(LOG_SEQ_FORMATTED_STRING, entry.getKey(), "Sending disconnect to " + monitor.getClass()); monitor.processDisconnect(_id, state); } } @@ -345,7 +344,7 @@ public void send(final Request req, final Listener listener) throws AgentUnavail if (listener != null) { registerListener(seq, listener); } - logger.debug(log(seq, "Routed from " + req.getManagementServerId())); + logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Routed from " + req.getManagementServerId()); synchronized (this) { try { @@ -367,16 +366,14 @@ public void send(final Request req, final Listener listener) throws AgentUnavail if (req.executeInSequence() && _currentSequence == null) { _currentSequence = seq; - if (logger.isTraceEnabled()) { - logger.trace(log(seq, " is current sequence")); - } + logger.trace(LOG_SEQ_FORMATTED_STRING, seq, " is current sequence"); } } catch (AgentUnavailableException e) { - logger.info(log(seq, "Unable to send due to " + e.getMessage())); + logger.info(LOG_SEQ_FORMATTED_STRING, seq, "Unable to send due to " + e.getMessage()); cancel(seq); throw e; } catch (Exception e) { - logger.warn(log(seq, "Unable to send due to "), e); + logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Unable to send due to " + e.getMessage(), e); cancel(seq); throw new AgentUnavailableException("Problem due to other exception " + e.getMessage(), _id); } @@ -395,7 +392,7 @@ public Answer[] send(final Request req, final int wait) throws AgentUnavailableE try { answers = sl.waitFor(wait); } catch (final InterruptedException e) { - logger.debug(log(seq, "Interrupted")); + logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Interrupted"); } if 
(answers != null) { new Response(req, answers).logD("Received: ", false); @@ -412,16 +409,16 @@ public Answer[] send(final Request req, final int wait) throws AgentUnavailableE final Long current = _currentSequence; if (current != null && seq != current) { - logger.debug(log(seq, "Waited too long.")); + logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Waited too long."); throw new OperationTimedoutException(req.getCommands(), _id, seq, wait, false); } - logger.debug(log(seq, "Waiting some more time because this is the current command")); + logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Waiting some more time because this is the current command"); } throw new OperationTimedoutException(req.getCommands(), _id, seq, wait * 2, true); } catch (OperationTimedoutException e) { - logger.warn(log(seq, "Timed out on " + req.toString())); + logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Timed out on " + req.toString()); cancel(seq); final Long current = _currentSequence; if (req.executeInSequence() && (current != null && current == seq)) { @@ -429,7 +426,7 @@ public Answer[] send(final Request req, final int wait) throws AgentUnavailableE } throw e; } catch (Exception e) { - logger.warn(log(seq, "Exception while waiting for answer"), e); + logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Exception while waiting for answer", e); cancel(seq); final Long current = _currentSequence; if (req.executeInSequence() && (current != null && current == seq)) { @@ -444,16 +441,16 @@ public Answer[] send(final Request req, final int wait) throws AgentUnavailableE protected synchronized void sendNext(final long seq) { _currentSequence = null; if (_requests.isEmpty()) { - logger.debug(log(seq, "No more commands found")); + logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "No more commands found"); return; } Request req = _requests.pop(); - logger.debug(log(req.getSequence(), "Sending now. is current sequence.")); + logger.debug(LOG_SEQ_FORMATTED_STRING, req.getSequence(), "Sending now. 
is current sequence."); try { send(req); } catch (AgentUnavailableException e) { - logger.debug(log(req.getSequence(), "Unable to send the next sequence")); + logger.debug(LOG_SEQ_FORMATTED_STRING, req.getSequence(), "Unable to send the next sequence"); cancel(req.getSequence()); } _currentSequence = req.getSequence(); diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index b276a26dd002..b693729f8413 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -208,12 +208,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public boolean configure(final String name, final Map params) throws ConfigurationException { - logger.info("Ping Timeout is " + mgmtServiceConf.getPingTimeout()); + logger.info("Ping Timeout is {}.", mgmtServiceConf.getPingTimeout()); final int threads = DirectAgentLoadSize.value(); _nodeId = ManagementServerNode.getManagementServerId(); - logger.info("Configuring AgentManagerImpl. management server node id(msid): " + _nodeId); + logger.info("Configuring AgentManagerImpl. 
management server node id(msid): {}.", _nodeId); final long lastPing = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout(); _hostDao.markHostsAsDisconnected(_nodeId, lastPing); @@ -229,13 +229,13 @@ public boolean configure(final String name, final Map params) th _connectExecutor.allowCoreThreadTimeOut(true); _connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, this, caService); - logger.info("Listening on " + Port.value() + " with " + Workers.value() + " workers"); + logger.info("Listening on {} with {} workers.", Port.value(), Workers.value()); // executes all agent commands other than cron and ping _directAgentExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgent")); // executes cron and ping agent commands _cronJobExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgentCronJob")); - logger.debug("Created DirectAgentAttache pool with size: " + DirectAgentPoolSize.value()); + logger.debug("Created DirectAgentAttache pool with size: {}.", DirectAgentPoolSize.value()); _directAgentThreadCap = Math.round(DirectAgentPoolSize.value() * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0 _monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor")); @@ -266,7 +266,7 @@ public int registerForHostEvents(final Listener listener, final boolean connecti _cmdMonitors.add(new Pair(_monitorId, listener)); } } - logger.debug("Registering listener " + listener.getClass().getSimpleName() + " with id " + _monitorId); + logger.debug("Registering listener {} with id {}", listener.getClass().getSimpleName(), _monitorId); return _monitorId; } } @@ -286,7 +286,7 @@ public int registerForInitialConnects(final StartupCommandProcessor creator, fin @Override public void unregisterForHostEvents(final int id) { - logger.debug("Deregistering " + id); + logger.debug("Deregistering {}", id); 
_hostMonitors.remove(id); } @@ -301,14 +301,14 @@ private AgentControlAnswer handleControlCommand(final AgentAttache attache, fina } } - logger.warn("No handling of agent control command: " + cmd + " sent from " + attache.getId()); + logger.warn("No handling of agent control command: {} sent from {}", cmd, attache.getId()); return new AgentControlAnswer(cmd); } public void handleCommands(final AgentAttache attache, final long sequence, final Command[] cmds) { for (final Pair listener : _cmdMonitors) { final boolean processed = listener.second().processCommands(attache.getId(), sequence, cmds); - logger.trace("SeqA " + attache.getId() + "-" + sequence + ": " + (processed ? "processed" : "not processed") + " by " + listener.getClass()); + logger.trace("SeqA {}-{}: {} by {}", attache.getId(), sequence, (processed ? "processed" : "not processed"), listener.getClass()); } } @@ -366,7 +366,7 @@ public Answer send(final Long hostId, final Command cmd) throws AgentUnavailable } if (answers != null && answers[0] instanceof UnsupportedAnswer) { - logger.warn("Unsupported Command: " + answers[0].getDetails()); + logger.warn("Unsupported Command: {}", answers[0].getDetails()); return answers[0]; } @@ -463,11 +463,11 @@ protected Status investigate(final AgentAttache agent) { final Long hostId = agent.getId(); final HostVO host = _hostDao.findById(hostId); if (host != null && host.getType() != null && !host.getType().isVirtual()) { - logger.debug("checking if agent (" + hostId + ") is alive"); + logger.debug("Checking if agent ({}) is alive", hostId); final Answer answer = easySend(hostId, new CheckHealthCommand()); if (answer != null && answer.getResult()) { final Status status = Status.Up; - logger.debug("agent (" + hostId + ") responded to checkHeathCommand, reporting that agent is " + status); + logger.debug("Agent ({}) responded to checkHealthCommand, reporting that agent is {}", hostId, status); return status; } return _haMgr.investigate(hostId); @@ -481,7 +481,7 @@ 
protected AgentAttache getAttache(final Long hostId) throws AgentUnavailableExce } final AgentAttache agent = findAttache(hostId); if (agent == null) { - logger.debug("Unable to find agent for " + hostId); + logger.debug("Unable to find agent for {}", hostId); throw new AgentUnavailableException("Unable to find agent ", hostId); } @@ -509,7 +509,7 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { return; } final long hostId = attache.getId(); - logger.debug("Remove Agent : " + hostId); + logger.debug("Remove Agent : {}", hostId); AgentAttache removed = null; boolean conflict = false; synchronized (_agents) { @@ -521,14 +521,14 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { } } if (conflict) { - logger.debug("Agent for host " + hostId + " is created when it is being disconnected"); + logger.debug("Agent for host {} is created when it is being disconnected", hostId); } if (removed != null) { removed.disconnect(nextState); } for (final Pair monitor : _hostMonitors) { - logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName()); + logger.debug("Sending Disconnect to listener: {}", monitor.second().getClass().getName()); monitor.second().processDisconnect(hostId, nextState); } } @@ -536,7 +536,7 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { @Override public void notifyMonitorsOfNewlyAddedHost(long hostId) { for (final Pair monitor : _hostMonitors) { - logger.debug("Sending host added to listener: " + monitor.second().getClass().getSimpleName()); + logger.debug("Sending host added to listener: {}", monitor.second().getClass().getSimpleName()); monitor.second().processHostAdded(hostId); } @@ -546,7 +546,7 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi final long hostId = attache.getId(); final HostVO host = _hostDao.findById(hostId); for (final Pair monitor : _hostMonitors) { - logger.debug("Sending Connect 
to listener: " + monitor.second().getClass().getSimpleName()); + logger.debug("Sending Connect to listener: {}", monitor.second().getClass().getSimpleName()); for (int i = 0; i < cmd.length; i++) { try { monitor.second().processConnect(host, cmd[i], forRebalance); @@ -566,7 +566,7 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); } else { - logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage(), e); + logger.error("Monitor {} says there is an error in the connect process for {} due to {}", monitor.second().getClass().getSimpleName(), hostId, e.getMessage(), e); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); } @@ -683,7 +683,7 @@ private ServerResource loadResourcesWithoutHypervisor(final HostVO host) { try { resource.configure(host.getName(), params); } catch (final ConfigurationException e) { - logger.warn("Unable to configure resource due to " + e.getMessage()); + logger.warn("Unable to configure resource due to {}", e.getMessage()); return null; } @@ -706,14 +706,14 @@ protected boolean loadDirectlyConnectedHost(final HostVO host, final boolean for // load the respective discoverer final Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType()); if (discoverer == null) { - logger.info("Could not to find a Discoverer to load the resource: " + host.getId() + " for hypervisor type: " + host.getHypervisorType()); + logger.info("Could not to find a Discoverer to load the resource: {} for hypervisor type: {}", host.getId(), host.getHypervisorType()); resource = loadResourcesWithoutHypervisor(host); } else { resource = 
discoverer.reloadResource(host); } if (resource == null) { - logger.warn("Unable to load the resource: " + host.getId()); + logger.warn("Unable to load the resource: {}", host.getId()); return false; } @@ -739,7 +739,7 @@ protected boolean loadDirectlyConnectedHost(final HostVO host, final boolean for } protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException { - logger.debug("create DirectAgentAttache for " + host.getId()); + logger.debug("create DirectAgentAttache for {}", host.getId()); final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getName(), resource, host.isInMaintenanceStates()); AgentAttache old = null; @@ -760,14 +760,12 @@ public boolean stop() { _connection.stop(); } - logger.info("Disconnecting agents: " + _agents.size()); + logger.info("Disconnecting agents: {}", _agents.size()); synchronized (_agents) { for (final AgentAttache agent : _agents.values()) { final HostVO host = _hostDao.findById(agent.getId()); if (host == null) { - if (logger.isDebugEnabled()) { - logger.debug("Cant not find host " + agent.getId()); - } + logger.debug("Cant not find host {}", agent.getId()); } else { if (!agent.forForward()) { agentStatusTransitTo(host, Event.ManagementServerDown, _nodeId); @@ -852,22 +850,22 @@ protected boolean handleDisconnectWithInvestigation(final AgentAttache attache, if (nextStatus == Status.Alert) { /* OK, we are going to the bad status, let's see what happened */ - logger.info("Investigating why host " + hostId + " has disconnected with event " + event); + logger.info("Investigating why host {} has disconnected with event {}", hostId, event); Status determinedState = investigate(attache); // if state cannot be determined do nothing and bail out if (determinedState == null) { if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) { - logger.warn("Agent " + hostId + " state cannot be determined for more than " + 
AlertWait + "(" + AlertWait.value() + ") seconds, will go to Alert state"); + logger.warn("Agent {} state cannot be determined for more than {}({}) seconds, will go to Alert state", hostId, AlertWait, AlertWait.value()); determinedState = Status.Alert; } else { - logger.warn("Agent " + hostId + " state cannot be determined, do nothing"); + logger.warn("Agent {} state cannot be determined, do nothing", hostId); return false; } } final Status currentStatus = host.getStatus(); - logger.info("The agent from host " + hostId + " state determined is " + determinedState); + logger.info("The agent from host {} state determined is {}", hostId, determinedState); if (determinedState == Status.Down) { final String message = "Host is down: " + host.getId() + "-" + host.getName() + ". Starting HA on the VMs"; @@ -882,15 +880,15 @@ protected boolean handleDisconnectWithInvestigation(final AgentAttache attache, agentStatusTransitTo(host, Status.Event.Ping, _nodeId); return false; } else if (determinedState == Status.Disconnected) { - logger.warn("Agent is disconnected but the host is still up: " + host.getId() + "-" + host.getName() + + logger.warn("Agent is disconnected but the host is still up: {}-{}", host.getId(), host.getName() + '-' + host.getResourceState()); if (currentStatus == Status.Disconnected || (currentStatus == Status.Up && host.getResourceState() == ResourceState.PrepareForMaintenance)) { if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) { - logger.warn("Host " + host.getId() + " has been disconnected past the wait time it should be disconnected."); + logger.warn("Host {} has been disconnected past the wait time it should be disconnected.", host.getId()); event = Status.Event.WaitedTooLong; } else { - logger.debug("Host " + host.getId() + " has been determined to be disconnected but it hasn't passed the wait time yet."); + logger.debug("Host {} has been determined to be disconnected but it hasn't passed the wait time yet.", 
host.getId()); return false; } } else if (currentStatus == Status.Up) { @@ -913,7 +911,7 @@ protected boolean handleDisconnectWithInvestigation(final AgentAttache attache, "In availability zone " + host.getDataCenterId() + ", host is in alert state: " + host.getId() + "-" + host.getName()); } } else { - logger.debug("The next status of agent " + host.getId() + " is not Alert, no need to investigate what happened"); + logger.debug("The next status of agent {} is not Alert, no need to investigate what happened", host.getId()); } } handleDisconnectWithoutInvestigation(attache, event, true, true); @@ -954,12 +952,12 @@ public Answer easySend(final Long hostId, final Command cmd) { try { final Host h = _hostDao.findById(hostId); if (h == null || h.getRemoved() != null) { - logger.debug("Host with id " + hostId + " doesn't exist"); + logger.debug("Host with id {} doesn't exist", hostId); return null; } final Status status = h.getStatus(); if (!status.equals(Status.Up) && !status.equals(Status.Connecting)) { - logger.debug("Can not send command " + cmd + " due to Host " + hostId + " is not up"); + logger.debug("Can not send command {} due to Host {} not being up", cmd, hostId); return null; } final Answer answer = send(hostId, cmd); @@ -969,7 +967,7 @@ public Answer easySend(final Long hostId, final Command cmd) { } if (logger.isDebugEnabled() && answer.getDetails() != null) { - logger.debug("Details from executing " + cmd.getClass() + ": " + answer.getDetails()); + logger.debug("Details from executing {}: {}", cmd.getClass(), answer.getDetails()); } return answer; @@ -978,7 +976,7 @@ public Answer easySend(final Long hostId, final Command cmd) { logger.warn(e.getMessage()); return null; } catch (final OperationTimedoutException e) { - logger.warn("Operation timed out: " + e.getMessage()); + logger.warn("Operation timed out: {}", e.getMessage()); return null; } catch (final Exception e) { logger.warn("Exception while sending", e); @@ -1009,7 +1007,7 @@ public void 
reconnect(final long hostId) throws AgentUnavailableException { } if (host.getStatus() == Status.Disconnected) { - logger.debug("Host is already disconnected, no work to be done: " + hostId); + logger.debug("Host is already disconnected, no work to be done: {}", hostId); return; } @@ -1027,7 +1025,7 @@ public void reconnect(final long hostId) throws AgentUnavailableException { @Override public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) { for (final Pair monitor : _hostMonitors) { - logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName()); + logger.debug("Sending host about to be removed to listener: {}", monitor.second().getClass().getSimpleName()); monitor.second().processHostAboutToBeRemoved(hostId); } @@ -1036,7 +1034,7 @@ public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) { @Override public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) { for (final Pair monitor : _hostMonitors) { - logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName()); + logger.debug("Sending host removed to listener: {}", monitor.second().getClass().getSimpleName()); monitor.second().processHostRemoved(hostId, clusterId); } @@ -1044,7 +1042,7 @@ public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) { public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - logger.debug("Received agent disconnect event for host " + hostId); + logger.debug("Received agent disconnect event for host {}", hostId); AgentAttache attache = null; attache = findAttache(hostId); if (attache != null) { @@ -1056,7 +1054,7 @@ public boolean executeUserRequest(final long hostId, final Event event) throws A try { reconnect(hostId); } catch (CloudRuntimeException e) { - logger.debug("Error on shutdown request for hostID: " + hostId, e); + logger.debug("Error on shutdown request for hostID: 
{}", hostId, e); return false; } return true; @@ -1071,7 +1069,7 @@ public boolean isAgentAttached(final long hostId) { } protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws ConnectionException { - logger.debug("create ConnectedAgentAttache for " + host.getId()); + logger.debug("create ConnectedAgentAttache for {}", host.getId()); final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); @@ -1109,7 +1107,7 @@ private AgentAttache sendReadyAndGetAttache(HostVO host, ReadyCommand ready, Lin ready.setMsHostList(newMSList); ready.setLbAlgorithm(indirectAgentLB.getLBAlgorithmName()); ready.setLbCheckInterval(indirectAgentLB.getLBPreferredHostCheckInterval(host.getClusterId())); - logger.debug("Agent's management server host list is not up to date, sending list update:" + newMSList); + logger.debug("Agent's management server host list is not up to date, sending list update: {}", newMSList); } attache = createAttacheForConnect(host, link); @@ -1170,26 +1168,26 @@ public SimulateStartTask(final long id, final ServerResource resource, final Map @Override protected void runInContext() { try { - logger.debug("Simulating start for resource " + resource.getName() + " id " + id); + logger.debug("Simulating start for resource {} id {}", resource.getName(), id); if (tapLoadingAgents(id, TapAgentsAction.Add)) { try { final AgentAttache agentattache = findAttache(id); if (agentattache == null) { - logger.debug("Creating agent for host " + id); + logger.debug("Creating agent for host {}", id); _resourceMgr.createHostAndAgent(id, resource, details, false, null, false); - logger.debug("Completed creating agent for host " + id); + logger.debug("Completed creating agent for host {}", id); } else { - logger.debug("Agent already created in another thread for host " + id + ", ignore this"); + logger.debug("Agent already created in another thread for host {}, 
ignore this", id); } } finally { tapLoadingAgents(id, TapAgentsAction.Del); } } else { - logger.debug("Agent creation already getting processed in another thread for host " + id + ", ignore this"); + logger.debug("Agent creation already getting processed in another thread for host {}, ignore this", id); } } catch (final Exception e) { - logger.warn("Unable to simulate start on resource " + id + " name " + resource.getName(), e); + logger.warn("Unable to simulate start on resource {} name {}", id, resource.getName(), e); } } } @@ -1215,7 +1213,7 @@ protected void runInContext() { final AgentAttache attache = handleConnectedAgent(_link, startups, _request); if (attache == null) { - logger.warn("Unable to create attache for agent: " + _request); + logger.warn("Unable to create attache for agent: {}", _request); } } } @@ -1238,7 +1236,7 @@ protected void connectAgent(final Link link, final Command[] cmds, final Request try { link.send(response.toBytes()); } catch (final ClosedChannelException e) { - logger.debug("Failed to send startupanswer: " + e.toString()); + logger.debug("Failed to send startupanswer: {}", e.toString()); } _connectExecutor.execute(new HandleAgentConnectTask(link, cmds, request)); } @@ -1302,7 +1300,7 @@ protected void processRequest(final Link link, final Request request) { if (attache == null) { if (!(cmd instanceof StartupCommand)) { - logger.warn("Throwing away a request because it came through as the first command on a connect: " + request); + logger.warn("Throwing away a request because it came through as the first command on a connect: {}", request); } else { // submit the task for execution request.logD("Scheduling the first command "); @@ -1319,14 +1317,14 @@ protected void processRequest(final Link link, final Request request) { if (logger.isDebugEnabled()) { if (cmd instanceof PingRoutingCommand) { logD = false; - logger.debug("Ping from Routing host " + hostId + "(" + hostName + ")"); - logger.trace("SeqA " + hostId + "-" + 
request.getSequence() + ": Processing " + request); + logger.debug("Ping from Routing host {}({})", hostId, hostName); + logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request); } else if (cmd instanceof PingCommand) { logD = false; - logger.debug("Ping from " + hostId + "(" + hostName + ")"); - logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request); + logger.debug("Ping from {}({})", hostId, hostName); + logger.trace("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request); } else { - logger.debug("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request); + logger.debug("SeqA {}-{}: Processing {}", hostId, request.getSequence(), request); } } @@ -1351,7 +1349,7 @@ protected void processRequest(final Link link, final Request request) { } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; final String reason = shutdown.getReason(); - logger.info("Host " + attache.getId() + " has informed us that it is shutting down with reason " + reason + " and detail " + shutdown.getDetail()); + logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache.getId(), reason, shutdown.getDetail()); if (reason.equals(ShutdownCommand.Update)) { // disconnectWithoutInvestigation(attache, Event.UpdateNeeded); throw new CloudRuntimeException("Agent update not implemented"); @@ -1389,7 +1387,7 @@ protected void processRequest(final Link link, final Request request) { _alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId()); } } else { - logger.debug("Not processing " + PingRoutingCommand.class.getSimpleName() + " for agent id=" + cmdHostId + "; can't find the host in the DB"); + logger.debug("Not processing {} for agent id={}; can't find the host in the DB", PingRoutingCommand.class.getSimpleName(), cmdHostId); } } if (host!= null && host.getStatus() != Status.Up && 
gatewayAccessible) { @@ -1399,7 +1397,7 @@ } else if (cmd instanceof ReadyAnswer) { final HostVO host = _hostDao.findById(attache.getId()); if (host == null) { - logger.debug("Cant not find host " + attache.getId()); + logger.debug("Cant not find host {}", attache.getId()); } answer = new Answer(cmd); } else { @@ -1415,23 +1413,23 @@ final Response response = new Response(request, answers, _nodeId, attache.getId()); if (logD) { - logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); + logger.debug("SeqA {}-{}: Sending {}", attache.getId(), response.getSequence(), response); } else { - logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); + logger.trace("SeqA {}-{}: Sending {}", attache.getId(), response.getSequence(), response); } try { link.send(response.toBytes()); } catch (final ClosedChannelException e) { - logger.warn("Unable to send response because connection is closed: " + response); + logger.warn("Unable to send response because connection is closed: {}", response); } } protected void processResponse(final Link link, final Response response) { final AgentAttache attache = (AgentAttache)link.attachment(); if (attache == null) { - logger.warn("Unable to process: " + response); + logger.warn("Unable to process: {}", response); } else if (!attache.processAnswers(response.getSequence(), response)) { - logger.info("Host " + attache.getId() + " - Seq " + response.getSequence() + ": Response is not processed: " + response); + logger.info("Host {} - Seq {}: Response is not processed: {}", attache.getId(), response.getSequence(), response); } } @@ -1464,7 +1462,7 @@ protected void doTask(final Task task) throws TaskExecutionException { if (attache != null) { disconnectWithInvestigation(attache, Event.AgentDisconnected); } else { - 
logger.info("Connection from " + link.getIpAddress() + " closed but no cleanup was done."); + logger.info("Connection from {} closed but no cleanup was done.", link.getIpAddress()); link.close(); link.terminated(); } @@ -1507,7 +1505,7 @@ public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, fin try { return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); } catch (final NoTransitionException e1) { - logger.debug("Cannot transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName() + ", management server id is " + msId); + logger.debug("Cannot transit agent status with event {} for host {}, name={}, management server id is {}", e, host.getId(), host.getName(), msId); throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", management server id is " + msId + "," + e1.getMessage()); } } finally { @@ -1536,7 +1534,7 @@ public void disconnectWithInvestigation(final AgentAttache attache, final Status protected boolean isHostOwnerSwitched(final long hostId) { final HostVO host = _hostDao.findById(hostId); if (host == null) { - logger.warn("Can't find the host " + hostId); + logger.warn("Can't find the host {}", hostId); return false; } return isHostOwnerSwitched(host); @@ -1561,7 +1559,7 @@ private void disconnectInternal(final long hostId, final Status.Event event, fin } else { /* Agent is still in connecting process, don't allow to disconnect right away */ if (tapLoadingAgents(hostId, TapAgentsAction.Contains)) { - logger.info("Host " + hostId + " is being loaded so no disconnects needed."); + logger.info("Host {} is being loaded so no disconnects needed.", hostId); return; } @@ -1660,17 +1658,17 @@ protected void runInContext() { /* * Host is in non-operation state, so no investigation and direct put agent to Disconnected */ - logger.debug("Ping timeout but agent " + agentId + " is in resource state of " + resourceState + ", so no 
investigation"); + logger.debug("Ping timeout but agent {} is in resource state of {}, so no investigation", agentId, resourceState); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { final HostVO host = _hostDao.findById(agentId); if (host != null && (host.getType() == Host.Type.ConsoleProxy || host.getType() == Host.Type.SecondaryStorageVM || host.getType() == Host.Type.SecondaryStorageCmdExecutor)) { - logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: " + host.getId()); + logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: {}", host.getId()); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { - logger.debug("Ping timeout for agent " + agentId + ", do invstigation"); + logger.debug("Ping timeout for agent {}, do investigation", agentId); disconnectWithInvestigation(agentId, Event.PingTimeout); } } @@ -1709,7 +1707,7 @@ protected List findAgentsBehindOnPing() { } if (agentsBehind.size() > 0) { - logger.info("Found the following agents behind on ping: " + agentsBehind); + logger.info("Found the following agents behind on ping: {}", agentsBehind); } return agentsBehind; @@ -1892,7 +1890,7 @@ private void sendCommandToAgents(Map> hostsPerZone, Map 0) { - logger.debug("Found " + hosts.size() + " unmanaged direct hosts, processing connect for them..."); + logger.debug("Found {} unmanaged direct hosts, processing connect for them...", hosts.size()); for (final HostVO host : hosts) { try { final AgentAttache agentattache = findAttache(host.getId()); if (agentattache != null) { // already loaded, skip if (agentattache.forForward()) { - if (logger.isInfoEnabled()) { - logger.info(host + " is detected down, but we have a forward attache running, disconnect this one before launching the host"); - } + logger.info("{} is detected down, but we have a forward attache running, disconnect this one before launching the host", host); removeAgent(agentattache, 
Status.Disconnected); } else { continue; } } - if (logger.isDebugEnabled()) { - logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ")"); - } + logger.debug("Loading directly connected host {}({})", host.getId(), host.getName()); loadDirectlyConnectedHost(host, false); } catch (final Throwable e) { - logger.warn(" can not load directly connected host " + host.getId() + "(" + host.getName() + ") due to ", e); + logger.warn(" can not load directly connected host {}({}) due to ", host.getId(), host.getName(), e); } } } @@ -237,7 +233,7 @@ protected void runInContext() { try { runDirectAgentScanTimerTask(); } catch (final Throwable e) { - logger.error("Unexpected exception " + e.getMessage(), e); + logger.error("Unexpected exception {}", e.getMessage(), e); } } } @@ -248,7 +244,7 @@ public Task create(final Task.Type type, final Link link, final byte[] data) { } protected AgentAttache createAttache(final long id) { - logger.debug("create forwarding ClusteredAgentAttache for " + id); + logger.debug("create forwarding ClusteredAgentAttache for {}", id); final HostVO host = _hostDao.findById(id); final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName()); AgentAttache old = null; @@ -265,7 +261,7 @@ protected AgentAttache createAttache(final long id) { @Override protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) { - logger.debug("create ClusteredAgentAttache for " + host.getId()); + logger.debug("create ClusteredAgentAttache for {}", host.getId()); final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); AgentAttache old = null; @@ -325,7 +321,7 @@ protected boolean handleDisconnect(final AgentAttache agent, final Status.Event @Override public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - 
logger.debug("Received agent disconnect event for host " + hostId); + logger.debug("Received agent disconnect event for host {}", hostId); final AgentAttache attache = findAttache(hostId); if (attache != null) { // don't process disconnect if the host is being rebalanced @@ -333,7 +329,7 @@ public boolean executeUserRequest(final long hostId, final Event event) throws A final HostTransferMapVO transferVO = _hostTransferDao.findById(hostId); if (transferVO != null) { if (transferVO.getFutureOwner() == _nodeId && transferVO.getState() == HostTransferState.TransferStarted) { - logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is being connected to " + _nodeId); + logger.debug("Not processing {} event for the host id={} as the host is being connected to {}",Event.AgentDisconnected, hostId, _nodeId); return true; } } @@ -342,7 +338,7 @@ public boolean executeUserRequest(final long hostId, final Event event) throws A // don't process disconnect if the disconnect came for the host via delayed cluster notification, // but the host has already reconnected to the current management server if (!attache.forForward()) { - logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is directly connected to the current management server " + _nodeId); + logger.debug("Not processing {} event for the host id={} as the host is directly connected to the current management server {}", Event.AgentDisconnected, hostId, _nodeId); return true; } @@ -381,18 +377,18 @@ public void notifyNodesInClusterToScheduleHostScanTask() { } protected void logT(final byte[] bytes, final String msg) { - logger.trace("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " - + (Request.isRequest(bytes) ? 
"Req: " : "Resp: ") + msg); + logger.trace("Seq {}-{}: MgmtId {} : {}", Request.getAgentId(bytes), Request.getSequence(bytes), + Request.getManagementServerId(bytes), (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg); } protected void logD(final byte[] bytes, final String msg) { - logger.debug("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " - + (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg); + logger.debug("Seq {}-{}: MgmtId {} : {}", Request.getAgentId(bytes), Request.getSequence(bytes), + Request.getManagementServerId(bytes), (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg); } protected void logI(final byte[] bytes, final String msg) { - logger.info("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " - + (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg); + logger.info("Seq {}-{}: MgmtId {} : {}", Request.getAgentId(bytes), Request.getSequence(bytes), + Request.getManagementServerId(bytes), (Request.isRequest(bytes) ? 
"Req: " : "Resp: ") + msg); } public boolean routeToPeer(final String peer, final byte[] bytes) { @@ -453,7 +449,7 @@ public void closePeer(final String peerName) { try { ch.close(); } catch (final IOException e) { - logger.warn("Unable to close peer socket connection to " + peerName); + logger.warn("Unable to close peer socket connection to {}", peerName); } } _peers.remove(peerName); @@ -469,13 +465,13 @@ public SocketChannel connectToPeer(final String peerName, final SocketChannel pr try { prevCh.close(); } catch (final Exception e) { - logger.info("[ignored]" + "failed to get close resource for previous channel Socket: " + e.getLocalizedMessage()); + logger.info("[ignored] failed to get close resource for previous channel Socket: {}", e.getLocalizedMessage()); } } if (ch == null || ch == prevCh) { final ManagementServerHost ms = _clusterMgr.getPeer(peerName); if (ms == null) { - logger.info("Unable to find peer: " + peerName); + logger.info("Unable to find peer: {}", peerName); return null; } final String ip = ms.getServiceIP(); @@ -507,7 +503,7 @@ public SocketChannel connectToPeer(final String peerName, final SocketChannel pr ch1.close(); throw new IOException("SSL: Fail to init SSL! 
" + e); } - logger.debug("Connection to peer opened: " + peerName + ", ip: " + ip); + logger.debug("Connection to peer opened: {}, ip: {}", peerName, ip); _peers.put(peerName, ch1); _sslEngines.put(peerName, sslEngine); return ch1; @@ -516,15 +512,15 @@ public SocketChannel connectToPeer(final String peerName, final SocketChannel pr try { ch1.close(); } catch (final IOException ex) { - logger.error("failed to close failed peer socket: " + ex); + logger.error("failed to close failed peer socket: {}", ex); } } - logger.warn("Unable to connect to peer management server: " + peerName + ", ip: " + ip + " due to " + e.getMessage(), e); + logger.warn("Unable to connect to peer management server: {}, ip {} due to {}", peerName, ip, e.getMessage(), e); return null; } } - logger.trace("Found open channel for peer: " + peerName); + logger.trace("Found open channel for peer: {}", peerName); return ch; } } @@ -549,7 +545,7 @@ protected AgentAttache getAttache(final Long hostId) throws AgentUnavailableExce AgentAttache agent = findAttache(hostId); if (agent == null || !agent.forForward()) { if (isHostOwnerSwitched(host)) { - logger.debug("Host " + hostId + " has switched to another management server, need to update agent map with a forwarding agent attache"); + logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", hostId); agent = createAttache(hostId); } } @@ -567,10 +563,10 @@ public boolean stop() { if (_peers != null) { for (final SocketChannel ch : _peers.values()) { try { - logger.info("Closing: " + ch.toString()); + logger.info("Closing: {}", ch.toString()); ch.close(); } catch (final IOException e) { - logger.info("[ignored] error on closing channel: " + ch.toString(), e); + logger.info("[ignored] error on closing channel: {}", ch.toString(), e); } } } @@ -685,11 +681,11 @@ protected void doTask(final Task task) throws TaskExecutionException { final Response response = Response.parse(data); final 
AgentAttache attache = findAttache(response.getAgentId()); if (attache == null) { - logger.info("SeqA " + response.getAgentId() + "-" + response.getSequence() + "Unable to find attache to forward " + response.toString()); + logger.info("SeqA {}-{} Unable to find attache to forward {}", response.getAgentId(), response.getSequence(), response.toString()); return; } if (!attache.processAnswers(response.getSequence(), response)) { - logger.info("SeqA " + attache.getId() + "-" + response.getSequence() + ": Response is not processed: " + response.toString()); + logger.info("SeqA {}-{}: Response is not processed: {}", attache.getId(), response.getSequence(), response.toString()); } } return; @@ -716,12 +712,12 @@ public void onManagementNodeJoined(final List no @Override public void onManagementNodeLeft(final List nodeList, final long selfNodeId) { for (final ManagementServerHost vo : nodeList) { - logger.info("Marking hosts as disconnected on Management server" + vo.getMsid()); + logger.info("Marking hosts as disconnected on Management server {}", vo.getMsid()); final long lastPing = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout(); _hostDao.markHostsAsDisconnected(vo.getMsid(), lastPing); outOfBandManagementDao.expireServerOwnership(vo.getMsid()); haConfigDao.expireServerOwnership(vo.getMsid()); - logger.info("Deleting entries from op_host_transfer table for Management server " + vo.getMsid()); + logger.info("Deleting entries from op_host_transfer table for Management server {}", vo.getMsid()); cleanupTransferMap(vo.getMsid()); } } @@ -748,7 +744,7 @@ public boolean executeRebalanceRequest(final long agentId, final long currentOwn try { result = rebalanceHost(agentId, currentOwnerId, futureOwnerId); } catch (final Exception e) { - logger.warn("Unable to rebalance host id=" + agentId, e); + logger.warn("Unable to rebalance host id={}", agentId, e); } } return result; @@ -785,13 +781,13 @@ protected synchronized void runInContext() { cancelled = true; } } 
catch (final Throwable e) { - logger.error("Unexpected exception " + e.toString(), e); + logger.error("Unexpected exception {}", e.toString(), e); } } } public void startRebalanceAgents() { - logger.debug("Management server " + _nodeId + " is asking other peers to rebalance their agents"); + logger.debug("Management server {} is asking other peers to rebalance their agents", _nodeId); final List allMS = _mshostDao.listBy(ManagementServerHost.State.Up); final QueryBuilder sc = QueryBuilder.create(HostVO.class); sc.and(sc.entity().getManagementServerId(), Op.NNULL); @@ -803,8 +799,8 @@ public void startRebalanceAgents() { if (!allManagedAgents.isEmpty() && !allMS.isEmpty()) { avLoad = allManagedAgents.size() / allMS.size(); } else { - logger.debug("There are no hosts to rebalance in the system. Current number of active management server nodes in the system is " + allMS.size() + "; number of managed agents is " - + allManagedAgents.size()); + logger.debug("There are no hosts to rebalance in the system. 
Current number of active management server nodes in the system is {};" + + " number of managed agents is {}", allMS.size(), allManagedAgents.size()); return; } @@ -826,14 +822,14 @@ public void startRebalanceAgents() { } if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { - logger.debug("Found " + hostsToRebalance.size() + " hosts to rebalance from management server " + node.getMsid()); + logger.debug("Found {} hosts to rebalance from management server {}", hostsToRebalance.size(), node.getMsid()); for (final HostVO host : hostsToRebalance) { final long hostId = host.getId(); - logger.debug("Asking management server " + node.getMsid() + " to give away host id=" + hostId); + logger.debug("Asking management server {} to give away host id={}", node.getMsid(), hostId); boolean result = true; if (_hostTransferDao.findById(hostId) != null) { - logger.warn("Somebody else is already rebalancing host id: " + hostId); + logger.warn("Somebody else is already rebalancing host id: {}", hostId); continue; } @@ -842,11 +838,11 @@ transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId); final Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, Event.RequestAgentRebalance); if (answer == null) { - logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid()); + logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid()); result = false; } } catch (final Exception ex) { - logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid(), ex); + logger.warn("Failed to get host id={} from management server {}", hostId, node.getMsid(), ex); result = false; } finally { if (transfer != null) { @@ -861,7 +857,7 @@ } } } else { - logger.debug("Found no hosts to
rebalance from the management server {}", node.getMsid()); } } } @@ -875,14 +871,14 @@ private Answer[] sendRebalanceCommand(final long peer, final long agentId, final final Command[] cmds = commands.toCommands(); try { - logger.debug("Forwarding " + cmds[0].toString() + " to " + peer); + logger.debug("Forwarding {} to {}", cmds[0].toString(), peer); final String peerName = Long.toString(peer); final String cmdStr = _gson.toJson(cmds); final String ansStr = _clusterMgr.execute(peerName, agentId, cmdStr, true); final Answer[] answers = _gson.fromJson(ansStr, Answer[].class); return answers; } catch (final Exception e) { - logger.warn("Caught exception while talking to " + currentOwnerId, e); + logger.warn("Caught exception while talking to {}", currentOwnerId, e); return null; } } @@ -906,7 +902,7 @@ public Boolean propagateAgentEvent(final long agentId, final Event event) throws return null; } - logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId); + logger.debug("Propagating agent change request event: {} to agent: {}", event.toString(), agentId); final Command[] cmds = new Command[1]; cmds[0] = new ChangeAgentCommand(agentId, event); @@ -917,7 +913,7 @@ public Boolean propagateAgentEvent(final long agentId, final Event event) throws final Answer[] answers = _gson.fromJson(ansStr, Answer[].class); - logger.debug("Result for agent change is " + answers[0].getResult()); + logger.debug("Result for agent change is {}", answers[0].getResult()); return answers[0].getResult(); } @@ -927,10 +923,10 @@ private Runnable getTransferScanTask() { @Override protected void runInContext() { try { - logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId); + logger.trace("Clustered agent transfer scan check, management server id: {}", _nodeId); synchronized (_agentToTransferIds) { if (_agentToTransferIds.size() > 0) { - logger.debug("Found " + _agentToTransferIds.size() + " agents to transfer"); + 
logger.debug("Found {} agents to transfer", _agentToTransferIds.size()); // for (Long hostId : _agentToTransferIds) { for (final Iterator iterator = _agentToTransferIds.iterator(); iterator.hasNext();) { final Long hostId = iterator.next(); @@ -946,14 +942,14 @@ protected void runInContext() { final HostTransferMapVO transferMap = _hostTransferDao.findActiveHostTransferMapByHostId(hostId, new Date(cutTime.getTime() - rebalanceTimeOut)); if (transferMap == null) { - logger.debug("Timed out waiting for the host id=" + hostId + " to be ready to transfer, skipping rebalance for the host"); + logger.debug("Timed out waiting for the host id={} to be ready to transfer, skipping rebalance for the host", hostId); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; } if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) { - logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host"); + logger.debug("Management server {} doesn't own host id={} any more, skipping rebalance for the host", _nodeId, hostId); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -961,7 +957,7 @@ protected void runInContext() { final ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner()); if (ms != null && ms.getState() != ManagementServerHost.State.Up) { - logger.debug("Can't transfer host " + hostId + " as it's future owner is not in UP state: " + ms + ", skipping rebalance for the host"); + logger.debug("Can't transfer host {} as it's future owner is not in UP state: {}, skipping rebalance for the host", hostId, ms); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -972,17 +968,17 @@ protected void runInContext() { try { _executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner())); } catch (final RejectedExecutionException ex) { -
logger.warn("Failed to submit rebalance task for host id=" + hostId + "; postponing the execution"); + logger.warn("Failed to submit rebalance task for host id={}; postponing the execution", hostId); continue; } } else { - logger.debug("Agent " + hostId + " can't be transferred yet as its request queue size is " + attache.getQueueSize() + " and listener queue size is " - + attache.getNonRecurringListenersSize()); + logger.debug("Agent {} can't be transferred yet as its request queue size is {} and listener queue size is {}", + hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize()); } } } else { - logger.trace("Found no agents to be transferred by the management server " + _nodeId); + logger.trace("Found no agents to be transferred by the management server {}", _nodeId); } } @@ -994,7 +990,7 @@ protected void runInContext() { } private boolean setToWaitForRebalance(final long hostId, final long currentOwnerId, final long futureOwnerId) { - logger.debug("Adding agent " + hostId + " to the list of agents to transfer"); + logger.debug("Adding agent {} to the list of agents to transfer", hostId); synchronized (_agentToTransferIds) { return _agentToTransferIds.add(hostId); } @@ -1016,22 +1012,22 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi } } catch (final Exception ex) { - logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex); + logger.warn("Host {} failed to connect to the management server {} as a part of rebalance process", hostId, futureOwnerId, ex); result = false; } if (result) { - logger.debug("Successfully transferred host id=" + hostId + " to management server " + futureOwnerId); + logger.debug("Successfully transferred host id={} to management server {}", hostId, futureOwnerId); finishRebalance(hostId, futureOwnerId, Event.RebalanceCompleted); } else { - logger.warn("Failed to transfer host id=" + hostId + " to management 
server " + futureOwnerId); + logger.warn("Failed to transfer host id={} to management server {}", hostId, futureOwnerId); finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed); } } else if (futureOwnerId == _nodeId) { final HostVO host = _hostDao.findById(hostId); try { - logger.debug("Disconnecting host " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); + logger.debug("Disconnecting host {}({}) as a part of rebalance process without notification", host.getId(), host.getName()); final AgentAttache attache = findAttache(hostId); if (attache != null) { @@ -1039,22 +1035,21 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi } if (result) { - logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); + logger.debug("Loading directly connected host {}({}) to the management server {} as a part of rebalance process", host.getId(), host.getName(), _nodeId); result = loadDirectlyConnectedHost(host, true); } else { - logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); + logger.warn("Failed to disconnect {}({}) as a part of rebalance process without notification", host.getId(), host.getName()); } } catch (final Exception ex) { - logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process due to:", - ex); + logger.warn("Failed to load directly connected host {}({}) to the management server {} as a part of rebalance process due to:", host.getId(), host.getName(), _nodeId, ex); result = false; } if (result) { -
logger.debug("Successfully loaded directly connected host {}({}) to the management server {} as a part of rebalance process", host.getId(), host.getName(), _nodeId); } else { - logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); + logger.warn("Failed to load directly connected host {}({}) to the management server {} as a part of rebalance process", host.getId(), host.getName(), _nodeId); } } @@ -1064,11 +1059,11 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi protected void finishRebalance(final long hostId, final long futureOwnerId, final Event event) { final boolean success = event == Event.RebalanceCompleted ? true : false; - logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event); + logger.debug("Finishing rebalancing for the agent {} with event {}", hostId, event); final AgentAttache attache = findAttache(hostId); if (attache == null || !(attache instanceof ClusteredAgentAttache)) { - logger.debug("Unable to find forward attache for the host id=" + hostId + ", assuming that the agent disconnected already"); + logger.debug("Unable to find forward attache for the host id={} assuming that the agent disconnected already", hostId); _hostTransferDao.completeAgentTransfer(hostId); return; } @@ -1083,7 +1078,7 @@ protected void finishRebalance(final long hostId, final long futureOwnerId, fina // 2) Get all transfer requests and route them to peer Request requestToTransfer = forwardAttache.getRequestToTransfer(); while (requestToTransfer != null) { - logger.debug("Forwarding request " + requestToTransfer.getSequence() + " held in transfer attache " + hostId + " from the management server " + _nodeId + " to " + futureOwnerId); + logger.debug("Forwarding request {} held in transfer attache {} from the management server {} to {}",
requestToTransfer.getSequence(), hostId, _nodeId, futureOwnerId); final boolean routeResult = routeToPeer(Long.toString(futureOwnerId), requestToTransfer.getBytes()); if (!routeResult) { logD(requestToTransfer.getBytes(), "Failed to route request to peer"); @@ -1092,23 +1087,23 @@ protected void finishRebalance(final long hostId, final long futureOwnerId, fina requestToTransfer = forwardAttache.getRequestToTransfer(); } - logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance to " + futureOwnerId); + logger.debug("Management server {} completed agent {} rebalance to {}", _nodeId, hostId, futureOwnerId); } else { failRebalance(hostId); } - logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance"); + logger.debug("Management server {} completed agent {} rebalance", _nodeId, hostId); _hostTransferDao.completeAgentTransfer(hostId); } protected void failRebalance(final long hostId) { try { - logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId); + logger.debug("Management server {} failed to rebalance agent {}", _nodeId, hostId); _hostTransferDao.completeAgentTransfer(hostId); handleDisconnectWithoutInvestigation(findAttache(hostId), Event.RebalanceFailed, true, true); } catch (final Exception ex) { - logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup"); + logger.warn("Failed to reconnect host id={} as a part of failed rebalance task cleanup", hostId); } } @@ -1126,18 +1121,18 @@ protected boolean startRebalance(final long hostId) { handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true); final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId); if (forwardAttache == null) { - logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process"); + logger.warn("Unable to create a forward attache for the host {} as a part of 
rebalance process", hostId); return false; } - logger.debug("Putting agent id=" + hostId + " to transfer mode"); + logger.debug("Putting agent id={} to transfer mode", hostId); forwardAttache.setTransferMode(true); _agents.put(hostId, forwardAttache); } else { if (attache == null) { - logger.warn("Attache for the agent " + hostId + " no longer exists on management server " + _nodeId + ", can't start host rebalancing"); + logger.warn("Attache for the agent {} no longer exists on management server {}, can't start host rebalancing", hostId, _nodeId); } else { - logger.warn("Attache for the agent " + hostId + " has request queue size= " + attache.getQueueSize() + " and listener queue size " + attache.getNonRecurringListenersSize() - + ", can't start host rebalancing"); + logger.warn("Attache for the agent {} has request queue size= {} and listener queue size {}, can't start host rebalancing", + hostId, attache.getQueueSize(), attache.getNonRecurringListenersSize()); } return false; } @@ -1173,25 +1168,24 @@ public RebalanceTask(final long hostId, final long currentOwnerId, final long fu @Override protected void runInContext() { try { - logger.debug("Rebalancing host id=" + hostId); + logger.debug("Rebalancing host id={}", hostId); rebalanceHost(hostId, currentOwnerId, futureOwnerId); } catch (final Exception e) { - logger.warn("Unable to rebalance host id=" + hostId, e); + logger.warn("Unable to rebalance host id={}", hostId, e); } } } private String handleScheduleHostScanTaskCommand(final ScheduleHostScanTaskCommand cmd) { - logger.debug("Intercepting resource manager command: " + _gson.toJson(cmd)); + logger.debug("Intercepting resource manager command: {}", _gson.toJson(cmd)); try { scheduleHostScanTask(); } catch (final Exception e) { // Scheduling host scan task in peer MS is a best effort operation during host add, regular host scan // happens at fixed intervals anyways.
So handling any exceptions that may be thrown - logger.warn( - "Exception happened while trying to schedule host scan task on mgmt server " + _clusterMgr.getSelfPeerName() + ", ignoring as regular host scan happens at fixed interval anyways", - e); + logger.warn("Exception happened while trying to schedule host scan task on mgmt server {}, ignoring as regular host scan happens at fixed " + + "interval anyways", _clusterMgr.getSelfPeerName(), e); return null; } @@ -1217,7 +1211,7 @@ public String getName() { @Override public String dispatch(final ClusterServicePdu pdu) { - logger.debug("Dispatch ->" + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); + logger.debug("Dispatch ->{}, json: {}", pdu.getAgentId(), pdu.getJsonPackage()); Command[] cmds = null; try { @@ -1230,11 +1224,11 @@ public String dispatch(final ClusterServicePdu pdu) { if (cmds.length == 1 && cmds[0] instanceof ChangeAgentCommand) { // intercepted final ChangeAgentCommand cmd = (ChangeAgentCommand)cmds[0]; - logger.debug("Intercepting command for agent change: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); + logger.debug("Intercepting command for agent change: agent {} event: {}", cmd.getAgentId(), cmd.getEvent()); boolean result = false; try { result = executeAgentUserRequest(cmd.getAgentId(), cmd.getEvent()); - logger.debug("Result is " + result); + logger.debug("Result is {}", result); } catch (final AgentUnavailableException e) { logger.warn("Agent is unavailable", e); @@ -1247,11 +1241,11 @@ public String dispatch(final ClusterServicePdu pdu) { } else if (cmds.length == 1 && cmds[0] instanceof TransferAgentCommand) { final TransferAgentCommand cmd = (TransferAgentCommand)cmds[0]; - logger.debug("Intercepting command for agent rebalancing: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); + logger.debug("Intercepting command for agent rebalancing: agent {} event: {}", cmd.getAgentId(), cmd.getEvent()); boolean result = false; try { result = 
rebalanceAgent(cmd.getAgentId(), cmd.getEvent(), cmd.getCurrentOwner(), cmd.getFutureOwner()); - logger.debug("Result is " + result); + logger.debug("Result is {}", result); } catch (final AgentUnavailableException e) { logger.warn("Agent is unavailable", e); @@ -1266,12 +1260,12 @@ public String dispatch(final ClusterServicePdu pdu) { } else if (cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand) { final PropagateResourceEventCommand cmd = (PropagateResourceEventCommand)cmds[0]; - logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId()); + logger.debug("Intercepting command to propagate event {} for host {}", cmd.getEvent().name(), cmd.getHostId()); boolean result = false; try { result = _resourceMgr.executeUserRequest(cmd.getHostId(), cmd.getEvent()); - logger.debug("Result is " + result); + logger.debug("Result is {}", result); } catch (final AgentUnavailableException ex) { logger.warn("Agent is unavailable", ex); return null; @@ -1290,19 +1284,19 @@ public String dispatch(final ClusterServicePdu pdu) { try { final long startTick = System.currentTimeMillis(); - logger.debug("Dispatch -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); + logger.debug("Dispatch -> {}, json: {}", pdu.getAgentId(), pdu.getJsonPackage()); final Answer[] answers = sendToAgent(pdu.getAgentId(), cmds, pdu.isStopOnError()); if (answers != null) { final String jsonReturn = _gson.toJson(answers); - logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return result: " - + jsonReturn); + logger.debug("Completed dispatching -> {}, json: {} in {} ms, return result: {}", pdu.getAgentId(), + pdu.getJsonPackage(), (System.currentTimeMillis() - startTick), jsonReturn); return jsonReturn; } else { - logger.debug( - "Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + 
(System.currentTimeMillis() - startTick) + " ms, return null result"); + logger.debug("Completed dispatching -> {}, json: {} in {} ms, return null result", pdu.getAgentId(), + pdu.getJsonPackage(), (System.currentTimeMillis() - startTick)); } } catch (final AgentUnavailableException e) { logger.warn("Agent is unavailable", e); @@ -1362,7 +1356,7 @@ private Runnable getAgentRebalanceScanTask() { @Override protected void runInContext() { try { - logger.trace("Agent rebalance task check, management server id:" + _nodeId); + logger.trace("Agent rebalance task check, management server id:{}", _nodeId); // initiate agent lb task will be scheduled and executed only once, and only when number of agents // loaded exceeds _connectedAgentsThreshold if (!_agentLbHappened) { @@ -1379,11 +1373,11 @@ protected void runInContext() { if (allHostsCount > 0.0) { final double load = managedHostsCount / allHostsCount; if (load > ConnectedAgentThreshold.value()) { - logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value()); + logger.debug("Scheduling agent rebalancing task as the average agent load {} is more than the threshold {}", load, ConnectedAgentThreshold.value()); scheduleRebalanceAgents(); _agentLbHappened = true; } else { - logger.debug("Not scheduling agent rebalancing task as the average load " + load + " has not crossed the threshold " + ConnectedAgentThreshold.value()); + logger.debug("Not scheduling agent rebalancing task as the average load {} has not crossed the threshold {}", load, ConnectedAgentThreshold.value()); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java index 81c026348c8c..c8e24301b292 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java +++
b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java @@ -98,7 +98,7 @@ protected void finalize() throws Throwable { assert _link == null : "Duh...Says you....Forgot to call disconnect()!"; synchronized (this) { if (_link != null) { - logger.warn("Lost attache " + _id + "(" + _name + ")"); + logger.warn("Lost attache {} ({})", _id, _name); disconnect(Status.Alert); } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java index 1e51e9ef55dd..c7ba7e3fcacd 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java @@ -115,7 +115,7 @@ public void process(Answer[] answers) { if (answers != null && answers[0] instanceof StartupAnswer) { StartupAnswer startup = (StartupAnswer)answers[0]; int interval = startup.getPingInterval(); - logger.info("StartupAnswer received " + startup.getHostId() + " Interval = " + interval); + logger.info("StartupAnswer received {} Interval = {}", startup.getHostId(), interval); _futures.add(_agentMgr.getCronJobPool().scheduleAtFixedRate(new PingTask(), interval, interval, TimeUnit.SECONDS)); } } @@ -126,7 +126,7 @@ protected void finalize() throws Throwable { assert _resource == null : "Come on now....If you're going to dabble in agent code, you better know how to close out our resources. 
Ever considered why there's a method called disconnect()?"; synchronized (this) { if (_resource != null) { - logger.warn("Lost attache for " + _id + "(" + _name + ")"); + logger.warn("Lost attache for {}({})", _id, _name); disconnect(Status.Alert); } } @@ -140,7 +140,7 @@ private synchronized void queueTask(Task task) { } private synchronized void scheduleFromQueue() { - logger.trace("Agent attache=" + _id + ", task queue size=" + tasks.size() + ", outstanding tasks=" + _outstandingTaskCount.get()); + logger.trace("Agent attache={}, task queue size={}, outstanding tasks={}", _id, tasks.size(), _outstandingTaskCount.get()); while (!tasks.isEmpty() && _outstandingTaskCount.get() < _agentMgr.getDirectAgentThreadCap()) { _outstandingTaskCount.incrementAndGet(); _agentMgr.getDirectAgentPool().execute(tasks.remove()); @@ -152,7 +152,7 @@ protected class PingTask extends ManagedContextRunnable { protected synchronized void runInContext() { try { if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) { - logger.warn("PingTask execution for direct attache(" + _id + ") has reached maximum outstanding limit(" + _agentMgr.getDirectAgentThreadCap() + "), bailing out"); + logger.warn("PingTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap()); return; } @@ -167,21 +167,21 @@ protected synchronized void runInContext() { } if (cmd == null) { - logger.warn("Unable to get current status on " + _id + "(" + _name + ")"); + logger.warn("Unable to get current status on {}({})", _id, _name); return; } if (cmd.getContextParam("logid") != null) { ThreadContext.put("logcontextid", cmd.getContextParam("logid")); } - logger.debug("Ping from " + _id + "(" + _name + ")"); + logger.debug("Ping from {}({})", _id, _name); long seq = _seq++; - logger.trace("SeqA " + _id + "-" + seq + ": " + new Request(_id, -1, cmd, false).toString()); + logger.trace("SeqA {}-{}: {}", _id, seq, new 
Request(_id, -1, cmd, false).toString()); _agentMgr.handleCommands(DirectAgentAttache.this, seq, new Command[] {cmd}); } else { - logger.debug("Unable to send ping because agent is disconnected " + _id + "(" + _name + ")"); + logger.debug("Unable to send ping because agent is disconnected {}({})", _id, _name); } } catch (Exception e) { logger.warn("Unable to complete the ping task", e); @@ -210,7 +210,7 @@ private void bailout() { Response resp = new Response(_req, answers.toArray(new Answer[answers.size()])); processAnswers(seq, resp); } catch (Exception e) { - logger.warn(log(seq, "Exception caught in bailout "), e); + logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Exception caught in bailout ", e); } } @@ -219,7 +219,7 @@ protected void runInContext() { long seq = _req.getSequence(); try { if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) { - logger.warn("CronTask execution for direct attache(" + _id + ") has reached maximum outstanding limit(" + _agentMgr.getDirectAgentThreadCap() + "), bailing out"); + logger.warn("CronTask execution for direct attache({}) has reached maximum outstanding limit({}), bailing out", _id, _agentMgr.getDirectAgentThreadCap()); bailout(); return; } @@ -228,7 +228,7 @@ protected void runInContext() { Command[] cmds = _req.getCommands(); boolean stopOnError = _req.stopOnError(); - logger.debug(log(seq, "Executing request")); + logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Executing request"); ArrayList answers = new ArrayList(cmds.length); for (int i = 0; i < cmds.length; i++) { Answer answer = null; @@ -247,24 +247,24 @@ protected void runInContext() { answer = new Answer(cmds[i], false, "Agent is disconnected"); } } catch (Exception e) { - logger.warn(log(seq, "Exception Caught while executing command"), e); + logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Exception Caught while executing command", e); answer = new Answer(cmds[i], false, e.toString()); } answers.add(answer); if (!answer.getResult() && 
stopOnError) { if (i < cmds.length - 1 && logger.isDebugEnabled()) { - logger.debug(log(seq, "Cancelling because one of the answers is false and it is stop on error.")); + logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling because one of the answers is false and it is stop on error."); } break; } } Response resp = new Response(_req, answers.toArray(new Answer[answers.size()])); - logger.debug(log(seq, "Response Received: ")); + logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Response Received: "); processAnswers(seq, resp); } catch (Exception e) { - logger.warn(log(seq, "Exception caught "), e); + logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Exception caught ", e); } finally { _outstandingCronTaskCount.decrementAndGet(); } @@ -286,7 +286,7 @@ protected void runInContext() { Command[] cmds = _req.getCommands(); boolean stopOnError = _req.stopOnError(); - logger.debug(log(seq, "Executing request")); + logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Executing request"); ArrayList answers = new ArrayList(cmds.length); for (int i = 0; i < cmds.length; i++) { Answer answer = null; @@ -306,25 +306,25 @@ protected void runInContext() { } } catch (Throwable t) { // Catch Throwable as all exceptions will otherwise be eaten by the executor framework - logger.warn(log(seq, "Throwable caught while executing command"), t); + logger.warn(LOG_SEQ_FORMATTED_STRING, seq, "Throwable caught while executing command", t); answer = new Answer(cmds[i], false, t.toString()); } answers.add(answer); if (!answer.getResult() && stopOnError) { if (i < cmds.length - 1 && logger.isDebugEnabled()) { - logger.debug(log(seq, "Cancelling because one of the answers is false and it is stop on error.")); + logger.debug(LOG_SEQ_FORMATTED_STRING, seq, "Cancelling because one of the answers is false and it is stop on error."); } break; } } Response resp = new Response(_req, answers.toArray(new Answer[answers.size()])); - logger.debug(log(seq, "Response Received: ")); + logger.debug(LOG_SEQ_FORMATTED_STRING, 
seq, "Response Received: "); processAnswers(seq, resp); } catch (Throwable t) { // This is pretty serious as processAnswers might not be called and the calling process is stuck waiting for the full timeout - logger.error(log(seq, "Throwable caught in runInContext, this will cause the management to become unpredictable"), t); + logger.error(LOG_SEQ_FORMATTED_STRING, seq, "Throwable caught in runInContext, this will cause the management to become unpredictable", t); } finally { _outstandingTaskCount.decrementAndGet(); scheduleFromQueue(); From 2cedec553822ca97a45b84fc15a507a6979d0a75 Mon Sep 17 00:00:00 2001 From: "klaus.freitas.scclouds" Date: Mon, 4 Mar 2024 10:19:24 -0300 Subject: [PATCH 3/6] some more log refactoring for cloud-engine-orchestration module --- .../agent/manager/ClusteredAgentAttache.java | 2 +- .../agent/manager/DirectAgentAttache.java | 2 +- .../agent/manager/SynchronousListener.java | 4 +- .../cloud/vm/VirtualMachineManagerImpl.java | 306 +++++++++--------- 4 files changed, 149 insertions(+), 165 deletions(-) diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java index 41aa019da87e..285ba4ffe60f 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java @@ -105,7 +105,7 @@ public void cancel(final long seq) { @Override public void routeToAgent(final byte[] data) throws AgentUnavailableException { - logger.debug(LOG_SEQ_FORMATTED_STRING, Request.getSequence(data), "Routing from " + Request.getManagementServerId(data))); + logger.debug(LOG_SEQ_FORMATTED_STRING, Request.getSequence(data), "Routing from " + Request.getManagementServerId(data)); if (_link == null) { logger.debug(LOG_SEQ_FORMATTED_STRING, Request.getSequence(data), "Link is closed"); diff --git 
a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java index c7ba7e3fcacd..927da34104ff 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java @@ -60,7 +60,7 @@ public DirectAgentAttache(AgentManagerImpl agentMgr, long id, String name, Serve @Override public void disconnect(Status state) { - logger.debug("Processing disconnect " + _id + "(" + _name + ")"); + logger.debug("Processing disconnect {}({})", _id, _name); for (ScheduledFuture future : _futures) { future.cancel(false); diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java index d97aed2f3fbe..0e8aff2bccf9 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java @@ -71,7 +71,7 @@ public synchronized boolean processAnswers(long agentId, long seq, Answer[] resp @Override public synchronized boolean processDisconnect(long agentId, Status state) { - logger.trace("Agent disconnected, agent id: " + agentId + ", state: " + state + ". Will notify waiters"); + logger.trace("Agent disconnected, agent id: {}, state: {}. Will notify waiters", agentId, state); _disconnected = true; notifyAll(); @@ -127,7 +127,7 @@ public synchronized Answer[] waitFor(int s) throws InterruptedException { } profiler.stop(); - logger.trace("Synchronized command - sending completed, time: " + profiler.getDurationInMillis() + ", answer: " + + logger.trace("Synchronized command - sending completed, time: {}, answer: {}", profiler.getDurationInMillis(), (_answers != null ? 
_answers[0].toString() : "null")); return _answers; } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 2a0b7e700e48..a910f91cbab9 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -480,7 +480,7 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t } final Long rootDiskSizeFinal = rootDiskSize; - logger.debug("Allocating nics for " + persistedVm); + logger.debug("Allocating nics for {}", persistedVm); try { if (!vmProfile.getBootArgs().contains("ExternalLoadBalancerVm")) { @@ -490,7 +490,7 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t throw new CloudRuntimeException("Concurrent operation while trying to allocate resources for the VM", e); } - logger.debug("Allocating disks for " + persistedVm); + logger.debug("Allocating disks for {}", persistedVm); allocateRootVolume(persistedVm, template, rootDiskOfferingInfo, owner, rootDiskSizeFinal); @@ -669,7 +669,7 @@ protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableExcepti if (!cmds.isSuccessful()) { for (final Answer answer : cmds.getAnswers()) { if (!answer.getResult()) { - logger.warn("Failed to expunge vm due to: " + answer.getDetails()); + logger.warn("Failed to expunge vm due to: {}", answer.getDetails()); throw new CloudRuntimeException("Unable to expunge " + vm + " due to " + answer.getDetails()); } } @@ -769,12 +769,14 @@ private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { Answer answer = _agentMgr.easySend(hostId, cmd); if (answer == null) { - logger.warn("Unable to get an answer to the modify targets command. 
Targets [{}].", () -> cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", "))); + logger.warn("Unable to get an answer to the modify targets command. Targets [{}].", + () -> cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", "))); return; } if (!answer.getResult()) { - logger.warn("Unable to modify targets [{}] on the host [{}].", () -> cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", ")), () -> hostId); + logger.warn("Unable to modify targets [{}] on the host [{}].", + () -> cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", ")), () -> hostId); } } @@ -837,33 +839,33 @@ protected boolean checkWorkItems(final VMInstanceVO vm, final State state) throw while (true) { final ItWorkVO vo = _workDao.findByOutstandingWork(vm.getId(), state); if (vo == null) { - logger.debug("Unable to find work for VM: " + vm + " and state: " + state); + logger.debug("Unable to find work for VM: {} and state: {}", vm, state); return true; } if (vo.getStep() == Step.Done) { - logger.debug("Work for " + vm + " is " + vo.getStep()); + logger.debug("Work for {} is {}", vm, vo.getStep()); return true; } final VMInstanceVO instance = _vmDao.findById(vm.getId()); if (instance != null && instance.getState() == State.Running) { - logger.debug("VM is already started in DB: " + vm); + logger.debug("VM is already started in DB: {}", vm); return true; } if (vo.getSecondsTaskIsInactive() > VmOpCancelInterval.value()) { - logger.warn("The task item for vm " + vm + " has been inactive for " + vo.getSecondsTaskIsInactive()); + logger.warn("The task item for vm {} has been inactive for {}", vm, vo.getSecondsTaskIsInactive()); return false; } try { Thread.sleep(VmOpWaitInterval.value()*1000); } catch (final InterruptedException e) { - logger.info("Waiting for " + vm + " but is interrupted"); + logger.info("Waiting for {} but is interrupted", vm); throw new 
ConcurrentOperationException("Waiting for " + vm + " but is interrupted"); } - logger.debug("Waiting some more to make sure there's no activity on " + vm); + logger.debug("Waiting some more to make sure there's no activity on {}", vm); } } @@ -887,7 +889,7 @@ public Ternary doInTransaction(final final ReservationContextImpl context = new ReservationContextImpl(work.getId(), journal, caller, account); if (stateTransitTo(vm, Event.StartRequested, null, work.getId())) { - logger.debug("Successfully transitioned to start state for " + vm + " reservation id = " + work.getId()); + logger.debug("Successfully transitioned to start state for {} reservation id = {}", vm, work.getId()); return new Ternary<>(vm, context, work); } @@ -969,7 +971,7 @@ public void advanceStart(final String vmUuid, final Map":params.get(VirtualMachineProfile.Param.BootIntoSetup))); @@ -1085,8 +1087,8 @@ public void orchestrateStart(final String vmUuid, final Map vols = _volsDao.findReadyRootVolumesByInstance(vm.getId()); for (final VolumeVO vol : vols) { final Long volTemplateId = vol.getTemplateId(); if (volTemplateId != null && volTemplateId != template.getId()) { - logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool"); + logger.debug("{} of {} is READY, but template ids don't match, let the planner reassign a new pool", vol, vm); continue; } @@ -1154,8 +1156,8 @@ public void orchestrateStart(final String vmUuid, final Map(ipAddressDetails.values()), CAManager.CertValidityPeriod.value(), null); final boolean result = caManager.deployCertificate(vmHost, certificate, false, sshAccessDetails); if (!result) { - logger.error("Failed to setup certificate for system vm: " + vm.getInstanceName()); + logger.error("Failed to setup certificate for system vm: {}", vm.getInstanceName()); } return; } catch (final Exception e) { - logger.error("Retrying after catching exception while trying to secure agent for systemvm id=" + vm.getId(), e); + 
logger.error("Retrying after catching exception while trying to secure agent for systemvm id={}", vm.getId(), e); } } throw new CloudRuntimeException("Failed to setup and secure agent for systemvm id=" + vm.getId()); @@ -1332,20 +1334,20 @@ public void orchestrateStart(final String vmUuid, final Map volumeToPoolMap = prepareVmStorageMigration(vm, volumeToPool); try { - logger.debug(String.format("Offline migration of %s vm %s with volumes", + logger.debug("Offline migration of {} vm {} with volumes", vm.getHypervisorType().toString(), - vm.getInstanceName())); + vm.getInstanceName()); migrateThroughHypervisorOrStorage(vm, volumeToPoolMap); @@ -2337,18 +2333,14 @@ private Answer[] attemptHypervisorMigration(VMInstanceVO vm, Map volumeToPool, Long sourceClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException { - boolean isDebugEnabled = logger.isDebugEnabled(); - if(isDebugEnabled) { - String msg = String.format("Cleaning up after hypervisor pool migration volumes for VM %s(%s)", vm.getInstanceName(), vm.getUuid()); - logger.debug(msg); - } + logger.debug("Cleaning up after hypervisor pool migration volumes for VM %s(%s)", vm.getInstanceName(), vm.getUuid()); StoragePool rootVolumePool = null; if (MapUtils.isNotEmpty(volumeToPool)) { @@ -2362,10 +2354,7 @@ private void afterHypervisorMigrationCleanup(VMInstanceVO vm, Map volumes = _volsDao.findCreatedByInstance(vm.getId()); for (final VolumeVO volume : volumes) { if (!_storagePoolDao.findById(volume.getPoolId()).getScope().equals(ScopeType.ZONE)) { - logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: " - + dest.getHost().getId()); + logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: {}", + dest.getHost().getId()); throw new CloudRuntimeException( "Source and destination host are not in same cluster and all 
volumes are not on zone wide primary store, unable to migrate to host: " + dest.getHost().getId()); @@ -2618,7 +2607,7 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy final VirtualMachineGuru vmGuru = getVmGuru(vm); if (vm.getState() != State.Running) { - logger.debug("VM is not Running, unable to migrate the vm " + vm); + logger.debug("VM is not Running, unable to migrate the vm {}", vm); throw new CloudRuntimeException("VM is not Running, unable to migrate the vm currently " + vm + " , current state: " + vm.getState().toString()); } @@ -2679,18 +2668,18 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy volumeMgr.release(vm.getId(), dstHostId); } - logger.info("Migration cancelled because state has changed: " + vm); + logger.info("Migration cancelled because state has changed: {}", vm); throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm); } } catch (final NoTransitionException e1) { _networkMgr.rollbackNicForMigration(vmSrc, profile); volumeMgr.release(vm.getId(), dstHostId); - logger.info("Migration cancelled because " + e1.getMessage()); + logger.info("Migration cancelled because {}", e1.getMessage()); throw new ConcurrentOperationException("Migration cancelled because " + e1.getMessage()); } catch (final CloudRuntimeException e2) { _networkMgr.rollbackNicForMigration(vmSrc, profile); volumeMgr.release(vm.getId(), dstHostId); - logger.info("Migration cancelled because " + e2.getMessage()); + logger.info("Migration cancelled because {}", e2.getMessage()); work.setStep(Step.Done); _workDao.update(work.getId(), work); try { @@ -2714,7 +2703,7 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy } } catch (final OperationTimedoutException e) { if (e.isActive()) { - logger.warn("Active migration command so scheduling a restart for " + vm, e); + logger.warn("Active migration command so scheduling a restart for {}", vm, e); 
_haMgr.scheduleRestart(vm, true); } throw new AgentUnavailableException("Operation timed out on migrating " + vm, dstHostId); @@ -2730,22 +2719,22 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy try { if (!checkVmOnHost(vm, dstHostId)) { - logger.error("Unable to complete migration for " + vm); + logger.error("Unable to complete migration for {}", vm); try { _agentMgr.send(srcHostId, new Commands(cleanup(vm, dpdkInterfaceMapping)), null); } catch (final AgentUnavailableException e) { - logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId, e); + logger.error("AgentUnavailableException while cleanup on source host: {}", srcHostId, e); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("Unable to complete migration for " + vm); } } catch (final OperationTimedoutException e) { - logger.warn("Error while checking the vm " + vm + " on host " + dstHostId, e); + logger.warn("Error while checking the vm {} on host {}", vm, dstHostId, e); } migrated = true; } finally { if (!migrated) { - logger.info("Migration was unsuccessful. Cleaning up: " + vm); + logger.info("Migration was unsuccessful. 
Cleaning up: {}", vm); _networkMgr.rollbackNicForMigration(vmSrc, profile); volumeMgr.release(vm.getId(), dstHostId); @@ -3167,9 +3156,9 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo AttachOrDettachConfigDriveCommand dettachCommand = new AttachOrDettachConfigDriveCommand(vm.getInstanceName(), vmData, VmConfigDriveLabel.value(), false); try { _agentMgr.send(srcHost.getId(), dettachCommand); - logger.debug("Deleted config drive ISO for vm " + vm.getInstanceName() + " In host " + srcHost); + logger.debug("Deleted config drive ISO for vm {} in host {}", vm.getInstanceName(), srcHost); } catch (OperationTimedoutException e) { - logger.error("TIme out occurred while exeuting command AttachOrDettachConfigDrive " + e.getMessage(), e); + logger.error("Time out occurred while executing command AttachOrDettachConfigDrive {}", e.getMessage(), e); } } @@ -3181,22 +3170,22 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo try { if (!checkVmOnHost(vm, destHostId)) { - logger.error("Vm not found on destination host. Unable to complete migration for " + vm); + logger.error("Vm not found on destination host. Unable to complete migration for {}", vm); try { _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (final AgentUnavailableException e) { - logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId, e); + logger.error("AgentUnavailableException while cleanup on source host: {}", srcHostId, e); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("VM not found on destination host. 
Unable to complete migration for " + vm); } } catch (final OperationTimedoutException e) { - logger.error("Error while checking the vm " + vm + " is on host " + destHost, e); + logger.error("Error while checking the vm {} is on host {}", vm, destHost, e); } migrated = true; } finally { if (!migrated) { - logger.info("Migration was unsuccessful. Cleaning up: " + vm); + logger.info("Migration was unsuccessful. Cleaning up: {}", vm); _networkMgr.rollbackNicForMigration(vmSrc, profile); volumeMgr.release(vm.getId(), destHostId); @@ -3239,7 +3228,7 @@ protected void cancelWorkItems(final long nodeId) { try { final List works = _workDao.listWorkInProgressFor(nodeId); for (final ItWorkVO work : works) { - logger.info("Handling unfinished work item: " + work); + logger.info("Handling unfinished work item: {}", work); try { final VMInstanceVO vm = _vmDao.findById(work.getInstanceId()); if (vm != null) { @@ -3260,7 +3249,7 @@ protected void cancelWorkItems(final long nodeId) { } } } catch (final Exception e) { - logger.error("Error while handling " + work, e); + logger.error("Error while handling {}", work, e); } } } finally { @@ -3282,7 +3271,7 @@ public void migrateAway(final String vmUuid, final long srcHostId) throws Insuff try { orchestrateMigrateAway(vmUuid, srcHostId, null); } catch (final InsufficientServerCapacityException e) { - logger.warn("Failed to deploy vm " + vmUuid + " with original planner, sending HAPlanner"); + logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner", vmUuid); orchestrateMigrateAway(vmUuid, srcHostId, _haMgr.getHAPlanner()); } } finally { @@ -3345,10 +3334,10 @@ private void orchestrateMigrateAway(final String vmUuid, final long srcHostId, f throw new CloudRuntimeException(message, e2); } if (dest == null) { - logger.warn("Unable to find destination for migrating the vm " + profile); + logger.warn("Unable to find destination for migrating the vm {}", profile); throw new InsufficientServerCapacityException("Unable to 
find a server to migrate to.", DataCenter.class, host.getDataCenterId()); } - logger.debug("Found destination " + dest + " for migrating to."); + logger.debug("Found destination {} for migrating to.", dest); excludes.addHost(dest.getHost().getId()); try { @@ -3471,7 +3460,7 @@ private void orchestrateReboot(final String vmUuid, final Map nicDetails = nic.getDetails() == null ? new HashMap<>() : nic.getDetails(); - logger.debug("Found PVLAN type: " + pvlanTypeDetail.getValue() + " on network details, adding it as part of the PlugNicCommand"); + logger.debug("Found PVLAN type: {} on network details, adding it as part of the PlugNicCommand", pvlanTypeDetail.getValue()); nicDetails.putIfAbsent(NetworkOffering.Detail.pvlanType, pvlanTypeDetail.getValue()); nic.setDetails(nicDetails); } @@ -4442,7 +4428,7 @@ public boolean plugNic(final Network network, final NicTO nic, final VirtualMach _agentMgr.send(dest.getHost().getId(), cmds); final PlugNicAnswer plugNicAnswer = cmds.getAnswer(PlugNicAnswer.class); if (plugNicAnswer == null || !plugNicAnswer.getResult()) { - logger.warn("Unable to plug nic for vm " + vm.getName()); + logger.warn("Unable to plug nic for vm {}", vm.getName()); result = false; } } catch (final OperationTimedoutException e) { @@ -4482,14 +4468,14 @@ public boolean unplugNic(final Network network, final NicTO nic, final VirtualMa final UnPlugNicAnswer unplugNicAnswer = cmds.getAnswer(UnPlugNicAnswer.class); if (unplugNicAnswer == null || !unplugNicAnswer.getResult()) { - logger.warn("Unable to unplug nic from router " + router); + logger.warn("Unable to unplug nic from router {}", router); result = false; } } catch (final OperationTimedoutException e) { throw new AgentUnavailableException("Unable to unplug nic from rotuer " + router + " from network " + network, dest.getHost().getId(), e); } } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - logger.debug("Vm " + router.getInstanceName() + " is in " + 
router.getState() + ", so not sending unplug nic command to the backend"); + logger.debug("Vm {} is in {}, so not sending unplug nic command to the backend", router.getInstanceName(), router.getState()); } else { String message = String.format("Unable to apply unplug nic, VM [%s] is not in the right state (\"Running\"). VM state [%s].", router.toString(), router.getState()); logger.warn(message); @@ -4578,7 +4564,7 @@ private VMInstanceVO orchestrateReConfigureVm(String vmUuid, ServiceOffering old Answer reconfigureAnswer = _agentMgr.send(vm.getHostId(), scaleVmCommand); if (reconfigureAnswer == null || !reconfigureAnswer.getResult()) { - logger.error("Unable to scale vm due to " + (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails())); + logger.error("Unable to scale vm due to {}", (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails())); throw new CloudRuntimeException("Unable to scale vm due to " + (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails())); } @@ -4684,10 +4670,10 @@ protected void HandlePowerStateReport(final String subject, final String senderA break; } } else { - logger.warn("VM " + vmId + " no longer exists when processing VM state report"); + logger.warn("VM {} no longer exists when processing VM state report.", vmId); } } else { - logger.info("There is pending job or HA tasks working on the VM. vm id: " + vmId + ", postpone power-change report by resetting power-change counters"); + logger.info("There is pending job or HA tasks working on the VM. 
vm id: {}, postpone power-change report by resetting power-change counters.", vmId ); _vmDao.resetVmPowerStateTracking(vmId); } } @@ -4698,7 +4684,7 @@ private void handlePowerOnReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { switch (vm.getState()) { case Starting: - logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); + logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm.getInstanceName(), vm.getState()); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); @@ -4706,7 +4692,7 @@ private void handlePowerOnReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { logger.warn("Unexpected VM state transition exception, race-condition?", e); } - logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); + logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm.getInstanceName()); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() @@ -4727,7 +4713,7 @@ private void handlePowerOnReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { case Stopping: case Stopped: - logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); + logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm.getInstanceName(), vm.getState()); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); @@ -4740,29 +4726,27 @@ private void handlePowerOnReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, vm.getDomainId(), 
EventTypes.EVENT_VM_START, "Out of band VM power on", vm.getId(), ApiCommandResourceType.VirtualMachine.toString()); - logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); + logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm.getInstanceName()); break; case Destroyed: case Expunging: - logger.info("Receive power on report when VM is in destroyed or expunging state. vm: " - + vm.getId() + ", state: " + vm.getState()); + logger.info("Receive power on report when VM is in destroyed or expunging state. vm: {}, state: {}.", vm.getId(), vm.getState()); break; case Migrating: - logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); + logger.info("VM {} is at {} and we received a power-on report while there is no pending jobs on it.", vm.getInstanceName(), vm.getState()); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { logger.warn("Unexpected VM state transition exception, race-condition?", e); } - logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); + logger.info("VM {} is sync-ed to at Running state according to power-on report from hypervisor.", vm.getInstanceName()); break; case Error: default: - logger.info("Receive power on report when VM is in error or unexpected state. vm: " - + vm.getId() + ", state: " + vm.getState()); + logger.info("Receive power on report when VM is in error or unexpected state. 
vm: {}, state: {}.", vm.getId(), vm.getState()); break; } } @@ -4782,11 +4766,11 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { && HaVmRestartHostUp.value() && vm.getHypervisorType() != HypervisorType.VMware && vm.getHypervisorType() != HypervisorType.Hyperv) { - logger.info("Detected out-of-band stop of a HA enabled VM " + vm.getInstanceName() + ", will schedule restart"); + logger.info("Detected out-of-band stop of a HA enabled VM {}, will schedule restart.", vm.getInstanceName()); if (!_haMgr.hasPendingHaWork(vm.getId())) { _haMgr.scheduleRestart(vm, true); } else { - logger.info("VM " + vm.getInstanceName() + " already has an pending HA task working on it"); + logger.info("VM {} already has a pending HA task working on it.", vm.getInstanceName()); } return; } @@ -4816,7 +4800,7 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (" + vm.getState() + " -> Stopped) from out-of-context transition."); - logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Stopped state according to power-off report from hypervisor"); + logger.info("VM {} is sync-ed to at Stopped state according to power-off report from hypervisor.", vm.getInstanceName()); break; @@ -5378,7 +5362,7 @@ private Pair orchestrateMigrateAway(final VmWorkMigrateA try { orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), null); } catch (final InsufficientServerCapacityException e) { - logger.warn("Failed to deploy vm " + vm.getId() + " with original planner, sending HAPlanner", e); + logger.warn("Failed to deploy vm {} with original planner, sending HAPlanner.", vm.getId(), e); orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), _haMgr.getHAPlanner()); } From af2bfb7383509aec2f20c728e0a0372ef1db2ba3 Mon Sep 17 00:00:00 2001 From: Klaus de Freitas Dornsbach Date: Fri, 19 Apr 2024 12:29:26 -0300 Subject: [PATCH 4/6] 
addressing comments --- .../cloud/vm/VirtualMachineManagerImpl.java | 32 +- .../vm/VirtualMachinePowerStateSyncImpl.java | 39 ++- .../com/cloud/vm/VmWorkJobDispatcher.java | 20 +- .../orchestration/NetworkOrchestrator.java | 300 +++++++++--------- .../orchestration/StorageOrchestrator.java | 10 +- 5 files changed, 196 insertions(+), 205 deletions(-) diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 8414fad01edc..824b9f5f45d6 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -1343,7 +1343,7 @@ public void orchestrateStart(final String vmUuid, final Map vlanToPersistenceMap = getVlanToPersistenceMapForVM(vm.getId()); @@ -2429,7 +2429,7 @@ private Answer[] attemptHypervisorMigration(VMInstanceVO vm, Map volumeToPool, Long sourceClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException { - logger.debug("Cleaning up after hypervisor pool migration volumes for VM %s(%s)", vm.getInstanceName(), vm.getUuid()); + logger.debug("Cleaning up after hypervisor pool migration volumes for VM {}({})", vm.getInstanceName(), vm.getUuid()); StoragePool rootVolumePool = null; if (MapUtils.isNotEmpty(volumeToPool)) { @@ -2443,7 +2443,7 @@ private void afterHypervisorMigrationCleanup(VMInstanceVO vm, Map addVmToNetworkThroughJobQueue( } workJob = pendingWorkJobs.get(0); } else { - logger.trace(String.format("no jobs to add network %s for vm %s yet", network, vm)); + logger.trace("no jobs to add network {} for vm {} yet", network, vm); workJob = createVmWorkJobToAddNetwork(vm, network, requested, context, user, account); } @@ -5600,7 +5600,7 @@ public UserVm restoreVirtualMachine(final long vmId, final Long newTemplateId, f } private UserVm orchestrateRestoreVirtualMachine(final long vmId, final Long 
newTemplateId, final Long rootDiskOfferingId, final boolean expunge, final Map details) throws ResourceUnavailableException, InsufficientCapacityException { - logger.debug("Restoring vm " + vmId + " with templateId : " + newTemplateId + " diskOfferingId : " + rootDiskOfferingId + " details : " + details); + logger.debug("Restoring vm {} with templateId: {}, diskOfferingId: {}, details: {}", vmId, newTemplateId, rootDiskOfferingId, details); final CallContext context = CallContext.current(); final Account account = context.getCallingAccount(); return _userVmService.restoreVirtualMachine(account, vmId, newTemplateId, rootDiskOfferingId, expunge, details); @@ -5668,7 +5668,7 @@ public Boolean updateDefaultNicForVM(final VirtualMachine vm, final Nic nic, fin private Boolean orchestrateUpdateDefaultNicForVM(final VirtualMachine vm, final Nic nic, final Nic defaultNic) { - logger.debug("Updating default nic of vm " + vm + " from nic " + defaultNic.getUuid() + " to nic " + nic.getUuid()); + logger.debug("Updating default nic of vm {} from nic {} to nic {}", vm, defaultNic.getUuid(), nic.getUuid()); Integer chosenID = nic.getDeviceId(); Integer existingID = defaultNic.getDeviceId(); NicVO nicVO = _nicsDao.findById(nic.getId()); @@ -6015,9 +6015,7 @@ protected boolean isDiskOfferingSuitableForVm(VMInstanceVO vm, VirtualMachinePro for (StoragePoolAllocator allocator : _storagePoolAllocators) { List poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, 1); if (CollectionUtils.isNotEmpty(poolListFromAllocator)) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Found a suitable pool: %s for disk offering: %s", poolListFromAllocator.get(0).getName(), diskOffering.getName())); - } + logger.debug("Found a suitable pool: {} for disk offering: {}", poolListFromAllocator.get(0).getName(), diskOffering.getName()); return true; } } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java 
b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java index 779d62eed47e..4c89a75d2151 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java @@ -47,13 +47,13 @@ public VirtualMachinePowerStateSyncImpl() { @Override public void resetHostSyncState(long hostId) { - logger.info("Reset VM power state sync for host: " + hostId); + logger.info("Reset VM power state sync for host: {}.", hostId); _instanceDao.resetHostPowerStateTracking(hostId); } @Override public void processHostVmStateReport(long hostId, Map report) { - logger.debug("Process host VM state report. host: " + hostId); + logger.debug("Process host VM state report. host: {}.", hostId); Map translatedInfo = convertVmStateReport(report); processReport(hostId, translatedInfo, false); @@ -61,7 +61,7 @@ public void processHostVmStateReport(long hostId, Map report, boolean force) { - logger.debug("Process host VM state report from ping process. host: " + hostId); + logger.debug("Process host VM state report from ping process. host: {}.", hostId); Map translatedInfo = convertVmStateReport(report); processReport(hostId, translatedInfo, force); @@ -69,18 +69,18 @@ public void processHostVmStatePingReport(long hostId, Map translatedInfo, boolean force) { - logger.debug("Process VM state report. host: " + hostId + ", number of records in report: " + translatedInfo.size()); + logger.debug("Process VM state report. host: {}, number of records in report: {}.", hostId, translatedInfo.size()); for (Map.Entry entry : translatedInfo.entrySet()) { - logger.debug("VM state report. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue()); + logger.debug("VM state report. 
host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue()); if (_instanceDao.updatePowerState(entry.getKey(), hostId, entry.getValue(), DateUtil.currentGMTTime())) { - logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue()); + logger.debug("VM state report is updated. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue()); _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey()); } else { - logger.trace("VM power state does not change, skip DB writing. vm id: " + entry.getKey()); + logger.trace("VM power state does not change, skip DB writing. vm id: {}.", entry.getKey()); } } @@ -99,7 +99,7 @@ private void processReport(long hostId, Map tra // here we need to be wary of out of band migration as opposed to other, more unexpected state changes if (vmsThatAreMissingReport.size() > 0) { Date currentTime = DateUtil.currentGMTTime(); - logger.debug("Run missing VM report. current time: " + currentTime.getTime()); + logger.debug("Run missing VM report. 
current time: {}", currentTime.getTime()); // 2 times of sync-update interval for graceful period long milliSecondsGracefullPeriod = mgmtServiceConf.getPingInterval() * 2000L; @@ -109,7 +109,7 @@ private void processReport(long hostId, Map tra // Make sure powerState is up to date for missing VMs try { if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) { - logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: " + instance.getId()); + logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: {}.", instance.getId()); _instanceDao.resetVmPowerStateTracking(instance.getId()); continue; } @@ -120,45 +120,44 @@ private void processReport(long hostId, Map tra Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); if (vmStateUpdateTime == null) { - logger.warn("VM power state update time is null, falling back to update time for vm id: " + instance.getId()); + logger.warn("VM power state update time is null, falling back to update time for vm id: {}.", instance.getId()); vmStateUpdateTime = instance.getUpdateTime(); if (vmStateUpdateTime == null) { - logger.warn("VM update time is null, falling back to creation time for vm id: " + instance.getId()); + logger.warn("VM update time is null, falling back to creation time for vm id: {}", instance.getId()); vmStateUpdateTime = instance.getCreated(); } } String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime); - logger.debug( - String.format("Detected missing VM. host: %d, vm id: %d(%s), power state: %s, last state update: %s" + logger.debug("Detected missing VM. 
host: {}, vm id: {}({}), power state: {}, last state update: {}" , hostId , instance.getId() , instance.getUuid() , VirtualMachine.PowerState.PowerReportMissing - , lastTime)); + , lastTime); long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) { - logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has passed graceful period"); + logger.debug("vm id: {} - time since last state update({}ms) has passed graceful period.", instance.getId(), milliSecondsSinceLastStateUpdate); // this is were a race condition might have happened if we don't re-fetch the instance; // between the startime of this job and the currentTime of this missing-branch // an update might have occurred that we should not override in case of out of band migration if (_instanceDao.updatePowerState(instance.getId(), hostId, VirtualMachine.PowerState.PowerReportMissing, startTime)) { - logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + instance.getId() + ", power state: PowerReportMissing "); + logger.debug("VM state report is updated. host: {}, vm id: {}, power state: PowerReportMissing.", hostId, instance.getId()); _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId()); } else { - logger.debug("VM power state does not change, skip DB writing. vm id: " + instance.getId()); + logger.debug("VM power state does not change, skip DB writing. 
vm id: {}", instance.getId()); } } else { - logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has not passed graceful period yet"); + logger.debug("vm id: {} - time since last state update({}ms) has not passed graceful period yet.", instance.getId(), milliSecondsSinceLastStateUpdate); } } } - logger.debug("Done with process of VM state report. host: " + hostId); + logger.debug("Done with process of VM state report. host: {}", hostId); } @Override @@ -173,7 +172,7 @@ public Map convertVmStateReport(Map existingConfiguration(final NetworkOffering offering, List configs, final boolean errorIfAlreadySetup) { - if (logger.isDebugEnabled()) { - logger.debug("Found existing network configuration for offering " + offering + ": " + configs.get(0)); - } + logger.debug("Found existing network configuration for offering {}: {}", offering, configs.get(0)); if (errorIfAlreadySetup) { final InvalidParameterValueException ex = new InvalidParameterValueException( @@ -1000,7 +998,7 @@ private int determineNumberOfNicsRequired(final VirtualMachineProfile vm, final */ private void createExtraNics(final VirtualMachineProfile vm, int size, List nics, Network finalNetwork) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException { if (nics.size() != size) { - logger.warn("Number of nics " + nics.size() + " doesn't match number of requested nics " + size); + logger.warn("Number of nics {} doesn't match number of requested nics {}", nics.size(), size); if (nics.size() > size) { throw new CloudRuntimeException("Number of nics " + nics.size() + " doesn't match number of requested networks " + size); } else { @@ -1039,7 +1037,7 @@ public Pair allocateNic(final NicProfile requested, final N throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException { final NetworkVO ntwkVO = _networksDao.findById(network.getId()); - 
logger.debug("Allocating nic for vm " + vm.getVirtualMachine() + " in network " + network + " with requested profile " + requested); + logger.debug("Allocating nic for vm {} in network {} with requested profile {}", vm.getVirtualMachine(), network, requested); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, ntwkVO.getGuruName()); if (requested != null && requested.getMode() == null) { @@ -1429,17 +1427,17 @@ private void setupPersistentNetwork(NetworkVO network, NetworkOfferingVO offerin final SetupPersistentNetworkAnswer answer = (SetupPersistentNetworkAnswer) _agentMgr.send(host.getId(), cmd); if (answer == null) { - logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent:" + host.getId()); + logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent: {}", host.getId()); clusterToHostsMap.get(host.getClusterId()).remove(host.getId()); continue; } if (!answer.getResult()) { - logger.warn("Unable to setup agent " + host.getId() + " due to " + answer.getDetails()); + logger.warn("Unable to setup agent {} due to {}", host.getId(), answer.getDetails()); clusterToHostsMap.get(host.getClusterId()).remove(host.getId()); } } catch (Exception e) { - logger.warn("Failed to connect to host: " + host.getName()); + logger.warn("Failed to connect to host: {}", host.getName()); } } if (clusterToHostsMap.keySet().size() != clusterVOs.size()) { @@ -1466,7 +1464,7 @@ public Pair implementNetwork(final long networkId, final NetworkVO network = _networksDao.findById(networkId); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName()); if (isNetworkImplemented(network)) { - logger.debug("Network id=" + networkId + " is already implemented"); + logger.debug("Network id={} is already implemented", networkId); implemented.set(guru, network); return implemented; } @@ -1480,16 +1478,16 @@ public Pair implementNetwork(final long networkId, final throw ex; } - logger.debug("Lock is 
acquired for network id " + networkId + " as a part of network implement"); + logger.debug("Lock is acquired for network id {} as a part of network implement", networkId); try { if (isNetworkImplemented(network)) { - logger.debug("Network id=" + networkId + " is already implemented"); + logger.debug("Network id={} is already implemented", networkId); implemented.set(guru, network); return implemented; } - logger.debug("Asking " + guru.getName() + " to implement " + network); + logger.debug("Asking {} to implement {}", guru.getName(), network); final NetworkOfferingVO offering = _networkOfferingDao.findById(network.getNetworkOfferingId()); @@ -1529,11 +1527,11 @@ public Pair implementNetwork(final long networkId, final logger.error(e.getMessage()); return new Pair(null, null); } catch (final CloudRuntimeException | OperationTimedoutException e) { - logger.error("Caught exception: " + e.getMessage()); + logger.error("Caught exception: {}", e.getMessage()); return new Pair(null, null); } finally { if (implemented.first() == null) { - logger.debug("Cleaning up because we're unable to implement the network " + network); + logger.debug("Cleaning up because we're unable to implement the network {}", network); try { if (isSharedNetworkWithServices(network)) { network.setState(Network.State.Shutdown); @@ -1554,7 +1552,7 @@ public Pair implementNetwork(final long networkId, final } _networksDao.releaseFromLockTable(networkId); - logger.debug("Lock is released for network id " + networkId + " as a part of network implement"); + logger.debug("Lock is released for network id {} as a part of network implement", networkId); } } @@ -1579,13 +1577,13 @@ public void implementNetworkElementsAndResources(final DeployDestination dest, f ips = _ipAddressDao.listByAssociatedVpc(network.getVpcId(), true); if (ips.isEmpty()) { final Vpc vpc = _vpcMgr.getActiveVpc(network.getVpcId()); - logger.debug("Creating a source nat ip for vpc " + vpc); + logger.debug("Creating a source nat ip for vpc 
{}", vpc); _vpcMgr.assignSourceNatIpAddressToVpc(owner, vpc); } } else { ips = _ipAddressDao.listByAssociatedNetwork(network.getId(), true); if (ips.isEmpty()) { - logger.debug("Creating a source nat ip for network " + network); + logger.debug("Creating a source nat ip for network {}", network); _ipAddrMgr.assignSourceNatIpAddressToGuestNetwork(owner, network); } } @@ -1610,9 +1608,9 @@ public void implementNetworkElementsAndResources(final DeployDestination dest, f try { // reapply all the firewall/staticNat/lb rules - logger.debug("Reprogramming network " + network + " as a part of network implement"); + logger.debug("Reprogramming network {} as a part of network implement", network); if (!reprogramNetworkRules(network.getId(), CallContext.current().getCallingAccount(), network)) { - logger.warn("Failed to re-program the network as a part of network " + network + " implement"); + logger.warn("Failed to re-program the network as a part of network {} implement", network); // see DataCenterVO.java final ResourceUnavailableException ex = new ResourceUnavailableException("Unable to apply network rules as a part of network " + network + " implement", DataCenter.class, network.getDataCenterId()); @@ -1622,7 +1620,7 @@ public void implementNetworkElementsAndResources(final DeployDestination dest, f for (final NetworkElement element : networkElements) { if (element instanceof AggregatedCommandExecutor && providersToImplement.contains(element.getProvider())) { if (!((AggregatedCommandExecutor) element).completeAggregatedExecution(network, dest)) { - logger.warn("Failed to re-program the network as a part of network " + network + " implement due to aggregated commands execution failure!"); + logger.warn("Failed to re-program the network as a part of network {} implement due to aggregated commands execution failure!", network); // see DataCenterVO.java final ResourceUnavailableException ex = new ResourceUnavailableException("Unable to apply network rules as a part of network 
" + network + " implement", DataCenter.class, network.getDataCenterId()); @@ -1653,7 +1651,7 @@ private void implementNetworkElements(final DeployDestination dest, final Reserv + network.getPhysicalNetworkId()); } - logger.debug("Asking " + element.getName() + " to implement " + network); + logger.debug("Asking {} to implement {}", element.getName(), network); if (!element.implement(network, offering, dest, context)) { CloudRuntimeException ex = new CloudRuntimeException("Failed to implement provider " + element.getProvider().getName() + " for network with specified id"); @@ -1678,50 +1676,50 @@ protected boolean reprogramNetworkRules(final long networkId, final Account call _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), offering.isEgressDefaultPolicy(), true); } if (!_firewallMgr.applyFirewallRules(firewallEgressRulesToApply, false, caller)) { - logger.warn("Failed to reapply firewall Egress rule(s) as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply firewall Egress rule(s) as a part of network id={} restart", networkId); success = false; } // associate all ip addresses if (!_ipAddrMgr.applyIpAssociations(network, false)) { - logger.warn("Failed to apply ip addresses as a part of network id" + networkId + " restart"); + logger.warn("Failed to apply ip addresses as a part of network id {} restart", networkId); success = false; } // apply static nat if (!_rulesMgr.applyStaticNatsForNetwork(networkId, false, caller)) { - logger.warn("Failed to apply static nats a part of network id" + networkId + " restart"); + logger.warn("Failed to apply static nats a part of network id {} restart", networkId); success = false; } // apply firewall rules final List firewallIngressRulesToApply = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); if (!_firewallMgr.applyFirewallRules(firewallIngressRulesToApply, false, caller)) { - logger.warn("Failed to reapply Ingress firewall 
rule(s) as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network id={} restart", networkId); success = false; } // apply port forwarding rules if (!_rulesMgr.applyPortForwardingRulesForNetwork(networkId, false, caller)) { - logger.warn("Failed to reapply port forwarding rule(s) as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply port forwarding rule(s) as a part of network id={} restart", networkId); success = false; } // apply static nat rules if (!_rulesMgr.applyStaticNatRulesForNetwork(networkId, false, caller)) { - logger.warn("Failed to reapply static nat rule(s) as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply static nat rule(s) as a part of network id={} restart", networkId); success = false; } // apply public load balancer rules if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Public)) { - logger.warn("Failed to reapply Public load balancer rules as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply Public load balancer rules as a part of network id={} restart", networkId); success = false; } // apply internal load balancer rules if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Internal)) { - logger.warn("Failed to reapply internal load balancer rules as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply internal load balancer rules as a part of network id={} restart", networkId); success = false; } @@ -1731,7 +1729,7 @@ protected boolean reprogramNetworkRules(final long networkId, final Account call for (final RemoteAccessVpn vpn : vpnsToReapply) { // Start remote access vpn per ip if (_vpnMgr.startRemoteAccessVpn(vpn.getServerAddressId(), false) == null) { - logger.warn("Failed to reapply vpn rules as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply vpn rules as a part of network id={} restart", 
networkId); success = false; } } @@ -1739,7 +1737,7 @@ protected boolean reprogramNetworkRules(final long networkId, final Account call //apply network ACLs if (!_networkACLMgr.applyACLToNetwork(networkId)) { - logger.warn("Failed to reapply network ACLs as a part of of network id=" + networkId + " restart"); + logger.warn("Failed to reapply network ACLs as a part of network id={} restart", networkId); success = false; } @@ -1836,14 +1834,14 @@ public void cleanupConfigForServicesInNetwork(List services, final Netwo Account caller = _accountDao.findById(Account.ACCOUNT_ID_SYSTEM); long userId = User.UID_SYSTEM; //remove all PF/Static Nat rules for the network - logger.info("Services:" + services + " are no longer supported in network:" + network.getUuid() + - " after applying new network offering:" + network.getNetworkOfferingId() + " removing the related configuration"); + logger.info("Services: {} are no longer supported in network: {} after applying new network offering: {} removing the related configuration", + services, network.getUuid(), network.getNetworkOfferingId()); if (services.contains(Service.StaticNat.getName()) || services.contains(Service.PortForwarding.getName())) { try { if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, userId, caller)) { - logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id=" + networkId); + logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id={}", networkId); } else { - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup", networkId); } if (services.contains(Service.StaticNat.getName())) { //removing static nat configured on ips. 
@@ -1862,7 +1860,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { }); } } catch (ResourceUnavailableException ex) { - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); } } if (services.contains(Service.SourceNat.getName())) { @@ -1881,9 +1879,9 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (services.contains(Service.Lb.getName())) { //remove all LB rules for the network if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, userId)) { - logger.debug("Successfully cleaned up load balancing rules for network id=" + networkId); + logger.debug("Successfully cleaned up load balancing rules for network id={}", networkId); } else { - logger.warn("Failed to cleanup LB rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup LB rules as a part of network id={} cleanup", networkId); } } @@ -1891,12 +1889,12 @@ public void doInTransactionWithoutResult(TransactionStatus status) { //revoke all firewall rules for the network try { if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, userId, caller)) { - logger.debug("Successfully cleaned up firewallRules rules for network id=" + networkId); + logger.debug("Successfully cleaned up firewallRules rules for network id={}", networkId); } else { - logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup", networkId); } } catch (ResourceUnavailableException ex) { - logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to cleanup Firewall rules as a part of network id={} 
cleanup due to resourceUnavailable", networkId, ex); } } @@ -1906,7 +1904,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { _vpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller, true); } catch (ResourceUnavailableException ex) { - logger.warn("Failed to cleanup remote access vpn resources of network:" + network.getUuid() + " due to Exception: ", ex); + logger.warn("Failed to cleanup remote access vpn resources of network: {} due to Exception: ", network.getUuid(), ex); } } } @@ -2010,7 +2008,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { _nicDao.update(nic.getId(), nic); if (nic.getVmType() == VirtualMachine.Type.User) { - logger.debug("Changing active number of nics for network id=" + networkId + " on " + count); + logger.debug("Changing active number of nics for network id={} on {}", networkId, count); _networksDao.changeActiveNicsBy(networkId, count); } @@ -2043,7 +2041,7 @@ public int compare(final NicVO nic1, final NicVO nic2) { for (final NicVO nic : nics) { final Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - logger.warn("Failed to implement network id=" + nic.getNetworkId() + " as a part of preparing nic id=" + nic.getId()); + logger.warn("Failed to implement network id={} as a part of preparing nic id={}", nic.getNetworkId(), nic.getId()); throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId()); } @@ -2118,7 +2116,7 @@ public NicProfile prepareNic(final VirtualMachineProfile vmProfile, final Deploy throw new CloudRuntimeException("Service provider " + element.getProvider().getName() + " either doesn't exist or is not enabled in physical network id: " + network.getPhysicalNetworkId()); } - logger.debug("Asking " + element.getName() + " to prepare for " + 
nic); + logger.debug("Asking {} to prepare for {}", element.getName(), nic); if (!prepareElement(element, network, profile, vmProfile, dest, context)) { throw new InsufficientAddressCapacityException("unable to configure the dhcp service, due to insufficiant address capacity", Network.class, network.getId()); } @@ -2158,7 +2156,7 @@ public void prepareNicForMigration(final VirtualMachineProfile vm, final DeployD _networkModel.getNetworkTag(vm.getHypervisorType(), network)); if (guru instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder) guru).prepareMigration(profile, network, vm, dest, context)) { - logger.error("NetworkGuru " + guru + " prepareForMigration failed."); // XXX: Transaction error + logger.error("NetworkGuru {} prepareForMigration failed.", guru); // XXX: Transaction error } } @@ -2175,7 +2173,7 @@ public void prepareNicForMigration(final VirtualMachineProfile vm, final DeployD } if (element instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder) element).prepareMigration(profile, network, vm, dest, context)) { - logger.error("NetworkElement " + element + " prepareForMigration failed."); // XXX: Transaction error + logger.error("NetworkElement {} prepareForMigration failed.", element); // XXX: Transaction error } } } @@ -2207,7 +2205,7 @@ public void prepareAllNicsForMigration(final VirtualMachineProfile vm, final Dep _networkModel.isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vm.getHypervisorType(), network)); if (guru instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder) guru).prepareMigration(profile, network, vm, dest, context)) { - logger.error("NetworkGuru " + guru + " prepareForMigration failed."); // XXX: Transaction error + logger.error("NetworkGuru {} prepareForMigration failed.", guru); // XXX: Transaction error } } final List providersToImplement = getNetworkProviders(network.getId()); @@ -2218,7 +2216,7 @@ public void prepareAllNicsForMigration(final 
VirtualMachineProfile vm, final Dep } if (element instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder) element).prepareMigration(profile, network, vm, dest, context)) { - logger.error("NetworkElement " + element + " prepareForMigration failed."); // XXX: Transaction error + logger.error("NetworkElement {} prepareForMigration failed.", element); // XXX: Transaction error } } } @@ -2239,7 +2237,7 @@ public void prepareAllNicsForMigration(final VirtualMachineProfile vm, final Dep if (nic == null && !addedURIs.contains(broadcastUri.toString())) { //Nic details are not available in DB //Create nic profile for migration - logger.debug("Creating nic profile for migration. BroadcastUri: " + broadcastUri.toString() + " NetworkId: " + ntwkId + " Vm: " + vm.getId()); + logger.debug("Creating nic profile for migration. BroadcastUri: {} NetworkId: {} VM: {}", broadcastUri.toString(), ntwkId, vm.getId()); final NetworkVO network = _networksDao.findById(ntwkId); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName()); final NicProfile profile = new NicProfile(); @@ -2378,9 +2376,7 @@ public Pair doInTransaction(final TransactionStatus status) final NicProfile profile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), null, _networkModel .isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vmProfile.getHypervisorType(), network)); if (guru.release(profile, vmProfile, nic.getReservationId())) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("The nic %s on %s was released according to %s by guru %s, now updating record.", nic, profile, vmProfile, guru)); - } + logger.debug("The nic {} on {} was released according to {} by guru {}, now updating record.", nic, profile, vmProfile, guru); applyProfileToNicForRelease(nic, profile); nic.setState(Nic.State.Allocated); if (originalState == Nic.State.Reserved) { @@ -2419,7 +2415,7 @@ public Pair doInTransaction(final 
TransactionStatus status) final List providersToImplement = getNetworkProviders(network.getId()); for (final NetworkElement element : networkElements) { if (providersToImplement.contains(element.getProvider())) { - logger.debug("Asking " + element.getName() + " to release " + profile); + logger.debug("Asking {} to release {}", element.getName(), profile); //NOTE: Context appear to never be used in release method //implementations. Consider removing it from interface Element element.release(network, profile, vmProfile, null); @@ -2430,7 +2426,7 @@ public Pair doInTransaction(final TransactionStatus status) @Override public void cleanupNics(final VirtualMachineProfile vm) { - logger.debug("Cleaning network for vm: " + vm.getId()); + logger.debug("Cleaning network for vm: {}", vm.getId()); final List nics = _nicDao.listByVmId(vm.getId()); for (final NicVO nic : nics) { @@ -2451,7 +2447,7 @@ protected void removeNic(final VirtualMachineProfile vm, final NicVO nic) { try { releaseNic(vm, nic.getId()); } catch (final Exception ex) { - logger.warn("Failed to release nic: " + nic.toString() + " as part of remove operation due to", ex); + logger.warn("Failed to release nic: {} as part of remove operation due to", nic.toString(), ex); } } @@ -2527,7 +2523,7 @@ && isDhcpAccrossMultipleSubnetsSupported(dhcpServiceProvider)) { _nicDao.remove(nic.getId()); } - logger.debug("Removed nic id=" + nic.getId()); + logger.debug("Removed nic id={}", nic.getId()); // release assigned IPv6 for Isolated Network VR NIC if (Type.User.equals(vm.getType()) && GuestType.Isolated.equals(network.getGuestType()) @@ -2540,7 +2536,7 @@ && isDhcpAccrossMultipleSubnetsSupported(dhcpServiceProvider)) { //remove the secondary ip addresses corresponding to this nic if (!removeVmSecondaryIpsOfNic(nic.getId())) { - logger.debug("Removing nic " + nic.getId() + " secondary ip addresses failed"); + logger.debug("Removing nic {} secondary ip addresses failed", nic.getId()); } } @@ -2579,7 +2575,7 @@ public 
void doInTransactionWithoutResult(final TransactionStatus status) { } }); if (!dhcpServiceProvider.removeDhcpSupportForSubnet(network)) { - logger.warn("Failed to remove the ip alias on the router, marking it as removed in db and freed the allocated ip " + ipAlias.getIp4Address()); + logger.warn("Failed to remove the ip alias on the router, marking it as removed in db and freed the allocated ip {}", ipAlias.getIp4Address()); } } } catch (final ResourceUnavailableException e) { @@ -3064,12 +3060,12 @@ protected void checkL2OfferingServices(NetworkOfferingVO ntwkOff) { public boolean shutdownNetwork(final long networkId, final ReservationContext context, final boolean cleanupElements) { NetworkVO network = _networksDao.findById(networkId); if (network.getState() == Network.State.Allocated) { - logger.debug("Network is already shutdown: " + network); + logger.debug("Network is already shutdown: {}", network); return true; } if (network.getState() != Network.State.Implemented && network.getState() != Network.State.Shutdown) { - logger.debug("Network is not implemented: " + network); + logger.debug("Network is not implemented: {}", network); return false; } @@ -3077,20 +3073,18 @@ public boolean shutdownNetwork(final long networkId, final ReservationContext co //do global lock for the network network = _networksDao.acquireInLockTable(networkId, NetworkLockTimeout.value()); if (network == null) { - logger.warn("Unable to acquire lock for the network " + network + " as a part of network shutdown"); + logger.warn("Unable to acquire lock for the network {} as a part of network shutdown", network); return false; } - if (logger.isDebugEnabled()) { - logger.debug("Lock is acquired for network " + network + " as a part of network shutdown"); - } + logger.debug("Lock is acquired for network {} as a part of network shutdown", network); if (network.getState() == Network.State.Allocated) { - logger.debug("Network is already shutdown: " + network); + logger.debug("Network is 
already shutdown: {}", network); return true; } if (network.getState() != Network.State.Implemented && network.getState() != Network.State.Shutdown) { - logger.debug("Network is not implemented: " + network); + logger.debug("Network is not implemented: {}", network); return false; } @@ -3115,7 +3109,7 @@ public Boolean doInTransaction(final TransactionStatus status) { boolean result = false; if (success) { - logger.debug("Network id=" + networkId + " is shutdown successfully, cleaning up corresponding resources now."); + logger.debug("Network id={} is shutdown successfully, cleaning up corresponding resources now.", networkId); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, networkFinal.getGuruName()); final NetworkProfile profile = convertNetworkToNetworkProfile(networkFinal.getId()); guru.shutdown(profile, _networkOfferingDao.findById(networkFinal.getNetworkOfferingId())); @@ -3153,7 +3147,7 @@ public Boolean doInTransaction(final TransactionStatus status) { } finally { if (network != null) { _networksDao.releaseFromLockTable(network.getId()); - logger.debug("Lock is released for network " + network + " as a part of network shutdown"); + logger.debug("Lock is released for network {} as a part of network shutdown", network); } } } @@ -3179,11 +3173,11 @@ public boolean shutdownNetworkElementsAndResources(final ReservationContext cont cleanupResult = shutdownNetworkResources(network.getId(), context.getAccount(), context.getCaller().getId()); } } catch (final Exception ex) { - logger.warn("shutdownNetworkRules failed during the network " + network + " shutdown due to ", ex); + logger.warn("shutdownNetworkRules failed during the network {} shutdown due to", network, ex); } finally { // just warn the administrator that the network elements failed to shutdown if (!cleanupResult) { - logger.warn("Failed to cleanup network id=" + network.getId() + " resources as a part of shutdownNetwork"); + logger.warn("Failed to cleanup network id={} resources as 
a part of shutdownNetwork", network.getId()); } } @@ -3192,19 +3186,19 @@ public boolean shutdownNetworkElementsAndResources(final ReservationContext cont for (final NetworkElement element : networkElements) { if (providersToShutdown.contains(element.getProvider())) { try { - logger.debug("Sending network shutdown to " + element.getName()); + logger.debug("Sending network shutdown to {}", element.getName()); if (!element.shutdown(network, context, cleanupElements)) { - logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName()); + logger.warn("Unable to complete shutdown of the network elements due to element: {}", element.getName()); success = false; } } catch (final ResourceUnavailableException e) { - logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); + logger.warn("Unable to complete shutdown of the network elements due to element: {}", element.getName(), e); success = false; } catch (final ConcurrentOperationException e) { - logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); + logger.warn("Unable to complete shutdown of the network elements due to element: {}", element.getName(), e); success = false; } catch (final Exception e) { - logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); + logger.warn("Unable to complete shutdown of the network elements due to element: {}", element.getName(), e); success = false; } } @@ -3225,15 +3219,15 @@ private void cleanupPersistentnNetworkResources(NetworkVO network) { CleanupPersistentNetworkResourceCommand cmd = new CleanupPersistentNetworkResourceCommand(to); CleanupPersistentNetworkResourceAnswer answer = (CleanupPersistentNetworkResourceAnswer) _agentMgr.send(host.getId(), cmd); if (answer == null) { - logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent:" + 
host.getId()); + logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent: {}", host.getId()); continue; } if (!answer.getResult()) { - logger.warn("Unable to setup agent " + host.getId() + " due to " + answer.getDetails()); + logger.warn("Unable to setup agent {} due to {}", host.getId(), answer.getDetails()); } } catch (Exception e) { - logger.warn("Failed to cleanup network resources on host: " + host.getName()); + logger.warn("Failed to cleanup network resources on host: {}", host.getName()); } } } @@ -3247,7 +3241,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con NetworkVO network = _networksDao.findById(networkId); if (network == null) { - logger.debug("Unable to find network with id: " + networkId); + logger.debug("Unable to find network with id: {}", networkId); return false; } // Make sure that there are no user vms in the network that are not Expunged/Error @@ -3255,7 +3249,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con for (final UserVmVO vm : userVms) { if (!(vm.getState() == VirtualMachine.State.Expunging && vm.getRemoved() != null)) { - logger.warn("Can't delete the network, not all user vms are expunged. Vm " + vm + " is in " + vm.getState() + " state"); + logger.warn("Can't delete the network, not all user vms are expunged. 
Vm {} is in {} state", vm, vm.getState()); return false; } } @@ -3263,7 +3257,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con // Don't allow to delete network via api call when it has vms assigned to it final int nicCount = getActiveNicsInNetwork(networkId); if (nicCount > 0) { - logger.debug("The network id=" + networkId + " has active Nics, but shouldn't."); + logger.debug("The network id={} has active Nics, but shouldn't.", networkId); // at this point we have already determined that there are no active user vms in network // if the op_networks table shows active nics, it's a bug in releasing nics updating op_networks _networksDao.changeActiveNicsBy(networkId, -1 * nicCount); @@ -3287,13 +3281,13 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con // get updated state for the network network = _networksDao.findById(networkId); if (network.getState() != Network.State.Allocated && network.getState() != Network.State.Setup && !forced) { - logger.debug("Network is not in the correct state to be destroyed: " + network.getState()); + logger.debug("Network is not in the correct state to be destroyed: {}", network.getState()); return false; } boolean success = true; if (!cleanupNetworkResources(networkId, callerAccount, context.getCaller().getId())) { - logger.warn("Unable to delete network id=" + networkId + ": failed to cleanup network resources"); + logger.warn("Unable to delete network id={}: failed to cleanup network resources", networkId); return false; } @@ -3302,27 +3296,27 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con for (final NetworkElement element : networkElements) { if (providersToDestroy.contains(element.getProvider())) { try { - logger.debug("Sending destroy to " + element); + logger.debug("Sending destroy to {}", element); if (!element.destroy(network, context)) { success = false; - logger.warn("Unable to complete destroy of the network: failed 
to destroy network element " + element.getName()); + logger.warn("Unable to complete destroy of the network: failed to destroy network element {}", element.getName()); } } catch (final ResourceUnavailableException e) { - logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); + logger.warn("Unable to complete destroy of the network due to element: {}", element.getName(), e); success = false; } catch (final ConcurrentOperationException e) { - logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); + logger.warn("Unable to complete destroy of the network due to element: {}", element.getName(), e); success = false; } catch (final Exception e) { - logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); + logger.warn("Unable to complete destroy of the network due to element: {}", element.getName(), e); success = false; } } } if (success) { - logger.debug("Network id=" + networkId + " is destroyed successfully, cleaning up corresponding resources now."); + logger.debug("Network id={} is destroyed successfully, cleaning up corresponding resources now.", networkId); final NetworkVO networkFinal = network; try { @@ -3336,7 +3330,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } if (!deleteVlansInNetwork(networkFinal, context.getCaller().getId(), callerAccount)) { - logger.warn("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges"); + logger.warn("Failed to delete network {}; was unable to cleanup corresponding ip ranges", networkFinal); throw new CloudRuntimeException("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges"); } else { // commit transaction only when ips and vlans for the network are released successfully @@ -3400,7 +3394,7 @@ protected boolean deleteVlansInNetwork(final NetworkVO network, final long userI boolean result = 
true; for (final VlanVO vlan : publicVlans) { if (!_configMgr.deleteVlanAndPublicIpRange(userId, vlan.getId(), callerAccount)) { - logger.warn("Failed to delete vlan " + vlan.getId() + ");"); + logger.warn("Failed to delete vlan {});", vlan.getId()); result = false; } } @@ -3408,16 +3402,16 @@ protected boolean deleteVlansInNetwork(final NetworkVO network, final long userI //cleanup private vlans final int privateIpAllocCount = _privateIpDao.countAllocatedByNetworkId(networkId); if (privateIpAllocCount > 0) { - logger.warn("Can't delete Private ip range for network " + networkId + " as it has allocated ip addresses"); + logger.warn("Can't delete Private ip range for network {} as it has allocated ip addresses", networkId); result = false; } else { _privateIpDao.deleteByNetworkId(networkId); - logger.debug("Deleted ip range for private network id=" + networkId); + logger.debug("Deleted ip range for private network id={}", networkId); } // release vlans of user-shared networks without specifyvlan if (isSharedNetworkWithoutSpecifyVlan(_networkOfferingDao.findById(network.getNetworkOfferingId()))) { - logger.debug("Releasing vnet for the network id=" + network.getId()); + logger.debug("Releasing vnet for the network id={}", network.getId()); _dcDao.releaseVnet(BroadcastDomainType.getValue(network.getBroadcastUri()), network.getDataCenterId(), network.getPhysicalNetworkId(), network.getAccountId(), network.getReservationId()); } @@ -3449,7 +3443,7 @@ public void reallyRun() { final List networkIds = _networksDao.findNetworksToGarbageCollect(); final int netGcWait = NumbersUtil.parseInt(_configDao.getValue(NetworkGcWait.key()), 60); - logger.info("NetworkGarbageCollector uses '" + netGcWait + "' seconds for GC interval."); + logger.info("NetworkGarbageCollector uses '{}' seconds for GC interval.", netGcWait); for (final Long networkId : networkIds) { if (!_networkModel.isNetworkReadyForGc(networkId)) { @@ -3463,10 +3457,10 @@ public void reallyRun() { final Long time = 
_lastNetworkIdsToFree.remove(networkId); if (time == null) { - logger.debug("We found network " + networkId + " to be free for the first time. Adding it to the list: " + currentTime); + logger.debug("We found network {} to be free for the first time. Adding it to the list: {}", networkId, currentTime); stillFree.put(networkId, currentTime); } else if (time > currentTime - netGcWait) { - logger.debug("Network " + networkId + " is still free but it's not time to shutdown yet: " + time); + logger.debug("Network {} is still free but it's not time to shutdown yet: {}",networkId, time); stillFree.put(networkId, time); } else { shutdownList.add(networkId); @@ -3481,7 +3475,7 @@ public void reallyRun() { // If network is removed, unset gc flag for it if (_networksDao.findById(networkId) == null) { - logger.debug("Network id=" + networkId + " is removed, so clearing up corresponding gc check"); + logger.debug("Network id={} is removed, so clearing up corresponding gc check", networkId); _networksDao.clearCheckForGc(networkId); } else { try { @@ -3493,7 +3487,7 @@ public void reallyRun() { shutdownNetwork(networkId, context, false); } catch (final Exception e) { - logger.warn("Unable to shutdown network: " + networkId); + logger.warn("Unable to shutdown network: {}", networkId); } } } @@ -3516,10 +3510,10 @@ public boolean startNetwork(final long networkId, final DeployDestination dest, } // implement the network - logger.debug("Starting network " + network + "..."); + logger.debug("Starting network {}...", network); final Pair implementedNetwork = implementNetwork(networkId, dest, context); if (implementedNetwork == null || implementedNetwork.first() == null) { - logger.warn("Failed to start the network " + network); + logger.warn("Failed to start the network {}", network); return false; } else { return true; @@ -3533,7 +3527,7 @@ public boolean restartNetwork(final Long networkId, final Account callerAccount, boolean restartRequired = false; final NetworkVO network = 
_networksDao.findById(networkId); - logger.debug("Restarting network " + networkId + "..."); + logger.debug("Restarting network {}...", networkId); final ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); final NetworkOffering offering = _networkOfferingDao.findByIdIncludingRemoved(network.getNetworkOfferingId()); @@ -3562,7 +3556,7 @@ public boolean restartNetwork(final Long networkId, final Account callerAccount, } Pair patched = mgr.updateSystemVM(instanceVO, true); if (patched.first()) { - logger.info(String.format("Successfully patched router %s", router)); + logger.info("Successfully patched router {}", router); } } catch (CloudRuntimeException e) { throw new CloudRuntimeException(String.format("Failed to live patch router: %s", router), e); @@ -3571,13 +3565,13 @@ public boolean restartNetwork(final Long networkId, final Account callerAccount, } } - logger.debug("Implementing the network " + network + " elements and resources as a part of network restart without cleanup"); + logger.debug("Implementing the network {} elements and resources as a part of network restart without cleanup", network); try { implementNetworkElementsAndResources(dest, context, network, offering); setRestartRequired(network, false); return true; } catch (final Exception ex) { - logger.warn("Failed to implement network " + network + " elements and resources as a part of network restart due to ", ex); + logger.warn("Failed to implement network {} elements and resources as a part of network restart due to ", network, ex); return false; } } @@ -3590,7 +3584,7 @@ public void destroyExpendableRouters(final List routers router.getState() == VirtualMachine.State.Error || router.getState() == VirtualMachine.State.Shutdown || router.getState() == VirtualMachine.State.Unknown) { - logger.debug("Destroying old router " + router); + logger.debug("Destroying old router {}", router); _routerService.destroyRouter(router.getId(), context.getAccount(), 
context.getCaller().getId()); } else { remainingRouters.add(router); } } @@ -3619,7 +3613,7 @@ public void destroyExpendableRouters(final List routers public boolean areRoutersRunning(final List routers) { for (final VirtualRouter router : routers) { if (router.getState() != VirtualMachine.State.Running) { - logger.debug("Found new router " + router.getInstanceName() + " to be in non-Running state: " + router.getState() + ". Please try restarting network again."); + logger.debug("Found new router {} to be in non-Running state: {}. Please try restarting network again.", router.getInstanceName(), router.getState()); return false; } } @@ -3674,10 +3668,10 @@ private boolean rollingRestartRouters(final NetworkVO network, final NetworkOffe implementNetworkElementsAndResources(dest, context, network, offering); return true; } - logger.debug("Failed to shutdown the network elements and resources as a part of network restart: " + network.getState()); + logger.debug("Failed to shutdown the network elements and resources as a part of network restart: {}", network.getState()); return false; } - logger.debug("Performing rolling restart of routers of network " + network); + logger.debug("Performing rolling restart of routers of network {}", network); destroyExpendableRouters(routerDao.findByNetwork(network.getId()), context); final List providersToImplement = getNetworkProviders(network.getId()); @@ -3718,7 +3712,7 @@ private boolean rollingRestartRouters(final NetworkVO network, final NetworkOffe } private void setRestartRequired(final NetworkVO network, final boolean restartRequired) { - logger.debug("Marking network " + network + " with restartRequired=" + restartRequired); + logger.debug("Marking network {} with restartRequired={}", network, restartRequired); network.setRestartRequired(restartRequired); _networksDao.update(network.getId(), network); } @@ -3742,7 +3736,7 @@ public UserDataServiceProvider getPasswordResetProvider(final Network network) { final String 
passwordProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.UserData); if (passwordProvider == null) { - logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); + logger.debug("Network {} doesn't support service {}", network, Service.UserData.getName()); return null; } @@ -3754,7 +3748,7 @@ public UserDataServiceProvider getSSHKeyResetProvider(final Network network) { final String SSHKeyProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.UserData); if (SSHKeyProvider == null) { - logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); + logger.debug("Network {} doesn't support service {}", network, Service.UserData.getName()); return null; } @@ -3766,7 +3760,7 @@ public DhcpServiceProvider getDhcpServiceProvider(final Network network) { final String DhcpProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.Dhcp); if (DhcpProvider == null) { - logger.debug("Network " + network + " doesn't support service " + Service.Dhcp.getName()); + logger.debug("Network {} doesn't support service {}", network, Service.Dhcp.getName()); return null; } @@ -3783,7 +3777,7 @@ public DnsServiceProvider getDnsServiceProvider(final Network network) { final String dnsProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.Dns); if (dnsProvider == null) { - logger.debug("Network " + network + " doesn't support service " + Service.Dhcp.getName()); + logger.debug("Network {} doesn't support service {}", network, Service.Dhcp.getName()); return null; } @@ -3880,51 +3874,51 @@ private boolean cleanupNetworkResources(final long networkId, final Account call //remove all PF/Static Nat rules for the network try { if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, callerUserId, caller)) { - logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id=" + networkId); + 
logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id={}", networkId); } else { success = false; - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup", networkId); } } catch (final ResourceUnavailableException ex) { success = false; // shouldn't even come here as network is being cleaned up after all network elements are shutdown - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); } //remove all LB rules for the network if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, callerUserId)) { - logger.debug("Successfully cleaned up load balancing rules for network id=" + networkId); + logger.debug("Successfully cleaned up load balancing rules for network id={}", networkId); } else { // shouldn't even come here as network is being cleaned up after all network elements are shutdown success = false; - logger.warn("Failed to cleanup LB rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup LB rules as a part of network id={} cleanup", networkId); } //revoke all firewall rules for the network try { if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, callerUserId, caller)) { - logger.debug("Successfully cleaned up firewallRules rules for network id=" + networkId); + logger.debug("Successfully cleaned up firewallRules rules for network id={}", networkId); } else { success = false; - logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup", networkId); } } catch (final 
ResourceUnavailableException ex) { success = false; // shouldn't even come here as network is being cleaned up after all network elements are shutdown - logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to cleanup Firewall rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); } //revoke all network ACLs for network try { if (_networkACLMgr.revokeACLItemsForNetwork(networkId)) { - logger.debug("Successfully cleaned up NetworkACLs for network id=" + networkId); + logger.debug("Successfully cleaned up NetworkACLs for network id={}", networkId); } else { success = false; - logger.warn("Failed to cleanup NetworkACLs as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup NetworkACLs as a part of network id={} cleanup", networkId); } } catch (final ResourceUnavailableException ex) { success = false; - logger.warn("Failed to cleanup Network ACLs as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to cleanup Network ACLs as a part of network id={} cleanup due to resourceUnavailable ", networkId, ex); } //release all ip addresses @@ -3939,7 +3933,7 @@ private boolean cleanupNetworkResources(final long networkId, final Account call // so as part of network clean up just break IP association with guest network ipToRelease.setAssociatedWithNetworkId(null); _ipAddressDao.update(ipToRelease.getId(), ipToRelease); - logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any network"); + logger.debug("Portable IP address {} is no longer associated with any network", ipToRelease); } } else { _vpcMgr.unassignIPFromVpcNetwork(ipToRelease.getId(), network.getId()); @@ -3948,7 +3942,7 @@ private boolean cleanupNetworkResources(final long networkId, final Account call try { if (!_ipAddrMgr.applyIpAssociations(network, true)) { - 
logger.warn("Unable to apply ip address associations for " + network); + logger.warn("Unable to apply ip address associations for {}", network); success = false; } } catch (final ResourceUnavailableException e) { @@ -3967,10 +3961,10 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal // Mark all PF rules as revoked and apply them on the backend (not in the DB) final List pfRules = _portForwardingRulesDao.listByNetwork(networkId); - logger.debug("Releasing " + pfRules.size() + " port forwarding rules for network id=" + networkId + " as a part of shutdownNetworkRules"); + logger.debug("Releasing {} port forwarding rules for network id={} as a part of shutdownNetworkRules.", pfRules.size(), networkId); for (final PortForwardingRuleVO pfRule : pfRules) { - logger.trace("Marking pf rule " + pfRule + " with Revoke state"); + logger.trace("Marking pf rule {} with Revoke state", pfRule); pfRule.setState(FirewallRule.State.Revoke); } @@ -3987,10 +3981,10 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal // Mark all static rules as revoked and apply them on the backend (not in the DB) final List firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.StaticNat); final List staticNatRules = new ArrayList(); - logger.debug("Releasing " + firewallStaticNatRules.size() + " static nat rules for network id=" + networkId + " as a part of shutdownNetworkRules"); + logger.debug("Releasing {} static nat rules for network id={} as a part of shutdownNetworkRules", firewallStaticNatRules.size(), networkId); for (final FirewallRuleVO firewallStaticNatRule : firewallStaticNatRules) { - logger.trace("Marking static nat rule " + firewallStaticNatRule + " with Revoke state"); + logger.trace("Marking static nat rule {} with Revoke state", firewallStaticNatRule); final IpAddress ip = _ipAddressDao.findById(firewallStaticNatRule.getSourceIpAddressId()); final FirewallRuleVO ruleVO = 
_firewallDao.findById(firewallStaticNatRule.getId()); @@ -4035,10 +4029,10 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal // revoke all firewall rules for the network w/o applying them on the DB final List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); - logger.debug("Releasing " + firewallRules.size() + " firewall ingress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); + logger.debug("Releasing {} firewall ingress rules for network id={} as a part of shutdownNetworkRules", firewallRules.size(), networkId); for (final FirewallRuleVO firewallRule : firewallRules) { - logger.trace("Marking firewall ingress rule " + firewallRule + " with Revoke state"); + logger.trace("Marking firewall ingress rule {} with Revoke state", firewallRule); firewallRule.setState(FirewallRule.State.Revoke); } @@ -4053,7 +4047,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal } final List firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Egress); - logger.debug("Releasing " + firewallEgressRules.size() + " firewall egress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); + logger.debug("Releasing {} firewall egress rules for network id={} as a part of shutdownNetworkRules", firewallEgressRules.size(), networkId); try { // delete default egress rule @@ -4070,7 +4064,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal } for (final FirewallRuleVO firewallRule : firewallEgressRules) { - logger.trace("Marking firewall egress rule " + firewallRule + " with Revoke state"); + logger.trace("Marking firewall egress rule {} with Revoke state", firewallRule); firewallRule.setState(FirewallRule.State.Revoke); } @@ -4085,7 +4079,7 @@ private boolean shutdownNetworkResources(final long networkId, final 
Account cal } if (network.getVpcId() != null) { - logger.debug("Releasing Network ACL Items for network id=" + networkId + " as a part of shutdownNetworkRules"); + logger.debug("Releasing Network ACL Items for network id={} as a part of shutdownNetworkRules", networkId); try { //revoke all Network ACLs for the network w/o applying them in the DB @@ -4102,7 +4096,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal //release all static nats for the network if (!_rulesMgr.applyStaticNatForNetwork(networkId, false, caller, true)) { - logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network id " + networkId); + logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network id {}", networkId); success = false; } @@ -4119,7 +4113,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal try { if (!_ipAddrMgr.applyIpAssociations(network, true, true, publicIpsToRelease)) { - logger.warn("Unable to apply ip address associations for " + network + " as a part of shutdownNetworkRules"); + logger.warn("Unable to apply ip address associations for {} as a part of shutdownNetworkRules", network); success = false; } } catch (final ResourceUnavailableException e) { @@ -4172,7 +4166,7 @@ public void processConnect(final Host host, final StartupCommand cmd, final bool dcId = dc.getId(); final HypervisorType hypervisorType = startup.getHypervisorType(); - logger.debug("Host's hypervisorType is: " + hypervisorType); + logger.debug("Host's hypervisorType is: {}", hypervisorType); final List networkInfoList = new ArrayList(); @@ -4205,12 +4199,12 @@ public void processConnect(final Host host, final StartupCommand cmd, final bool final CheckNetworkAnswer answer = (CheckNetworkAnswer) _agentMgr.easySend(hostId, nwCmd); if (answer == null) { - logger.warn("Unable to get an answer to the CheckNetworkCommand from agent:" + host.getId()); + logger.warn("Unable to get an answer to 
the CheckNetworkCommand from agent: {}", host.getId()); throw new ConnectionException(true, "Unable to get an answer to the CheckNetworkCommand from agent: " + host.getId()); } if (!answer.getResult()) { - logger.warn("Unable to setup agent " + hostId + " due to " + answer.getDetails()); + logger.warn("Unable to setup agent {} due to {}", hostId, answer.getDetails()); final String msg = "Incorrect Network setup on agent, Reinitialize agent after network names are setup, details : " + answer.getDetails(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, host.getPodId(), msg, msg); throw new ConnectionException(true, msg); @@ -4359,18 +4353,18 @@ public NicProfile createNicForVm(final Network network, final NicProfile request final VMNetworkMapVO vno = new VMNetworkMapVO(vm.getId(), network.getId()); _vmNetworkMapDao.persist(vno); } - logger.debug("Nic is allocated successfully for vm " + vm + " in network " + network); + logger.debug("Nic is allocated successfully for vm {} in network {}", vm, network); } //2) prepare nic if (prepare) { final Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - logger.warn("Failed to implement network id=" + nic.getNetworkId() + " as a part of preparing nic id=" + nic.getId()); + logger.warn("Failed to implement network id={} as a part of preparing nic id={}", nic.getNetworkId(), nic.getId()); throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId()); } nic = prepareNic(vmProfile, dest, context, nic.getId(), implemented.second()); - logger.debug("Nic is prepared successfully for vm " + vm + " in network " + network); + logger.debug("Nic is prepared successfully for vm {} in network {}", vm, network); } return nic; @@ -4478,18 +4472,18 @@ protected List getElementForServiceInNetwork(final Network netwo 
final List providers = getProvidersForServiceInNetwork(network, service); //Only support one provider now if (providers == null) { - logger.error("Cannot find " + service.getName() + " provider for network " + network.getId()); + logger.error("Cannot find {} provider for network {}", service.getName(), network.getId()); return null; } if (providers.size() != 1 && service != Service.Lb) { //support more than one LB providers only - logger.error("Found " + providers.size() + " " + service.getName() + " providers for network!" + network.getId()); + logger.error("Found {} {} providers for network! {}", providers.size(), service.getName(), network.getId()); return null; } for (final Provider provider : providers) { final NetworkElement element = _networkModel.getElementImplementingProvider(provider.getName()); - logger.info("Let " + element.getName() + " handle " + service.getName() + " in network " + network.getId()); + logger.info("Let {} handle {} in network {}", element.getName(), service.getName(), network.getId()); elements.add(element); } return elements; @@ -4583,7 +4577,7 @@ public NicVO savePlaceholderNic(final Network network, final String ip4Address, @Override public Pair importNic(final String macAddress, int deviceId, final Network network, final Boolean isDefaultNic, final VirtualMachine vm, final Network.IpAddresses ipAddresses, final DataCenter dataCenter, final boolean forced) throws ConcurrentOperationException, InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException { - logger.debug("Allocating nic for vm " + vm.getUuid() + " in network " + network + " during import"); + logger.debug("Allocating nic for vm {} in network {} during import", vm.getUuid(), network); String selectedIp = null; if (ipAddresses != null && StringUtils.isNotEmpty(ipAddresses.getIp4Address())) { if (ipAddresses.getIp4Address().equals("auto")) { @@ -4627,7 +4621,7 @@ public NicVO doInTransaction(TransactionStatus status) { int count = 1; if 
(vo.getVmType() == VirtualMachine.Type.User) { - logger.debug("Changing active number of nics for network id=" + network.getUuid() + " on " + count); + logger.debug("Changing active number of nics for network id={} on {}", network.getUuid(), count); _networksDao.changeActiveNicsBy(network.getId(), count); } if (vo.getVmType() == VirtualMachine.Type.User @@ -4708,7 +4702,7 @@ private String generateNewMacAddressIfForced(Network network, String macAddress, @Override public void unmanageNics(VirtualMachineProfile vm) { - logger.debug("Unmanaging NICs for VM: " + vm.getId()); + logger.debug("Unmanaging NICs for VM: {}", vm.getId()); VirtualMachine virtualMachine = vm.getVirtualMachine(); final List nics = _nicDao.listByVmId(vm.getId()); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index db2cdedb466a..ec5d5efb5cfb 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -267,7 +267,7 @@ public MigrationResponse migrateResources(Long srcImgStoreId, Long destImgStoreI } if (chosenFileForMigration.getPhysicalSize() > storageCapacities.get(destImgStoreId).first()) { - logger.debug("{}: %s too large to be migrated to {}", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid(), destImgStoreId); + logger.debug("{}: {} too large to be migrated to {}", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid(), destImgStoreId); continue; } @@ -302,7 +302,7 @@ protected Pair migrateCompleted(Long destDatastoreId, DataStore boolean success = true; if (destDatastoreId == srcDatastore.getId() && !files.isEmpty()) { if (migrationPolicy == MigrationPolicy.BALANCE) { - logger.debug("Migration completed : data 
stores have been balanced "); + logger.debug("Migration completed : data stores have been balanced"); if (destDatastoreId == srcDatastore.getId()) { message = "Seems like source datastore has more free capacity than the destination(s)"; } @@ -353,7 +353,7 @@ protected Map> migrateAway( task.setTemplateChain(templateChains); } futures.add((executor.submit(task))); - logger.debug(String.format("Migration of %s: %s is initiated. ", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid())); + logger.debug("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()); return storageCapacities; } @@ -510,10 +510,10 @@ private boolean storageCapacityBelowThreshold(Map> storag Pair imageStoreCapacity = storageCapacities.get(destStoreId); long usedCapacity = imageStoreCapacity.second() - imageStoreCapacity.first(); if (imageStoreCapacity != null && (usedCapacity / (imageStoreCapacity.second() * 1.0)) <= CapacityManager.SecondaryStorageCapacityThreshold.value()) { - logger.debug("image store: " + destStoreId + " has sufficient capacity to proceed with migration of file"); + logger.debug("image store: {} has sufficient capacity to proceed with migration of file.", destStoreId); return true; } - logger.debug("Image store capacity threshold exceeded, migration not possible"); + logger.debug("Image store capacity threshold exceeded, migration not possible."); return false; } From 3f0e309090975759d6dd3a221cf3220ed6a5267a Mon Sep 17 00:00:00 2001 From: Klaus de Freitas Dornsbach Date: Fri, 19 Apr 2024 14:18:18 -0300 Subject: [PATCH 5/6] addressing other comments --- .../engine/orchestration/NetworkOrchestrator.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index 
e93e984bdb86..04bae7d0c11f 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -3076,7 +3076,7 @@ public boolean shutdownNetwork(final long networkId, final ReservationContext co logger.warn("Unable to acquire lock for the network {} as a part of network shutdown", network); return false; } - logger.debug("Lock is acquired for network " + network + " as a part of network shutdown"); + logger.debug("Lock is acquired for network {} as a part of network shutdown", network); if (network.getState() == Network.State.Allocated) { logger.debug("Network is already shutdown: {}", network); @@ -3257,7 +3257,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con // Don't allow to delete network via api call when it has vms assigned to it final int nicCount = getActiveNicsInNetwork(networkId); if (nicCount > 0) { - logger.debug("The network id={} has active Nics, but shouldn't.", networkId"); + logger.debug("The network id={} has active Nics, but shouldn't.", networkId); // at this point we have already determined that there are no active user vms in network // if the op_networks table shows active nics, it's a bug in releasing nics updating op_networks _networksDao.changeActiveNicsBy(networkId, -1 * nicCount); @@ -3882,7 +3882,7 @@ private boolean cleanupNetworkResources(final long networkId, final Account call } catch (final ResourceUnavailableException ex) { success = false; // shouldn't even come here as network is being cleaned up after all network elements are shutdown - logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable" + networkId, ex); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id={} cleanup due to resourceUnavailable", networkId, ex); } //remove all 
LB rules for the network From 2e2ec00db4d70b27642be7f4ba748fc5b41e29d8 Mon Sep 17 00:00:00 2001 From: Klaus de Freitas Dornsbach Date: Mon, 22 Apr 2024 10:21:06 -0300 Subject: [PATCH 6/6] fixing typo --- .../cloudstack/engine/orchestration/NetworkOrchestrator.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index 04bae7d0c11f..ba9e5646bb53 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -701,7 +701,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { @Override public boolean start() { final int netGcInterval = NumbersUtil.parseInt(_configDao.getValue(NetworkGcInterval.key()), 60); - logger.info("Network Manager will run the NetworkGarbageCollector every '{}' seconds.", netGcInterval"); + logger.info("Network Manager will run the NetworkGarbageCollector every '{}' seconds.", netGcInterval); _executor.scheduleWithFixedDelay(new NetworkGarbageCollector(), netGcInterval, netGcInterval, TimeUnit.SECONDS); return true;