Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,9 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
Expand Down Expand Up @@ -380,7 +382,7 @@ public Integer call() {
// /////////// Write out the container-tokens in the nmPrivate space.
try (DataOutputStream tokensOutStream =
lfs.create(nmPrivateTokensPath, EnumSet.of(CREATE, OVERWRITE))) {
Credentials creds = container.getCredentials();
Credentials creds = getEffectiveCredentials(container);
creds.writeTokenStorageToStream(tokensOutStream);
}
// /////////// End of writing out container-tokens
Expand Down Expand Up @@ -1827,6 +1829,29 @@ private void recordContainerCsiVolumesRootDir(ContainerId containerId,
// TODO persistent to the NM store...
}

/**
 * Compute the credentials that should be written to the container's
 * nmPrivate token file.
 *
 * When security is enabled and the RM has pushed updated system
 * credentials for this container's application (for example, renewed
 * HDFS delegation tokens delivered via heartbeat), those credentials
 * are merged on top of the container's own credentials. The merge is
 * performed on a copy, so the container's original Credentials object
 * is never mutated. This mirrors the behavior in
 * ResourceLocalizationService.writeCredentials().
 *
 * @param cntr the container whose token file is being written
 * @return the container's credentials, possibly merged with the
 *         application's RM-pushed system credentials
 */
@VisibleForTesting
Credentials getEffectiveCredentials(Container cntr) {
  Credentials containerCreds = cntr.getCredentials();
  // Outside secure mode there are no system credentials to merge.
  if (!UserGroupInformation.isSecurityEnabled()) {
    return containerCreds;
  }
  ApplicationId appId = cntr.getContainerId()
      .getApplicationAttemptId().getApplicationId();
  Credentials rmPushedCreds =
      context.getSystemCredentialsForApps().get(appId);
  if (rmPushedCreds == null) {
    return containerCreds;
  }
  // Merge into a copy so callers holding the container's own
  // Credentials never observe the added tokens.
  Credentials merged = new Credentials(containerCreds);
  merged.addAll(rmPushedCreds);
  return merged;
}

protected Path getContainerWorkDir() throws IOException {
String containerWorkDir = container.getWorkDir();
if (containerWorkDir == null
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2699,4 +2699,99 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
assertEquals(testVal2Expanded, env.get(testKey2));
}

/**
 * Test that ContainerLaunch.getEffectiveCredentials() merges system
 * credentials (renewed HDFS delegation tokens pushed by RM via
 * heartbeat) into the container's credentials.
 *
 * Without the fix, getEffectiveCredentials() would just return the
 * container's original (stale) credentials. With the fix, it merges
 * in the updated tokens from NMContext.getSystemCredentialsForApps().
 *
 * The test also verifies the merge is non-destructive: the container's
 * original Credentials object must still hold the old token afterwards.
 */
@Test
@Timeout(value = 30)
public void testContainerLaunchUsesSystemCredentials() throws Exception {
ApplicationId appId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId cId = ContainerId.newContainerId(appAttemptId, 0);

// Both tokens are stored under the same alias so the system token
// overwrites the container's token on merge.
org.apache.hadoop.io.Text tokenAlias =
new org.apache.hadoop.io.Text("hdfs-dt");

// Old HDFS token — what the container was created with
org.apache.hadoop.security.token.Token<? extends
org.apache.hadoop.security.token.TokenIdentifier> oldToken =
new org.apache.hadoop.security.token.Token<>(
"old-id".getBytes(), "old-pass".getBytes(),
new org.apache.hadoop.io.Text("HDFS_DELEGATION_TOKEN"),
new org.apache.hadoop.io.Text("hdfs-service"));

// New HDFS token — pushed by RM after token replacement
org.apache.hadoop.security.token.Token<? extends
org.apache.hadoop.security.token.TokenIdentifier> newToken =
new org.apache.hadoop.security.token.Token<>(
"new-id".getBytes(), "new-pass".getBytes(),
new org.apache.hadoop.io.Text("HDFS_DELEGATION_TOKEN"),
new org.apache.hadoop.io.Text("hdfs-service"));

// Container credentials with old token
Credentials containerCreds = new Credentials();
containerCreds.addToken(tokenAlias, oldToken);

// Mock container
Container container = mock(Container.class);
when(container.getContainerId()).thenReturn(cId);
when(container.getCredentials()).thenReturn(containerCreds);

// Simulate RM pushing renewed credentials via heartbeat.
// NOTE(review): "setSystemCrendentialsForApps" appears to be the
// existing (misspelled) NMContext setter name — confirm against
// NMContext before renaming anything here.
Credentials systemCreds = new Credentials();
systemCreds.addToken(tokenAlias, newToken);
Map<ApplicationId, Credentials> systemCredsMap = new HashMap<>();
systemCredsMap.put(appId, systemCreds);
((NMContext) context).setSystemCrendentialsForApps(systemCredsMap);

// Enable security so the merge path is triggered. UGI configuration
// is process-global static state, so it must be restored in finally.
Configuration secureConf = new Configuration(conf);
secureConf.set(
org.apache.hadoop.fs.CommonConfigurationKeysPublic
.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
org.apache.hadoop.security.UserGroupInformation
.setConfiguration(secureConf);

try {
assertTrue(
org.apache.hadoop.security.UserGroupInformation.isSecurityEnabled(),
"Security should be enabled for this test");

// Create a real ContainerLaunch and call getEffectiveCredentials
Application app = mock(Application.class);
when(app.getAppId()).thenReturn(appId);
Dispatcher dispatcher = mock(Dispatcher.class);
ContainerLaunch launch = new ContainerLaunch(context, secureConf,
dispatcher, exec, app, container, dirsHandler, containerManager);

Credentials effective = launch.getEffectiveCredentials(container);

// The effective credentials should contain the NEW token
org.apache.hadoop.security.token.Token<? extends
org.apache.hadoop.security.token.TokenIdentifier> resultToken =
effective.getToken(tokenAlias);
assertNotNull(resultToken,
"HDFS token should exist in effective credentials");
assertEquals("new-id", new String(resultToken.getIdentifier()),
"Effective credentials should contain the renewed HDFS token "
+ "from system credentials, not the stale original");

// Original container credentials should NOT be modified
org.apache.hadoop.security.token.Token<? extends
org.apache.hadoop.security.token.TokenIdentifier> origToken =
containerCreds.getToken(tokenAlias);
assertEquals("old-id", new String(origToken.getIdentifier()),
"Original container credentials must not be modified");
} finally {
// Restore the original configuration so later tests in this class
// do not run with Kerberos security accidentally enabled.
org.apache.hadoop.security.UserGroupInformation.setConfiguration(conf);
}
}

}