HIVE-29138: Authentication failure when connecting to Kerberized ZooK… #6024


Open: wants to merge 3 commits into base: master
@@ -53,8 +53,8 @@ public class MiniHiveKdc {
public static String AUTHENTICATION_TYPE = "KERBEROS";
private static final String HIVE_METASTORE_SERVICE_PRINCIPAL = "hive";

- private final MiniKdc miniKdc;
- private final File workDir;
+ final MiniKdc miniKdc;
+ final File workDir;
private final Map<String, String> userPrincipals =
new HashMap<String, String>();
private final Properties kdcConf = MiniKdc.createConf();
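The private modifier comes off these two fields because the new TestZooKeeperWithMiniKdc, added later in this diff, reads them directly when it forks a container-friendly krb5.conf:

```java
// Excerpt from TestZooKeeperWithMiniKdc#forkNewKrbConf further down in this PR:
File krb5 = miniKDC.miniKdc.getKrb5conf();
File newKrb5 = new File(miniKDC.workDir, krb5.getName() + "_new");
```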
@@ -0,0 +1,90 @@ (new test class: org.apache.hive.minikdc.TestZooKeeperHS2HAWithMiniKdc)
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hive.minikdc;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.util.HashMap;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.security.ZooKeeperTokenStore;
import org.apache.hive.jdbc.HiveConnection;
import org.apache.hive.jdbc.miniHS2.MiniHS2;
import org.apache.zookeeper.client.ZooKeeperSaslClient;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.testcontainers.containers.GenericContainer;

import static org.apache.hive.minikdc.TestZooKeeperWithMiniKdc.startZooKeeper;

public class TestZooKeeperHS2HAWithMiniKdc {
private static final String ZK_PRINCIPAL = "zookeeper";
private static MiniHS2 miniHS2 = null;
private static MiniHiveKdc miniKDC;
private static HiveConf conf;
private static GenericContainer<?> zookeeper;

@BeforeClass
public static void setUp() throws Exception {
miniKDC = new MiniHiveKdc();
conf = new HiveConf();
miniKDC.addUserPrincipal(miniKDC.getServicePrincipalForUser(ZK_PRINCIPAL));
zookeeper = startZooKeeper(miniKDC, conf);
DriverManager.setLoginTimeout(0);
conf.set("hive.cluster.delegation.token.store.class", ZooKeeperTokenStore.class.getName());
HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY, true);
HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ZOOKEEPER_USE_KERBEROS, true);
HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_ACTIVE_PASSIVE_HA_ENABLE, true);
HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_FETCH_TASK_CACHING, false);
miniHS2 = MiniHiveKdc.getMiniHS2WithKerb(miniKDC, conf);
miniHS2.start(new HashMap<String, String>());
}

@Test
public void testJdbcConnection() throws Exception {
System.clearProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY);
String url = "jdbc:hive2://localhost:" + zookeeper.getMappedPort(2181) + "/default;" +
"serviceDiscoveryMode=zooKeeperHA;zooKeeperNamespace=hs2ActivePassiveHA;principal=hive/[email protected]";
try (Connection con = DriverManager.getConnection(url);
ResultSet rs = con.getMetaData().getCatalogs()) {
Assert.assertFalse(rs.next());
((HiveConnection) con).getDelegationToken("hive", "hive");
}
Assert.assertNotNull(System.getProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY));
}

@AfterClass
public static void tearDown() throws Exception {
try {
if (zookeeper != null) {
zookeeper.stop();
}
} finally {
miniKDC.shutDown();
if (miniHS2 != null && miniHS2.isStarted()) {
miniHS2.stop();
miniHS2.cleanup();
}
}
}
}
@@ -0,0 +1,173 @@ (new test class: org.apache.hive.minikdc.TestZooKeeperWithMiniKdc)
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hive.minikdc;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.net.URI;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.security.ZooKeeperTokenStore;
import org.apache.hive.jdbc.HiveConnection;
import org.apache.hive.jdbc.miniHS2.MiniHS2;
import org.apache.zookeeper.client.ZooKeeperSaslClient;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.testcontainers.containers.BindMode;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.wait.strategy.Wait;
import org.testcontainers.utility.DockerImageName;

public class TestZooKeeperWithMiniKdc {
private static final String ZK_PRINCIPAL = "zookeeper";
private static MiniHS2 miniHS2 = null;
private static MiniHiveKdc miniKDC;
private static HiveConf conf;
private static GenericContainer<?> zookeeper;

@BeforeClass
public static void setUp() throws Exception {
miniKDC = new MiniHiveKdc();
conf = new HiveConf();
miniKDC.addUserPrincipal(miniKDC.getServicePrincipalForUser(ZK_PRINCIPAL));
zookeeper = startZooKeeper(miniKDC, conf);
String hiveMetastorePrincipal =
miniKDC.getFullyQualifiedServicePrincipal(miniKDC.getHiveMetastoreServicePrincipal());
String hiveMetastoreKeytab = miniKDC.getKeyTabFile(
miniKDC.getServicePrincipalForUser(miniKDC.getHiveMetastoreServicePrincipal()));
MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.USE_THRIFT_SASL, true);
conf.set("hive.metastore.kerberos.principal", hiveMetastorePrincipal);
conf.set("hive.metastore.kerberos.keytab.file", hiveMetastoreKeytab);
conf.set("hive.cluster.delegation.token.store.class", ZooKeeperTokenStore.class.getName());
MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.EXECUTE_SET_UGI, false);
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_SERVICE_DISCOVERY_MODE, "zookeeper");
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_BIND_HOST, "localhost");

DriverManager.setLoginTimeout(0);
HiveConf.setVar(conf, HiveConf.ConfVars.METASTORE_URIS, "localhost:" + zookeeper.getMappedPort(2181));
HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY, true);
HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ZOOKEEPER_USE_KERBEROS, true);
HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_FETCH_TASK_CACHING, false);
miniHS2 = MiniHiveKdc.getMiniHS2WithKerbWithRemoteHMSWithKerb(miniKDC, conf);
miniHS2.start(new HashMap<String, String>());
}

@Test
public void testMetaStoreClient() throws Exception {
System.clearProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY);
Assert.assertEquals("localhost:" + zookeeper.getMappedPort(2181),
MetastoreConf.getVar(conf, MetastoreConf.ConfVars.THRIFT_URIS));
try (HiveMetaStoreClient client = new HiveMetaStoreClient(conf)) {
URI[] uris = client.getThriftClient().getMetastoreUris();
Assert.assertEquals(1, uris.length);
Assert.assertEquals(miniHS2.getHmsPort(), uris[0].getPort());
client.addMasterKey("adbcedfghigklmn");
}
Assert.assertNotNull(System.getProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY));
}

@Test
public void testJdbcConnection() throws Exception {
System.clearProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY);
String url = "jdbc:hive2://localhost:" + zookeeper.getMappedPort(2181) + "/default;" +
"serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2;principal=hive/[email protected]";
try (Connection con = DriverManager.getConnection(url);
ResultSet rs = con.getMetaData().getCatalogs()) {
Assert.assertFalse(rs.next());
((HiveConnection) con).getDelegationToken("hive", "hive");
}
Assert.assertNotNull(System.getProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY));
}

static GenericContainer<?>
startZooKeeper(MiniHiveKdc miniKDC, HiveConf conf) throws Exception {
Pair<File, int[]> krb5Conf = forkNewKrbConf(miniKDC);
GenericContainer<?> zookeeper = new GenericContainer<>(DockerImageName.parse("zookeeper:3.8.4"))
.withExposedPorts(2181)
.waitingFor(Wait.forLogMessage(".*binding to port.*2181.*\\n", 1))
.withEnv("JVMFLAGS", "-Djava.security.auth.login.config=/conf/jaas.conf")
.withEnv("ZOO_CFG_EXTRA", "authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider" +
" sessionRequireClientSASLAuth=true")
.withFileSystemBind(miniKDC.getKeyTabFile(miniKDC.getServicePrincipalForUser(ZK_PRINCIPAL)),
"/conf/zookeeper.keytab", BindMode.READ_ONLY)
.withFileSystemBind(TestZooKeeperWithMiniKdc.class.getClassLoader().getResource("zk_jaas.conf").getPath(),
"/conf/jaas.conf", BindMode.READ_ONLY)
.withFileSystemBind(krb5Conf.getLeft().getPath(), "/etc/krb5.conf", BindMode.READ_ONLY);
if (krb5Conf.getRight().length > 0) {
org.testcontainers.Testcontainers.exposeHostPorts(krb5Conf.getRight());
}
zookeeper.start();
HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_ZOOKEEPER_QUORUM, "localhost:" + zookeeper.getMappedPort(2181));
return zookeeper;
}

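// Rewrites MiniKdc's krb5.conf into a copy the containerized ZooKeeper can use: every
// "localhost:<port>" reference becomes "host.testcontainers.internal:<port>", and the
// affected ports are returned so that startZooKeeper can expose them to the container
// through Testcontainers.exposeHostPorts(), letting the container reach the KDC on the host.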
private static Pair<File, int[]> forkNewKrbConf(MiniHiveKdc miniKDC) throws Exception {
File krb5 = miniKDC.miniKdc.getKrb5conf();
File newKrb5 = new File(miniKDC.workDir, krb5.getName() + "_new");
List<Integer> hostPorts = new ArrayList<>();
try (BufferedReader reader = new BufferedReader(new FileReader(krb5));
FileWriter writer = new FileWriter(newKrb5, false)) {
String line;
String localhost = "localhost:";
while ((line = reader.readLine()) != null) {
if (line.contains(localhost)) {
hostPorts.add(Integer.valueOf(line.split(localhost)[1]));
line = line.replace("localhost", "host.testcontainers.internal");
}
writer.write(line);
writer.write(System.lineSeparator());
}
writer.flush();
}
int[] ports = new int[hostPorts.size()];
for (int i = 0; i < hostPorts.size(); i++) {
ports[i] = hostPorts.get(i);
}
return Pair.of(newKrb5, ports);
}

@AfterClass
public static void tearDown() throws Exception {
try {
if (zookeeper != null) {
zookeeper.stop();
}
} finally {
miniKDC.shutDown();
if (miniHS2 != null && miniHS2.isStarted()) {
miniHS2.stop();
miniHS2.cleanup();
}
}
}
}
8 changes: 8 additions & 0 deletions itests/hive-minikdc/src/test/resources/zk_jaas.conf
@@ -0,0 +1,8 @@
Server {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
keyTab="/conf/zookeeper.keytab"
storeKey=true
useTicketCache=false
principal="zookeeper/[email protected]";
};
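This Server section is the login context for the containerized ZooKeeper: startZooKeeper mounts the file at /conf/jaas.conf, activates it through JVMFLAGS (-Djava.security.auth.login.config=/conf/jaas.conf), and binds the matching keytab read-only at /conf/zookeeper.keytab.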
2 changes: 1 addition & 1 deletion jdbc/pom.xml
@@ -241,7 +241,7 @@
<include>org/apache/hadoop/security/token/**</include>
<include>org/apache/hadoop/util/*</include>
<include>org/apache/hadoop/net/*</include>
- <include>org/apache/hadoop/io/*</include>
+ <include>org/apache/hadoop/io/**</include>
<include>org/apache/hadoop/HadoopIllegalArgumentException.class</include>
</includes>
</filter>
35 changes: 19 additions & 16 deletions jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.hive.common.SSLZookeeperFactory;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.common.IPStackUtils;
+ import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
import org.apache.hive.service.server.HS2ActivePassiveHARegistry;
import org.apache.hive.service.server.HS2ActivePassiveHARegistryClient;
@@ -70,8 +71,7 @@ private static String getZooKeeperNamespace(JdbcConnectionParams connParams) {
*/
public static boolean isZkHADynamicDiscoveryMode(Map<String, String> sessionConf) {
final String discoveryMode = sessionConf.get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE);
- return (discoveryMode != null) &&
- JdbcConnectionParams.SERVICE_DISCOVERY_MODE_ZOOKEEPER_HA.equalsIgnoreCase(discoveryMode);
+ return JdbcConnectionParams.SERVICE_DISCOVERY_MODE_ZOOKEEPER_HA.equalsIgnoreCase(discoveryMode);
}

/**
@@ -82,9 +82,14 @@ public static boolean isZkHADynamicDiscoveryMode(Map<String, String> sessionConf
*/
public static boolean isZkDynamicDiscoveryMode(Map<String, String> sessionConf) {
final String discoveryMode = sessionConf.get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE);
- return (discoveryMode != null)
- && (JdbcConnectionParams.SERVICE_DISCOVERY_MODE_ZOOKEEPER.equalsIgnoreCase(discoveryMode) ||
- JdbcConnectionParams.SERVICE_DISCOVERY_MODE_ZOOKEEPER_HA.equalsIgnoreCase(discoveryMode));
+ return JdbcConnectionParams.SERVICE_DISCOVERY_MODE_ZOOKEEPER.equalsIgnoreCase(discoveryMode) ||
+ JdbcConnectionParams.SERVICE_DISCOVERY_MODE_ZOOKEEPER_HA.equalsIgnoreCase(discoveryMode);
}

+ static boolean isZkEnforceSASLClient(Map<String, String> sessionVars) {
+ HiveConf.ConfVars confVars = HiveConf.ConfVars.HIVE_ZOOKEEPER_USE_KERBEROS;
+ return sessionVars.containsKey(JdbcConnectionParams.AUTH_PRINCIPAL) &&
+ Boolean.parseBoolean(sessionVars.getOrDefault(confVars.varname, confVars.getDefaultValue()));
+ }

/**
@@ -128,6 +133,11 @@ private static CuratorFramework getZkClient(JdbcConnectionParams connParams) thr
connParams.getZookeeperTrustStoreLocation(),
connParams.getZookeeperTrustStorePassword(), connParams.getZookeeperTrustStoreType()))
.build();

+ // If the client requested Kerberos, the ZooKeeper ensemble is most likely Kerberos-secured,
+ // so install the client-side SASL/JAAS configuration before starting the client.
+ if (isZkEnforceSASLClient(connParams.getSessionVars())) {
+ SecurityUtils.setZookeeperClientKerberosJaasConfig(null, null);
+ }
zooKeeperClient.start();
return zooKeeperClient;
}
@@ -195,38 +205,31 @@ static void configureConnParams(JdbcConnectionParams connParams) throws ZooKeepe
if (isZkHADynamicDiscoveryMode(connParams.getSessionVars())) {
configureConnParamsHA(connParams);
} else {
- CuratorFramework zooKeeperClient = null;
- try {
- zooKeeperClient = getZkClient(connParams);

+ try (CuratorFramework zooKeeperClient = getZkClient(connParams)) {
final List<String> serverHosts = getServerHosts(connParams, zooKeeperClient);

if (serverHosts.isEmpty()) {
throw new ZooKeeperHiveClientException("No more HiveServer2 URIs from ZooKeeper to attempt");
}

// Pick a server node randomly
final String serverNode = serverHosts.get(ThreadLocalRandom.current().nextInt(serverHosts.size()));

updateParamsWithZKServerNode(connParams, zooKeeperClient, serverNode);
} catch (ZooKeeperHiveClientException zkhce) {
throw zkhce;
} catch (Exception e) {
throw new ZooKeeperHiveClientException("Unable to read HiveServer2 configs from ZooKeeper", e);
- } finally {
- if (zooKeeperClient != null) {
- zooKeeperClient.close();
- }
}
}
}

private static void configureConnParamsHA(JdbcConnectionParams connParams) throws ZooKeeperHiveClientException {
try {

Configuration registryConf = new Configuration();
registryConf.set(HiveConf.ConfVars.HIVE_ZOOKEEPER_QUORUM.varname, connParams.getZooKeeperEnsemble());
registryConf.set(HiveConf.ConfVars.HIVE_SERVER2_ACTIVE_PASSIVE_HA_REGISTRY_NAMESPACE.varname,
getZooKeeperNamespace(connParams));
+ registryConf.setBoolean(HiveConf.ConfVars.HIVE_ZOOKEEPER_USE_KERBEROS.varname,
+ isZkEnforceSASLClient(connParams.getSessionVars()));
HS2ActivePassiveHARegistry haRegistryClient = HS2ActivePassiveHARegistryClient.getClient(registryConf);
boolean foundLeader = false;
String maxRetriesConf = connParams.getSessionVars().get(JdbcConnectionParams.RETRIES);
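The key client-side change in this file is in getZkClient: when isZkEnforceSASLClient sees a Kerberos principal among the session variables (and hive.zookeeper.kerberos.enabled has not been turned off), SecurityUtils.setZookeeperClientKerberosJaasConfig(null, null) is invoked before the Curator client starts, and the new tests assert that ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY ends up set afterwards. As a rough sketch only, not Hive's actual SecurityUtils implementation, a helper of this kind typically installs an in-memory JAAS login context backed by the caller's Kerberos ticket cache and points ZooKeeper's SASL client at it. The context name and login-module options below are illustrative placeholders:

```java
import java.util.HashMap;
import java.util.Map;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import org.apache.zookeeper.client.ZooKeeperSaslClient;

public final class ZkClientJaasSketch {
  private ZkClientJaasSketch() {}

  /** Installs a ticket-cache-based Kerberos login context for the ZooKeeper client. */
  public static void install() {
    final Map<String, String> options = new HashMap<>();
    options.put("useTicketCache", "true"); // reuse the Kerberos TGT already held by the caller
    options.put("doNotPrompt", "true");
    options.put("renewTGT", "true");

    final AppConfigurationEntry entry = new AppConfigurationEntry(
        "com.sun.security.auth.module.Krb5LoginModule",
        AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
        options);

    final String contextName = "HiveZooKeeperClient"; // placeholder context name
    Configuration.setConfiguration(new Configuration() {
      @Override
      public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
        // A production implementation would delegate to the previously installed
        // Configuration for unrelated login contexts instead of returning null.
        return contextName.equals(name) ? new AppConfigurationEntry[] { entry } : null;
      }
    });
    // Point ZooKeeper's SASL client at the context installed above; this is the system
    // property the new tests assert on after a successful connection.
    System.setProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, contextName);
  }
}
```

With a client login context in place, the Curator/ZooKeeper connection negotiates SASL (GSSAPI) instead of falling back to an unauthenticated session, which the containerized ZooKeeper in these tests rejects anyway via sessionRequireClientSASLAuth=true.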