This repository has been archived by the owner on Jan 8, 2024. It is now read-only.

CLOUD-10637 Install PostgreSQL and use with Hive Metastore by default with Ambari 2.6.1
lacikaaa authored and biharitomi committed Jan 31, 2018
1 parent cb10f86 commit 91b4450
Showing 23 changed files with 558 additions and 34 deletions.
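In outline: a new HiveConfigProvider adds a postgresql-server Salt pillar whenever a blueprint contains HIVE_METASTORE but neither a MYSQL_SERVER component nor explicit javax.jdo.option.* Hive database settings; RDSConfigProvider then registers a matching default Postgres RDSConfig for the cluster before the blueprint is processed; MYSQL_SERVER is dropped from the bundled default blueprints; and the default Ambari, HDP and HDP-UTILS repositories are bumped to 2.6.1.0, 2.6.4.0-91 and 1.1.0.22 respectively.
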
29 changes: 18 additions & 11 deletions cloud-common/src/main/resources/application.yml
@@ -117,10 +117,17 @@ cb:
  nginx:
    port: 9443

+ hive.database:
+   user: hive
+   password: hiveSecurePassword
+   db: hive
+   port: 5432
+   host: localhost

  ambari:
    repo:
-     version: 2.6.0.0
-     baseurl: http://public-repo-1.hortonworks.com/ambari/centos6/2.x/updates/2.6.0.0
+     version: 2.6.1.0
+     baseurl: http://public-repo-1.hortonworks.com/ambari/centos6/2.x/updates/2.6.1.0
      gpgkey: http://public-repo-1.hortonworks.com/ambari/centos6/RPM-GPG-KEY/RPM-GPG-KEY-Jenkins
    database:
      vendor: embedded
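
The new cb.hive.database block feeds the HiveConfigProvider introduced later in this commit via Spring @Value placeholders; note that the yml ships hiveSecurePassword while the in-code fallback is a weaker hive, so the property file wins wherever it is loaded. A minimal sketch of the binding, assuming standard Spring property resolution:

    // Resolves to "hiveSecurePassword" from the application.yml above; the ":hive"
    // suffix is only the fallback used when the property is absent.
    @Value("${cb.hive.database.password:hive}")
    private String hiveDbPassword;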
@@ -147,19 +154,19 @@ cb:
            redhat6: http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6
            redhat7: http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos7
      2.6:
-       version: 2.6.3.0
+       version: 2.6.4.0
        repo:
          stack:
            repoid: HDP-2.6
-           redhat6: http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.3.0
-           redhat7: http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.6.3.0
-           repository-version: 2.6.3.0-235
-           vdf-redhat6: http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.3.0/HDP-2.6.3.0-235.xml
-           vdf-redhat7: http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.6.3.0/HDP-2.6.3.0-235.xml
+           redhat6: http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.4.0
+           redhat7: http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.6.4.0
+           repository-version: 2.6.4.0-91
+           vdf-redhat6: http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.4.0/HDP-2.6.4.0-91.xml
+           vdf-redhat7: http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.6.4.0/HDP-2.6.4.0-91.xml
          util:
-           repoid: HDP-UTILS-1.1.0.21
-           redhat6: http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6
-           redhat7: http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos7
+           repoid: HDP-UTILS-1.1.0.22
+           redhat6: http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.22/repos/centos6
+           redhat7: http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.22/repos/centos7

  hdf:
    entries:
ClusterHostServiceRunner.java
@@ -54,6 +54,7 @@
import com.sequenceiq.cloudbreak.service.blueprint.ComponentLocatorService;
import com.sequenceiq.cloudbreak.service.cluster.AmbariSecurityConfigProvider;
import com.sequenceiq.cloudbreak.service.cluster.flow.blueprint.BlueprintProcessor;
import com.sequenceiq.cloudbreak.service.cluster.flow.blueprint.HiveConfigProvider;
import com.sequenceiq.cloudbreak.service.cluster.flow.kerberos.KerberosDetailService;

@Component
@@ -92,6 +93,9 @@ public class ClusterHostServiceRunner {
@Inject
private KerberosDetailService kerberosDetailService;

@Inject
private HiveConfigProvider hiveConfigProvider;

@Transactional
public void runAmbariServices(Stack stack, Cluster cluster) throws CloudbreakException {
try {
@@ -156,6 +160,8 @@ private SaltConfig createSaltConfig(Stack stack, Cluster cluster, GatewayConfig
servicePillar.put("smartsense-credentials", new SaltPillarProperties("/smartsense/credentials.sls", smartSenseCredentials));
}

hiveConfigProvider.decorateServicePillarWithPostgresIfNeeded(servicePillar, cluster.getBlueprint());

return new SaltConfig(servicePillar, createGrainProperties(gatewayConfigs));
}

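With the default properties, the pillar entry contributed above ends up shaped roughly as follows (illustrative only; the /postgresql/postgre.sls Salt state that consumes it is not part of this diff):

    // "postgresql-server" -> SaltPillarProperties("/postgresql/postgre.sls",
    //     { "postgres": { "database": "hive", "user": "hive", "password": "hiveSecurePassword" } })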
AmbariClusterConnector.java
@@ -85,9 +85,9 @@
import com.sequenceiq.cloudbreak.service.PollingResult;
import com.sequenceiq.cloudbreak.service.PollingService;
import com.sequenceiq.cloudbreak.service.TlsSecurityService;
- import com.sequenceiq.cloudbreak.service.cluster.AmbariSecurityConfigProvider;
import com.sequenceiq.cloudbreak.service.cluster.AmbariClientProvider;
import com.sequenceiq.cloudbreak.service.cluster.AmbariOperationFailedException;
+ import com.sequenceiq.cloudbreak.service.cluster.AmbariSecurityConfigProvider;
import com.sequenceiq.cloudbreak.service.cluster.HadoopConfigurationService;
import com.sequenceiq.cloudbreak.service.cluster.flow.blueprint.AutoRecoveryConfigProvider;
import com.sequenceiq.cloudbreak.service.cluster.flow.blueprint.AzureFileSystemConfigProvider;
@@ -106,6 +106,7 @@
import com.sequenceiq.cloudbreak.service.hostgroup.HostGroupService;
import com.sequenceiq.cloudbreak.service.image.ImageService;
import com.sequenceiq.cloudbreak.service.messages.CloudbreakMessagesService;
import com.sequenceiq.cloudbreak.service.rdsconfig.RdsConfigService;
import com.sequenceiq.cloudbreak.service.stack.StackService;
import com.sequenceiq.cloudbreak.service.stack.flow.AmbariStartupListenerTask;
import com.sequenceiq.cloudbreak.service.stack.flow.AmbariStartupPollerObject;
@@ -207,6 +208,9 @@ public class AmbariClusterConnector {
@Inject
private RDSConfigProvider rdsConfigProvider;

@Inject
private RdsConfigService rdsConfigService;

@Inject
private ContainerExecutorConfigProvider containerExecutorConfigProvider;

@@ -275,12 +279,8 @@ public void buildAmbariCluster(Stack stack) {
Map<String, List<Map<String, String>>> hostGroupMappings = buildHostGroupAssociations(hostGroups);

recipeEngine.executePostAmbariStartRecipes(stack, hostGroups);
- Set<RDSConfig> rdsConfigs = rdsConfigRepository.findByClusterId(stack.getOwner(), stack.getAccount(), cluster.getId());

- String blueprintText = updateBlueprintWithInputs(cluster, cluster.getBlueprint(), rdsConfigs);

- FileSystem fs = cluster.getFileSystem();
- blueprintText = updateBlueprintConfiguration(stack, blueprintText, rdsConfigs, fs);
+ String blueprintText = generateBlueprintText(stack, cluster);

AmbariClient ambariClient = getAmbariClient(stack);

@@ -317,6 +317,18 @@ public void buildAmbariCluster(Stack stack) {
}
}

private String generateBlueprintText(Stack stack, Cluster cluster) throws IOException, CloudbreakException {
Blueprint blueprint = cluster.getBlueprint();

Set<RDSConfig> rdsConfigs = rdsConfigProvider.createPostgresRdsConfigIfNeeded(stack, cluster, blueprint);

String blueprintText = updateBlueprintWithInputs(cluster, blueprint, rdsConfigs);

FileSystem fs = cluster.getFileSystem();
blueprintText = updateBlueprintConfiguration(stack, blueprintText, rdsConfigs, fs);
return blueprintText;
}

public void prepareClusterToDekerberizing(Long stackId) {
try {
Stack stack = stackService.getByIdWithLists(stackId);
BlueprintProcessor.java
@@ -24,4 +24,6 @@ public interface BlueprintProcessor {
String addComponentToHostgroups(String component, Collection<String> hostGroupNames, String blueprintText);

Map<String, Set<String>> getComponentsByHostGroup(String blueprintText);

boolean hiveDatabaseConfigurationExistsInBlueprint(String blueprintText);
}
HiveConfigProvider.java (new file)
@@ -0,0 +1,74 @@
package com.sequenceiq.cloudbreak.service.cluster.flow.blueprint;

import static java.util.Collections.singletonMap;

import java.util.HashMap;
import java.util.Map;

import javax.inject.Inject;

import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import com.sequenceiq.cloudbreak.domain.Blueprint;
import com.sequenceiq.cloudbreak.orchestrator.model.SaltPillarProperties;

@Component
public class HiveConfigProvider {
@Value("${cb.hive.database.user:hive}")
private String hiveDbUser;

@Value("${cb.hive.database.password:hive}")
private String hiveDbPassword;

@Value("${cb.hive.database.db:hive}")
private String hiveDb;

@Value("${cb.hive.database.port:5432}")
private String hiveDbPort;

@Value("${cb.hive.database.host:localhost}")
private String hiveDbHost;

@Inject
private BlueprintProcessor blueprintProcessor;

public String getHiveDbUser() {
return hiveDbUser;
}

public String getHiveDbPassword() {
return hiveDbPassword;
}

public String getHiveDb() {
return hiveDb;
}

public String getHiveDbPort() {
return hiveDbPort;
}

public String getHiveDbHost() {
return hiveDbHost;
}

public boolean isRdsConfigNeededForHiveMetastore(Blueprint blueprint) {
    return blueprintProcessor.componentExistsInBlueprint("HIVE_METASTORE", blueprint.getBlueprintText())
            && !blueprintProcessor.componentExistsInBlueprint("MYSQL_SERVER", blueprint.getBlueprintText())
            && !blueprintProcessor.hiveDatabaseConfigurationExistsInBlueprint(blueprint.getBlueprintText());
}

public void decorateServicePillarWithPostgresIfNeeded(Map<String, SaltPillarProperties> servicePillar, Blueprint blueprint) {
    if (isRdsConfigNeededForHiveMetastore(blueprint)) {
        Map<String, Object> postgres = new HashMap<>();
        postgres.put("database", hiveDb);
        postgres.put("user", hiveDbUser);
        postgres.put("password", hiveDbPassword);
        servicePillar.put("postgresql-server", new SaltPillarProperties("/postgresql/postgre.sls", singletonMap("postgres", postgres)));
}
}
}
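
A minimal usage sketch (hypothetical, not part of this commit): for a blueprint that lists HIVE_METASTORE but neither MYSQL_SERVER nor any javax.jdo.option.* overrides, the decoration adds exactly one pillar entry.

    // Hypothetical test-style snippet exercising the provider.
    Map<String, SaltPillarProperties> servicePillar = new HashMap<>();
    hiveConfigProvider.decorateServicePillarWithPostgresIfNeeded(servicePillar, blueprint);
    // servicePillar now contains a "postgresql-server" entry whose values come
    // from the cb.hive.database properties.
    assert servicePillar.containsKey("postgresql-server");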
JacksonBlueprintProcessor.java
@@ -19,6 +19,14 @@
@Component
public class JacksonBlueprintProcessor implements BlueprintProcessor {

public static final String JAVAX_JDO_OPTION_CONNECTION_URL = "javax.jdo.option.ConnectionURL";

public static final String JAVAX_JDO_OPTION_CONNECTION_DRIVER_NAME = "javax.jdo.option.ConnectionDriverName";

public static final String JAVAX_JDO_OPTION_CONNECTION_USER_NAME = "javax.jdo.option.ConnectionUserName";

public static final String JAVAX_JDO_OPTION_CONNECTION_PASSWORD = "javax.jdo.option.ConnectionPassword";

private static final String CONFIGURATIONS_NODE = "configurations";

private static final String SETTINGS_NODE = "settings";
@@ -29,6 +37,8 @@ public class JacksonBlueprintProcessor implements BlueprintProcessor {

private static final String STACK_VERSION = "stack_version";

private static final String HIVE_SITE = "hive-site";

@Override
public String addConfigEntries(String originalBlueprint, List<BlueprintConfigurationEntry> configurationEntries, boolean override) {
try {
@@ -224,6 +234,25 @@ public String addComponentToHostgroups(String component, Collection<String> host
}
}

@Override
public boolean hiveDatabaseConfigurationExistsInBlueprint(String blueprintText) {
try {
ObjectNode root = (ObjectNode) JsonUtil.readTree(blueprintText);
JsonNode configurationsNode = root.path(CONFIGURATIONS_NODE);
if (configurationsNode.isArray()) {
ArrayNode arrayConfNode = (ArrayNode) configurationsNode;
JsonNode hiveSite = arrayConfNode.findValue(HIVE_SITE);
return hiveSite != null
&& hiveSite.findValue(JAVAX_JDO_OPTION_CONNECTION_URL) != null
&& hiveSite.findValue(JAVAX_JDO_OPTION_CONNECTION_DRIVER_NAME) != null
&& hiveSite.findValue(JAVAX_JDO_OPTION_CONNECTION_USER_NAME) != null
&& hiveSite.findValue(JAVAX_JDO_OPTION_CONNECTION_PASSWORD) != null;
}
} catch (IOException e) {
throw new BlueprintProcessingException("Failed to check hive database config in blueprint.", e);
}
return false;
}

private boolean componentExistsInHostgroup(String component, JsonNode hostGroupNode) {
boolean componentExists = false;
Iterator<JsonNode> components = hostGroupNode.path("components").elements();
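For reference, a blueprint whose hive-site already carries all four javax.jdo.option.* keys makes the new check above return true, so no default Postgres is set up for it (hypothetical fragment, not part of this commit):

    // Hypothetical sketch: this blueprint is treated as already configured.
    String bp = "{\"configurations\":[{\"hive-site\":{\"properties\":{"
            + "\"javax.jdo.option.ConnectionURL\":\"jdbc:postgresql://example-db:5432/hive\","
            + "\"javax.jdo.option.ConnectionDriverName\":\"org.postgresql.Driver\","
            + "\"javax.jdo.option.ConnectionUserName\":\"hive\","
            + "\"javax.jdo.option.ConnectionPassword\":\"hive\"}}}],\"host_groups\":[]}";
    boolean configured = new JacksonBlueprintProcessor().hiveDatabaseConfigurationExistsInBlueprint(bp);
    // configured == true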
RDSConfigProvider.java
@@ -2,16 +2,44 @@


import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import javax.inject.Inject;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

import com.sequenceiq.cloudbreak.api.model.RDSDatabase;
import com.sequenceiq.cloudbreak.api.model.ResourceStatus;
import com.sequenceiq.cloudbreak.domain.Blueprint;
import com.sequenceiq.cloudbreak.domain.Cluster;
import com.sequenceiq.cloudbreak.domain.RDSConfig;
import com.sequenceiq.cloudbreak.domain.Stack;
import com.sequenceiq.cloudbreak.repository.ClusterRepository;
import com.sequenceiq.cloudbreak.repository.RdsConfigRepository;
import com.sequenceiq.cloudbreak.service.rdsconfig.RdsConfigService;

@Component
public class RDSConfigProvider {

private static final Logger LOGGER = LoggerFactory.getLogger(RDSConfigProvider.class);

@Inject
private HiveConfigProvider hiveConfigProvider;

@Inject
private RdsConfigService rdsConfigService;

@Inject
private RdsConfigRepository rdsConfigRepository;

@Inject
private ClusterRepository clusterRepository;

public List<BlueprintConfigurationEntry> getConfigs(Set<RDSConfig> rdsConfigs) {
List<BlueprintConfigurationEntry> bpConfigs = new ArrayList<>();
for (RDSConfig rds : rdsConfigs) {
@@ -40,6 +68,35 @@ public List<BlueprintConfigurationEntry> getConfigs(Set<RDSConfig> rdsConfigs) {
return bpConfigs;
}

public Set<RDSConfig> createPostgresRdsConfigIfNeeded(Stack stack, Cluster cluster, Blueprint blueprint) {
Set<RDSConfig> rdsConfigs = rdsConfigRepository.findByClusterId(stack.getOwner(), stack.getAccount(), cluster.getId());
if (hiveConfigProvider.isRdsConfigNeededForHiveMetastore(blueprint)) {
LOGGER.info("Creating postgres RDSConfig");
RDSConfig rdsConfig = new RDSConfig();
rdsConfig.setName(stack.getName() + stack.getId());
rdsConfig.setConnectionUserName(hiveConfigProvider.getHiveDbUser());
rdsConfig.setConnectionPassword(hiveConfigProvider.getHiveDbPassword());
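// With the defaults above this yields e.g. jdbc:postgresql://localhost:5432/hive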
rdsConfig.setConnectionURL(
"jdbc:postgresql://" + hiveConfigProvider.getHiveDbHost() + ":" + hiveConfigProvider.getHiveDbPort() + "/" + hiveConfigProvider.getHiveDb()
);
rdsConfig.setDatabaseType(RDSDatabase.POSTGRES);
rdsConfig.setStatus(ResourceStatus.DEFAULT);
rdsConfig.setOwner(stack.getOwner());
rdsConfig.setAccount(stack.getAccount());
rdsConfig.setClusters(Collections.singleton(cluster));
rdsConfig = rdsConfigService.create(rdsConfig);

if (rdsConfigs == null) {
    rdsConfigs = new HashSet<>();
}
rdsConfigs.add(rdsConfig);
cluster.setRdsConfigs(rdsConfigs);
clusterRepository.save(cluster);
}
return rdsConfigs;
}

private String parseDatabaseTypeFromJdbcUrl(String jdbcUrl) {
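// e.g. "jdbc:postgresql://localhost:5432/hive" -> "postgresql"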
return jdbcUrl.split(":")[1];
}
RdsConfigService.java
@@ -9,6 +9,7 @@
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;

import com.google.common.base.Preconditions;
import com.sequenceiq.cloudbreak.api.model.RdsType;
import com.sequenceiq.cloudbreak.api.model.ResourceStatus;
import com.sequenceiq.cloudbreak.common.model.user.IdentityUser;
@@ -96,6 +97,13 @@ public RDSConfig create(IdentityUser user, RDSConfig rdsConfig) {
return rdsConfigRepository.save(rdsConfig);
}

public RDSConfig create(RDSConfig rdsConfig) {
Preconditions.checkNotNull(rdsConfig.getOwner(), "Owner cannot be null");
Preconditions.checkNotNull(rdsConfig.getAccount(), "Account cannot be null");
LOGGER.debug("Creating RDS configuration: [User: '{}', Account: '{}']", rdsConfig.getOwner(), rdsConfig.getAccount());
return rdsConfigRepository.save(rdsConfig);
}

public RDSConfig createIfNotExists(IdentityUser user, RDSConfig rdsConfig) {
try {
return getPrivateRdsConfig(rdsConfig.getName(), user);
Blueprint JSON (file name not shown in this view)
@@ -93,9 +93,6 @@
{
"name": "METRICS_MONITOR"
},
- {
-   "name": "MYSQL_SERVER"
- },
{
"name": "NAMENODE"
},
Blueprint JSON (file name not shown in this view)
@@ -96,9 +96,6 @@
{
"name": "METRICS_MONITOR"
},
- {
-   "name": "MYSQL_SERVER"
- },
{
"name": "NAMENODE"
},
