diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSimpleQueryIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSimpleQueryIT.java
index 397018804736..f404046d28a3 100644
--- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSimpleQueryIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBSimpleQueryIT.java
@@ -1054,7 +1054,6 @@ public void testStorageGroupWithHyphenInName() {
       try (final ResultSet resultSet = statement.executeQuery("SHOW DATABASES DETAILS")) {
         while (resultSet.next()) {
           Assert.assertEquals("root.group_with_hyphen", resultSet.getString(1));
-          Assert.assertEquals("TREE", resultSet.getString(12));
         }
       }
     } catch (final SQLException e) {
diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBDeleteStorageGroupIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBDeleteStorageGroupIT.java
index c751f6f57221..5315bd08c4d3 100644
--- a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBDeleteStorageGroupIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBDeleteStorageGroupIT.java
@@ -114,8 +114,8 @@ public void testDeleteMultipleStorageGroupWithQuote() throws Exception {

   @Test(expected = SQLException.class)
   public void deleteNonExistStorageGroup() throws Exception {
-    try (Connection connection = EnvFactory.getEnv().getConnection();
-        Statement statement = connection.createStatement()) {
+    try (final Connection connection = EnvFactory.getEnv().getConnection();
+        final Statement statement = connection.createStatement()) {
       statement.execute("CREATE DATABASE root.ln2.wf01.wt01");
       statement.execute("DELETE DATABASE root.ln2.wf01.wt02");
     }
diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/tablemodel/IoTDBPipeClusterIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/tablemodel/IoTDBPipeClusterIT.java
index 83725d76bff8..f30ddebc6988 100644
--- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/tablemodel/IoTDBPipeClusterIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/tablemodel/IoTDBPipeClusterIT.java
@@ -231,7 +231,8 @@ public void testPipeAfterDataRegionLeaderStop() throws Exception {
       }

       final AtomicInteger leaderPort = new AtomicInteger(-1);
-      final TShowRegionResp showRegionResp = client.showRegion(new TShowRegionReq());
+      final TShowRegionResp showRegionResp =
+          client.showRegion(new TShowRegionReq().setIsTableModel(true));
       showRegionResp
           .getRegionInfoList()
           .forEach(
diff --git a/integration-test/src/test/java/org/apache/iotdb/relational/it/db/it/IoTDBFlushQueryTableIT.java b/integration-test/src/test/java/org/apache/iotdb/relational/it/db/it/IoTDBFlushQueryTableIT.java
index d69fbd9892fc..6a1207d91cdf 100644
--- a/integration-test/src/test/java/org/apache/iotdb/relational/it/db/it/IoTDBFlushQueryTableIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/relational/it/db/it/IoTDBFlushQueryTableIT.java
@@ -181,15 +181,15 @@ public void testFlushGivenGroupNoData() {
   @Test
   @Ignore
   public void testFlushNotExistGroupNoData() {
-    try (Connection connection = EnvFactory.getEnv().getConnection();
-        Statement statement = connection.createStatement()) {
+    try (final Connection connection = EnvFactory.getEnv().getConnection();
+        final Statement statement = connection.createStatement()) {
       statement.execute("CREATE DATABASE root.noexist.nodatagroup1");
       try {
         statement.execute(
             "FLUSH root.noexist.nodatagroup1,root.notExistGroup1,root.notExistGroup2");
-      } catch (SQLException sqe) {
+      } catch (final SQLException sqe) {
         String expectedMsg =
-            "322: 322: storageGroup root.notExistGroup1,root.notExistGroup2 does not exist";
+            "322: 322: Database root.notExistGroup1,root.notExistGroup2 does not exist";
         sqe.printStackTrace();
         assertTrue(sqe.getMessage().contains(expectedMsg));
       }
diff --git a/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBDatabaseIT.java b/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBDatabaseIT.java
index c48f10a7f2fc..493d7487af8a 100644
--- a/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBDatabaseIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/relational/it/schema/IoTDBDatabaseIT.java
@@ -85,7 +85,6 @@ public void testManageDatabase() {
       int[] schemaReplicaFactors = new int[] {1};
       int[] dataReplicaFactors = new int[] {1};
       int[] timePartitionInterval = new int[] {604800000};
-      String[] model = new String[] {"TABLE"};

       // show
       try (final ResultSet resultSet = statement.executeQuery("SHOW DATABASES")) {
@@ -106,6 +105,8 @@ public void testManageDatabase() {
         assertEquals(databaseNames.length, cnt);
       }

+      final int[] schemaRegionGroupNum = new int[] {0};
+      final int[] dataRegionGroupNum = new int[] {0};
       // show
       try (final ResultSet resultSet = statement.executeQuery("SHOW DATABASES DETAILS")) {
         int cnt = 0;
@@ -121,7 +122,8 @@ public void testManageDatabase() {
           assertEquals(schemaReplicaFactors[cnt], resultSet.getInt(3));
           assertEquals(dataReplicaFactors[cnt], resultSet.getInt(4));
           assertEquals(timePartitionInterval[cnt], resultSet.getLong(5));
-          assertEquals(model[cnt], resultSet.getString(6));
+          assertEquals(schemaRegionGroupNum[cnt], resultSet.getInt(6));
+          assertEquals(dataRegionGroupNum[cnt], resultSet.getInt(7));
           cnt++;
         }
         assertEquals(databaseNames.length, cnt);
@@ -305,4 +307,49 @@ public void testDatabaseWithSpecificCharacters() throws SQLException {
           Collections.singleton("1,4,"));
     }
   }
+
+  @Test
+  public void testMixedDatabase() throws SQLException {
+    try (final Connection connection =
+            EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT);
+        final Statement statement = connection.createStatement()) {
+      statement.execute("create database test");
+    }
+
+    try (final Connection connection = EnvFactory.getEnv().getConnection();
+        final Statement statement = connection.createStatement()) {
+      statement.execute("create database root.test");
+      statement.execute(
+          "alter database root.test WITH SCHEMA_REGION_GROUP_NUM=2, DATA_REGION_GROUP_NUM=3");
+      statement.execute("drop database root.test");
+    }
+
+    try (final Connection connection =
+            EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT);
+        final Statement statement = connection.createStatement()) {
+      try (final ResultSet resultSet = statement.executeQuery("SHOW DATABASES DETAILS")) {
+        assertTrue(resultSet.next());
+        assertEquals("test", resultSet.getString(1));
+        assertEquals(0, resultSet.getInt(6));
+        assertEquals(0, resultSet.getInt(7));
+        assertFalse(resultSet.next());
+      }
+    }
+
+    try (final Connection connection = EnvFactory.getEnv().getConnection();
+        final Statement statement = connection.createStatement()) {
+      statement.execute("create database root.test");
+    }
+
+    try (final Connection connection =
+            EnvFactory.getEnv().getConnection(BaseEnv.TABLE_SQL_DIALECT);
+        final Statement statement = connection.createStatement()) {
+      statement.execute("drop database test");
+    }
+
+    try (final Connection connection = EnvFactory.getEnv().getConnection();
+        final Statement statement = connection.createStatement()) {
+      TestUtils.assertResultSetSize(statement.executeQuery("show databases"), 1);
+    }
+  }
 }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/CountDatabasePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/CountDatabasePlan.java
index 4772688c7015..b32fef1a36f1 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/CountDatabasePlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/CountDatabasePlan.java
@@ -30,20 +30,27 @@ public class CountDatabasePlan extends ConfigPhysicalReadPlan {

   private final String[] storageGroupPattern;
   private final PathPatternTree scope;
+  private final boolean isTableModel;

-  public CountDatabasePlan(final List<String> storageGroupPattern, final PathPatternTree scope) {
+  public CountDatabasePlan(
+      final List<String> storageGroupPattern,
+      final PathPatternTree scope,
+      final boolean isTableModel) {
     super(ConfigPhysicalPlanType.CountDatabase);
     this.storageGroupPattern = storageGroupPattern.toArray(new String[0]);
     this.scope = scope;
+    this.isTableModel = isTableModel;
   }

   public CountDatabasePlan(
       final ConfigPhysicalPlanType type,
       final List<String> storageGroupPattern,
-      final PathPatternTree scope) {
+      final PathPatternTree scope,
+      final boolean isTableModel) {
     super(type);
     this.storageGroupPattern = storageGroupPattern.toArray(new String[0]);
     this.scope = scope;
+    this.isTableModel = isTableModel;
   }

   public String[] getDatabasePattern() {
@@ -54,6 +61,10 @@ public PathPatternTree getScope() {
     return scope;
   }

+  public boolean isTableModel() {
+    return isTableModel;
+  }
+
   @Override
   public boolean equals(final Object o) {
     if (this == o) {
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/GetDatabasePlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/GetDatabasePlan.java
index 2ba4c4194984..374df7eec0bc 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/GetDatabasePlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/database/GetDatabasePlan.java
@@ -26,7 +26,10 @@

 public class GetDatabasePlan extends CountDatabasePlan {

-  public GetDatabasePlan(final List<String> storageGroupPathPattern, final PathPatternTree scope) {
-    super(ConfigPhysicalPlanType.GetDatabase, storageGroupPathPattern, scope);
+  public GetDatabasePlan(
+      final List<String> storageGroupPathPattern,
+      final PathPatternTree scope,
+      final boolean isTableModel) {
+    super(ConfigPhysicalPlanType.GetDatabase, storageGroupPathPattern, scope, isTableModel);
   }
 }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/database/AdjustMaxRegionGroupNumPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/database/AdjustMaxRegionGroupNumPlan.java
index c6dbd600278a..42b98cc30d03 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/database/AdjustMaxRegionGroupNumPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/database/AdjustMaxRegionGroupNumPlan.java
@@ -42,7 +42,7 @@ public AdjustMaxRegionGroupNumPlan() {
     this.maxRegionGroupNumMap = new HashMap<>();
   }

-  public void putEntry(String storageGroup, Pair<Integer, Integer> maxRegionGroupNum) {
+  public void putEntry(final String storageGroup, final Pair<Integer, Integer> maxRegionGroupNum) {
     maxRegionGroupNumMap.put(storageGroup, maxRegionGroupNum);
   }
@@ -51,11 +51,11 @@ public Map<String, Pair<Integer, Integer>> getMaxRegionGroupNumMap() {
   }

   @Override
-  protected void serializeImpl(DataOutputStream stream) throws IOException {
+  protected void serializeImpl(final DataOutputStream stream) throws IOException {
     ReadWriteIOUtils.write(getType().getPlanType(), stream);
     ReadWriteIOUtils.write(maxRegionGroupNumMap.size(), stream);
-    for (Map.Entry<String, Pair<Integer, Integer>> maxRegionGroupNumEntry :
+    for (final Map.Entry<String, Pair<Integer, Integer>> maxRegionGroupNumEntry :
         maxRegionGroupNumMap.entrySet()) {
       ReadWriteIOUtils.write(maxRegionGroupNumEntry.getKey(), stream);
       ReadWriteIOUtils.write(maxRegionGroupNumEntry.getValue().getLeft(), stream);
@@ -64,27 +64,27 @@ protected void serializeImpl(DataOutputStream stream) throws IOException {
   }

   @Override
-  protected void deserializeImpl(ByteBuffer buffer) throws IOException {
-    int storageGroupNum = buffer.getInt();
+  protected void deserializeImpl(final ByteBuffer buffer) throws IOException {
+    final int storageGroupNum = buffer.getInt();
     for (int i = 0; i < storageGroupNum; i++) {
-      String storageGroup = ReadWriteIOUtils.readString(buffer);
-      int maxSchemaRegionGroupNum = buffer.getInt();
-      int maxDataRegionGroupNum = buffer.getInt();
+      final String storageGroup = ReadWriteIOUtils.readString(buffer);
+      final int maxSchemaRegionGroupNum = buffer.getInt();
+      final int maxDataRegionGroupNum = buffer.getInt();
       maxRegionGroupNumMap.put(
           storageGroup, new Pair<>(maxSchemaRegionGroupNum, maxDataRegionGroupNum));
     }
   }

   @Override
-  public boolean equals(Object o) {
+  public boolean equals(final Object o) {
     if (this == o) {
       return true;
     }
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    AdjustMaxRegionGroupNumPlan that = (AdjustMaxRegionGroupNumPlan) o;
+    final AdjustMaxRegionGroupNumPlan that = (AdjustMaxRegionGroupNumPlan) o;
     return maxRegionGroupNumMap.equals(that.maxRegionGroupNumMap);
   }
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/quota/SetSpaceQuotaPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/quota/SetSpaceQuotaPlan.java
index 525bf4f76653..c5e8c0d3673b 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/quota/SetSpaceQuotaPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/quota/SetSpaceQuotaPlan.java
@@ -39,7 +39,7 @@ public SetSpaceQuotaPlan() {
     super(ConfigPhysicalPlanType.setSpaceQuota);
   }

-  public SetSpaceQuotaPlan(List<String> prefixPathList, TSpaceQuota spaceLimit) {
+  public SetSpaceQuotaPlan(final List<String> prefixPathList, final TSpaceQuota spaceLimit) {
     super(ConfigPhysicalPlanType.setSpaceQuota);
     this.prefixPathList = prefixPathList;
     this.spaceLimit = spaceLimit;
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/region/CreateRegionGroupsPlan.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/region/CreateRegionGroupsPlan.java
index 9cdec5f0d904..82677d745811 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/region/CreateRegionGroupsPlan.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/region/CreateRegionGroupsPlan.java
@@ -50,7 +50,7 @@ public CreateRegionGroupsPlan() {
this.regionGroupMap = new HashMap<>(); } - public CreateRegionGroupsPlan(ConfigPhysicalPlanType type) { + public CreateRegionGroupsPlan(final ConfigPhysicalPlanType type) { super(type); this.regionGroupMap = new HashMap<>(); } @@ -59,16 +59,17 @@ public Map> getRegionGroupMap() { return regionGroupMap; } - public void addRegionGroup(String database, TRegionReplicaSet regionReplicaSet) { + public void addRegionGroup(final String database, final TRegionReplicaSet regionReplicaSet) { regionGroupMap .computeIfAbsent(database, regionReplicaSets -> new ArrayList<>()) .add(regionReplicaSet); } - public void planLog(Logger logger) { - for (Map.Entry> regionGroupEntry : regionGroupMap.entrySet()) { - String database = regionGroupEntry.getKey(); - for (TRegionReplicaSet regionReplicaSet : regionGroupEntry.getValue()) { + public void planLog(final Logger logger) { + for (final Map.Entry> regionGroupEntry : + regionGroupMap.entrySet()) { + final String database = regionGroupEntry.getKey(); + for (final TRegionReplicaSet regionReplicaSet : regionGroupEntry.getValue()) { logger.info( "[CreateRegionGroups] RegionGroup: {}, belonged database: {}, on DataNodes: {}", regionReplicaSet.getRegionId(), @@ -80,24 +81,24 @@ public void planLog(Logger logger) { } } - public void serializeForProcedure(DataOutputStream stream) throws IOException { + public void serializeForProcedure(final DataOutputStream stream) throws IOException { this.serializeImpl(stream); } - public void deserializeForProcedure(ByteBuffer buffer) throws IOException { + public void deserializeForProcedure(final ByteBuffer buffer) throws IOException { // to remove the planType of ConfigPhysicalPlanType buffer.getShort(); this.deserializeImpl(buffer); } @Override - protected void serializeImpl(DataOutputStream stream) throws IOException { + protected void serializeImpl(final DataOutputStream stream) throws IOException { stream.writeShort(getType().getPlanType()); stream.writeInt(regionGroupMap.size()); - for (Entry> entry : regionGroupMap.entrySet()) { - String database = entry.getKey(); - List regionReplicaSets = entry.getValue(); + for (final Entry> entry : regionGroupMap.entrySet()) { + final String database = entry.getKey(); + final List regionReplicaSets = entry.getValue(); BasicStructureSerDeUtil.write(database, stream); stream.writeInt(regionReplicaSets.size()); regionReplicaSets.forEach( @@ -107,15 +108,15 @@ protected void serializeImpl(DataOutputStream stream) throws IOException { } @Override - protected void deserializeImpl(ByteBuffer buffer) throws IOException { - int databaseNum = buffer.getInt(); + protected void deserializeImpl(final ByteBuffer buffer) throws IOException { + final int databaseNum = buffer.getInt(); for (int i = 0; i < databaseNum; i++) { - String database = BasicStructureSerDeUtil.readString(buffer); + final String database = BasicStructureSerDeUtil.readString(buffer); regionGroupMap.put(database, new ArrayList<>()); - int regionReplicaSetNum = buffer.getInt(); + final int regionReplicaSetNum = buffer.getInt(); for (int j = 0; j < regionReplicaSetNum; j++) { - TRegionReplicaSet regionReplicaSet = + final TRegionReplicaSet regionReplicaSet = ThriftCommonsSerDeUtils.deserializeTRegionReplicaSet(buffer); regionGroupMap.get(database).add(regionReplicaSet); } @@ -123,7 +124,7 @@ protected void deserializeImpl(ByteBuffer buffer) throws IOException { } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } @@ -133,7 +134,7 @@ public boolean equals(Object o) 
{ if (!super.equals(o)) { return false; } - CreateRegionGroupsPlan that = (CreateRegionGroupsPlan) o; + final CreateRegionGroupsPlan that = (CreateRegionGroupsPlan) o; return Objects.equals(regionGroupMap, that.regionGroupMap); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/exception/NoAvailableRegionGroupException.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/exception/NoAvailableRegionGroupException.java index d2309c5e2114..589cd481ded1 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/exception/NoAvailableRegionGroupException.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/exception/NoAvailableRegionGroupException.java @@ -26,7 +26,7 @@ public class NoAvailableRegionGroupException extends ConfigNodeException { private static final String SCHEMA_REGION_GROUP = "SchemaRegionGroup"; private static final String DATA_REGION_GROUP = "DataRegionGroup"; - public NoAvailableRegionGroupException(TConsensusGroupType regionGroupType) { + public NoAvailableRegionGroupException(final TConsensusGroupType regionGroupType) { super( String.format( "There are no available %s RegionGroups currently, " diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterQuotaManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterQuotaManager.java index cbfa1bc2fb2e..b6bff890f361 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterQuotaManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterQuotaManager.java @@ -72,7 +72,7 @@ public ClusterQuotaManager(IManager configManager, QuotaInfo quotaInfo) { regionDisk = new ConcurrentHashMap<>(); } - public TSStatus setSpaceQuota(TSetSpaceQuotaReq req) { + public TSStatus setSpaceQuota(final TSetSpaceQuotaReq req) { if (!checkSpaceQuota(req)) { return RpcUtils.getStatus( TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode(), @@ -80,12 +80,12 @@ public TSStatus setSpaceQuota(TSetSpaceQuotaReq req) { } // TODO: Datanode failed to receive rpc try { - TSStatus response = + final TSStatus response = configManager .getConsensusManager() .write(new SetSpaceQuotaPlan(req.getDatabase(), req.getSpaceLimit())); if (response.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - Map dataNodeLocationMap = + final Map dataNodeLocationMap = configManager.getNodeManager().getRegisteredDataNodeLocations(); DataNodeAsyncRequestContext clientHandler = new DataNodeAsyncRequestContext<>( @@ -95,7 +95,7 @@ public TSStatus setSpaceQuota(TSetSpaceQuotaReq req) { return RpcUtils.squashResponseStatusList(clientHandler.getResponseList()); } return response; - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn( String.format( "Unexpected error happened while setting space quota on database: %s ", diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java index 35f5559231f0..671aca0e17b9 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java @@ -231,7 +231,6 @@ import org.apache.iotdb.confignode.rpc.thrift.TUnsubscribeReq; import org.apache.iotdb.consensus.common.DataSet; import 
org.apache.iotdb.consensus.exception.ConsensusException; -import org.apache.iotdb.db.exception.metadata.DatabaseModelException; import org.apache.iotdb.db.schemaengine.template.Template; import org.apache.iotdb.db.schemaengine.template.TemplateAlterOperationType; import org.apache.iotdb.db.schemaengine.template.alter.TemplateAlterOperationUtil; @@ -746,65 +745,42 @@ public synchronized TSStatus deleteDatabases(final TDeleteDatabasesReq tDeleteRe final TSStatus status = confirmLeader(); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { final List deletedPaths = tDeleteReq.getPrefixPathList(); + // remove wild final Map deleteDatabaseSchemaMap = - getClusterSchemaManager().getMatchedDatabaseSchemasByName(deletedPaths); - - // Filter by model - final int size = deleteDatabaseSchemaMap.size(); - final boolean isTableModel = tDeleteReq.isSetIsTableModel() && tDeleteReq.isIsTableModel(); - final List mismatchDatabaseNames = new ArrayList<>(); - deleteDatabaseSchemaMap - .entrySet() - .removeIf( - entry -> { - if (entry.getValue().isIsTableModel() != isTableModel) { - mismatchDatabaseNames.add(entry.getKey()); - return true; - } - return false; - }); - + getClusterSchemaManager() + .getMatchedDatabaseSchemasByName( + deletedPaths, tDeleteReq.isSetIsTableModel() && tDeleteReq.isIsTableModel()); if (deleteDatabaseSchemaMap.isEmpty()) { - if (size == 0) { - return RpcUtils.getStatus( - TSStatusCode.PATH_NOT_EXIST.getStatusCode(), - String.format("Path %s does not exist", Arrays.toString(deletedPaths.toArray()))); - } else if (size == 1) { - final DatabaseModelException exception = - new DatabaseModelException(mismatchDatabaseNames.get(0), !isTableModel); - return RpcUtils.getStatus(exception.getErrorCode(), exception.getMessage()); - } else { - final DatabaseModelException exception = - new DatabaseModelException(mismatchDatabaseNames, !isTableModel); - return RpcUtils.getStatus(exception.getErrorCode(), exception.getMessage()); - } + return RpcUtils.getStatus( + TSStatusCode.PATH_NOT_EXIST.getStatusCode(), + String.format("Path %s does not exist", Arrays.toString(deletedPaths.toArray()))); } - final ArrayList parsedDeleteDatabases = - new ArrayList<>(deleteDatabaseSchemaMap.values()); + return procedureManager.deleteDatabases( - parsedDeleteDatabases, + new ArrayList<>(deleteDatabaseSchemaMap.values()), tDeleteReq.isSetIsGeneratedByPipe() && tDeleteReq.isIsGeneratedByPipe()); } else { return status; } } - private List calculateRelatedSlot(PartialPath path, PartialPath database) { + private List calculateRelatedSlot( + final PartialPath path, final PartialPath database) { // The path contains `**` if (path.getFullPath().contains(IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD)) { return new ArrayList<>(); } // with database = root.sg, path = root.*.d1 // convert path = root.sg.d1 - List innerPathList = path.alterPrefixPath(database); + final List innerPathList = path.alterPrefixPath(database); if (innerPathList.isEmpty()) { return new ArrayList<>(); } - String[] devicePath = + final String[] devicePath = Arrays.copyOf(innerPathList.get(0).getNodes(), innerPathList.get(0).getNodeLength() - 1); // root.sg1.*.d1 - for (String node : devicePath) { + for (final String node : devicePath) { if (node.contains(IoTDBConstant.ONE_LEVEL_PATH_WILDCARD)) { return Collections.emptyList(); } @@ -815,8 +791,7 @@ private List calculateRelatedSlot(PartialPath path, Partia } @Override - public TSchemaPartitionTableResp getSchemaPartition( - final PathPatternTree patternTree, final boolean isTableModel) { + 
public TSchemaPartitionTableResp getSchemaPartition(final PathPatternTree patternTree) { // Construct empty response final TSStatus status = confirmLeader(); @@ -828,11 +803,11 @@ public TSchemaPartitionTableResp getSchemaPartition( // Build GetSchemaPartitionPlan final Map> partitionSlotsMap = new HashMap<>(); final List relatedPaths = patternTree.getAllPathPatterns(); - final List allDatabases = getClusterSchemaManager().getDatabaseNames(isTableModel); + final List allDatabases = getClusterSchemaManager().getDatabaseNames(false); final List allDatabasePaths = new ArrayList<>(); for (final String database : allDatabases) { try { - allDatabasePaths.add(PartialPath.getDatabasePath(database)); + allDatabasePaths.add(PartialPath.getQualifiedDatabasePartialPath(database)); } catch (final IllegalPathException e) { throw new RuntimeException(e); } @@ -861,7 +836,7 @@ public TSchemaPartitionTableResp getSchemaPartition( @Override public TSchemaPartitionTableResp getSchemaPartition( - Map> dbSlotMap) { + final Map> dbSlotMap) { // Construct empty response TSchemaPartitionTableResp resp = new TSchemaPartitionTableResp(); // Return empty resp if the partitionSlotsMap is empty @@ -869,11 +844,12 @@ public TSchemaPartitionTableResp getSchemaPartition( return resp.setStatus(StatusUtils.OK).setSchemaPartitionTable(new HashMap<>()); } - GetSchemaPartitionPlan getSchemaPartitionPlan = + final GetSchemaPartitionPlan getSchemaPartitionPlan = new GetSchemaPartitionPlan( dbSlotMap.entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> new ArrayList<>(e.getValue())))); - SchemaPartitionResp queryResult = partitionManager.getSchemaPartition(getSchemaPartitionPlan); + final SchemaPartitionResp queryResult = + partitionManager.getSchemaPartition(getSchemaPartitionPlan); resp = queryResult.convertToRpcSchemaPartitionTableResp(); LOGGER.debug("GetSchemaPartition receive paths: {}, return: {}", dbSlotMap, resp); @@ -882,22 +858,21 @@ public TSchemaPartitionTableResp getSchemaPartition( } @Override - public TSchemaPartitionTableResp getOrCreateSchemaPartition( - final PathPatternTree patternTree, final boolean isTableModel) { - TSStatus status = confirmLeader(); + public TSchemaPartitionTableResp getOrCreateSchemaPartition(final PathPatternTree patternTree) { + final TSStatus status = confirmLeader(); if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { // Construct empty response - TSchemaPartitionTableResp resp = new TSchemaPartitionTableResp(); + final TSchemaPartitionTableResp resp = new TSchemaPartitionTableResp(); return resp.setStatus(status); } - List devicePaths = patternTree.getAllDevicePatterns(); - List databases = getClusterSchemaManager().getDatabaseNames(isTableModel); + final List devicePaths = patternTree.getAllDevicePatterns(); + final List databases = getClusterSchemaManager().getDatabaseNames(false); // Build GetOrCreateSchemaPartitionPlan - Map> partitionSlotsMap = new HashMap<>(); - for (IDeviceID deviceID : devicePaths) { - for (String database : databases) { + final Map> partitionSlotsMap = new HashMap<>(); + for (final IDeviceID deviceID : devicePaths) { + for (final String database : databases) { if (PathUtils.isStartWith(deviceID, database)) { partitionSlotsMap .computeIfAbsent(database, key -> new HashSet<>()) @@ -907,14 +882,14 @@ public TSchemaPartitionTableResp getOrCreateSchemaPartition( } } - Map> partitionSlotListMap = new HashMap<>(); + final Map> partitionSlotListMap = new HashMap<>(); partitionSlotsMap.forEach((db, slots) -> 
partitionSlotListMap.put(db, new ArrayList<>(slots))); return getOrCreateSchemaPartition(partitionSlotListMap); } @Override public TSchemaPartitionTableResp getOrCreateSchemaPartition( - Map> dbSlotMap) { + final Map> dbSlotMap) { TSStatus status = confirmLeader(); if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { @@ -1888,7 +1863,8 @@ public TShowDatabaseResp showDatabase(final TGetDatabaseReq req) { ? SchemaConstant.ALL_MATCH_SCOPE : PathPatternTree.deserialize(ByteBuffer.wrap(req.getScopePatternTree())); final GetDatabasePlan getDatabasePlan = - new GetDatabasePlan(req.getDatabasePathPattern(), scope); + new GetDatabasePlan( + req.getDatabasePathPattern(), scope, req.isSetIsTableModel() && req.isIsTableModel()); return getClusterSchemaManager().showDatabase(getDatabasePlan); } else { return new TShowDatabaseResp().setStatus(status); @@ -2428,14 +2404,18 @@ public TShowCQResp showCQ() { */ public Map getRelatedSchemaRegionGroup( final PathPatternTree patternTree) { - return getRelatedSchemaRegionGroup(patternTree, false); + return getRelatedSchemaRegionGroup(getSchemaPartition(patternTree).getSchemaPartitionTable()); } - public Map getRelatedSchemaRegionGroup( - final PathPatternTree patternTree, final boolean isTableModel) { - final Map> schemaPartitionTable = - getSchemaPartition(patternTree, isTableModel).getSchemaPartitionTable(); + public Map getRelatedSchemaRegionGroup4TableModel( + final String database) { + return getRelatedSchemaRegionGroup( + getSchemaPartition(Collections.singletonMap(database, Collections.emptyList())) + .getSchemaPartitionTable()); + } + private Map getRelatedSchemaRegionGroup( + final Map> schemaPartitionTable) { final List allRegionReplicaSets = getPartitionManager().getAllReplicaSets(); final Set groupIdSet = schemaPartitionTable.values().stream() @@ -2455,11 +2435,19 @@ public Map getRelatedSchemaRegionGroup( * patternTree */ public Map getRelatedDataRegionGroup( - final PathPatternTree patternTree, final boolean isTableModel) { - // Get all databases and slots by getting schemaengine partition - final Map> schemaPartitionTable = - getSchemaPartition(patternTree, isTableModel).getSchemaPartitionTable(); + final PathPatternTree patternTree) { + return getRelatedDataRegionGroup(getSchemaPartition(patternTree).getSchemaPartitionTable()); + } + + public Map getRelatedDataRegionGroup4TableModel( + final String database) { + return getRelatedDataRegionGroup( + getSchemaPartition(Collections.singletonMap(database, Collections.emptyList())) + .getSchemaPartitionTable()); + } + private Map getRelatedDataRegionGroup( + final Map> schemaPartitionTable) { // Construct request for getting data partition final Map> partitionSlotsMap = new HashMap<>(); schemaPartitionTable.forEach( diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java index 526c66612995..779f3daa03b8 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java @@ -421,8 +421,7 @@ public interface IManager { * * @return TSchemaPartitionResp */ - TSchemaPartitionTableResp getSchemaPartition( - final PathPatternTree patternTree, final boolean isTableModel); + TSchemaPartitionTableResp getSchemaPartition(final PathPatternTree patternTree); /** * Get SchemaPartition with . 
@@ -436,8 +435,7 @@ TSchemaPartitionTableResp getSchemaPartition( * * @return TSchemaPartitionResp */ - TSchemaPartitionTableResp getOrCreateSchemaPartition( - final PathPatternTree patternTree, final boolean isTableModel); + TSchemaPartitionTableResp getOrCreateSchemaPartition(final PathPatternTree patternTree); /** * Get or create SchemaPartition with . diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java index e18e02e4ab82..b26b8c6b3beb 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java @@ -233,7 +233,7 @@ public TSStatus testSubProcedure() { public TSStatus deleteDatabases( final List deleteSgSchemaList, final boolean isGeneratedByPipe) { - List procedures = new ArrayList<>(); + final List procedures = new ArrayList<>(); final long startCheckTimeForProcedures = System.currentTimeMillis(); for (final TDatabaseSchema databaseSchema : deleteSgSchemaList) { final String database = databaseSchema.getName(); @@ -912,11 +912,12 @@ public TSStatus migrateRegion(TMigrateRegionReq migrateRegionReq) { * {@link TSStatusCode#CREATE_REGION_ERROR} otherwise */ public TSStatus createRegionGroups( - TConsensusGroupType consensusGroupType, CreateRegionGroupsPlan createRegionGroupsPlan) { - CreateRegionGroupsProcedure procedure = + final TConsensusGroupType consensusGroupType, + final CreateRegionGroupsPlan createRegionGroupsPlan) { + final CreateRegionGroupsProcedure procedure = new CreateRegionGroupsProcedure(consensusGroupType, createRegionGroupsPlan); executor.submitProcedure(procedure); - TSStatus status = waitingProcedureFinished(procedure); + final TSStatus status = waitingProcedureFinished(procedure); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { return status; } else { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/LoadManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/LoadManager.java index ee9e7530f3d0..bb4c6fa068f5 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/LoadManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/LoadManager.java @@ -99,7 +99,7 @@ protected void setHeartbeatService(IManager configManager, LoadCache loadCache) * @throws DatabaseNotExistsException If some specific StorageGroups don't exist */ public CreateRegionGroupsPlan allocateRegionGroups( - Map allotmentMap, TConsensusGroupType consensusGroupType) + final Map allotmentMap, final TConsensusGroupType consensusGroupType) throws NotEnoughDataNodeException, DatabaseNotExistsException { return regionBalancer.genRegionGroupsAllocationPlan(allotmentMap, consensusGroupType); } @@ -111,7 +111,7 @@ public CreateRegionGroupsPlan allocateRegionGroups( * @return Map, the allocating result */ public Map allocateSchemaPartition( - Map> unassignedSchemaPartitionSlotsMap) + final Map> unassignedSchemaPartitionSlotsMap) throws NoAvailableRegionGroupException { return partitionBalancer.allocateSchemaPartition(unassignedSchemaPartitionSlotsMap); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/PartitionBalancer.java 
b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/PartitionBalancer.java index 9b9b79b61159..2173563c704f 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/PartitionBalancer.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/PartitionBalancer.java @@ -71,31 +71,31 @@ public PartitionBalancer(IManager configManager) { * @return Map, the allocating result */ public Map allocateSchemaPartition( - Map> unassignedSchemaPartitionSlotsMap) + final Map> unassignedSchemaPartitionSlotsMap) throws NoAvailableRegionGroupException { - Map result = new HashMap<>(); + final Map result = new HashMap<>(); - for (Map.Entry> slotsMapEntry : + for (final Map.Entry> slotsMapEntry : unassignedSchemaPartitionSlotsMap.entrySet()) { final String database = slotsMapEntry.getKey(); final List unassignedPartitionSlots = slotsMapEntry.getValue(); // Filter available SchemaRegionGroups and // sort them by the number of allocated SchemaPartitions - BalanceTreeMap counter = new BalanceTreeMap<>(); - List> regionSlotsCounter = + final BalanceTreeMap counter = new BalanceTreeMap<>(); + final List> regionSlotsCounter = getPartitionManager() .getSortedRegionGroupSlotsCounter(database, TConsensusGroupType.SchemaRegion); - for (Pair pair : regionSlotsCounter) { + for (final Pair pair : regionSlotsCounter) { counter.put(pair.getRight(), pair.getLeft().intValue()); } // Enumerate SeriesPartitionSlot - Map schemaPartitionMap = new HashMap<>(); - for (TSeriesPartitionSlot seriesPartitionSlot : unassignedPartitionSlots) { + final Map schemaPartitionMap = new HashMap<>(); + for (final TSeriesPartitionSlot seriesPartitionSlot : unassignedPartitionSlots) { // Greedy allocation: allocate the unassigned SchemaPartition to // the RegionGroup whose allocated SchemaPartitions is the least - TConsensusGroupId consensusGroupId = counter.getKeyWithMinValue(); + final TConsensusGroupId consensusGroupId = counter.getKeyWithMinValue(); schemaPartitionMap.put(seriesPartitionSlot, consensusGroupId); counter.put(consensusGroupId, counter.get(consensusGroupId) + 1); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RegionBalancer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RegionBalancer.java index 1b553eda4483..3528cbe13607 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RegionBalancer.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RegionBalancer.java @@ -77,42 +77,42 @@ public RegionBalancer(IManager configManager) { * @throws DatabaseNotExistsException When some StorageGroups don't exist */ public CreateRegionGroupsPlan genRegionGroupsAllocationPlan( - Map allotmentMap, TConsensusGroupType consensusGroupType) + final Map allotmentMap, final TConsensusGroupType consensusGroupType) throws NotEnoughDataNodeException, DatabaseNotExistsException { // Some new RegionGroups will have to occupy unknown DataNodes // if the number of online DataNodes is insufficient - List availableDataNodes = + final List availableDataNodes = getNodeManager().filterDataNodeThroughStatus(NodeStatus.Running, NodeStatus.Unknown); // Make sure the number of available DataNodes is enough for allocating new RegionGroups - for (String database : allotmentMap.keySet()) { - int replicationFactor = + for (final String database : allotmentMap.keySet()) { 
+ final int replicationFactor = getClusterSchemaManager().getReplicationFactor(database, consensusGroupType); if (availableDataNodes.size() < replicationFactor) { throw new NotEnoughDataNodeException(availableDataNodes, replicationFactor); } } - CreateRegionGroupsPlan createRegionGroupsPlan = new CreateRegionGroupsPlan(); + final CreateRegionGroupsPlan createRegionGroupsPlan = new CreateRegionGroupsPlan(); // Only considering the specified ConsensusGroupType when doing allocation - List allocatedRegionGroups = + final List allocatedRegionGroups = getPartitionManager().getAllReplicaSets(consensusGroupType); - for (Map.Entry entry : allotmentMap.entrySet()) { - String database = entry.getKey(); - int allotment = entry.getValue(); - int replicationFactor = + for (final Map.Entry entry : allotmentMap.entrySet()) { + final String database = entry.getKey(); + final int allotment = entry.getValue(); + final int replicationFactor = getClusterSchemaManager().getReplicationFactor(database, consensusGroupType); // Only considering the specified Database when doing allocation - List databaseAllocatedRegionGroups = + final List databaseAllocatedRegionGroups = getPartitionManager().getAllReplicaSets(database, consensusGroupType); for (int i = 0; i < allotment; i++) { // Prepare input data - Map availableDataNodeMap = + final Map availableDataNodeMap = new HashMap<>(availableDataNodes.size()); - Map freeDiskSpaceMap = new HashMap<>(availableDataNodes.size()); + final Map freeDiskSpaceMap = new HashMap<>(availableDataNodes.size()); availableDataNodes.forEach( dataNodeConfiguration -> { int dataNodeId = dataNodeConfiguration.getLocation().getDataNodeId(); @@ -121,7 +121,7 @@ public CreateRegionGroupsPlan genRegionGroupsAllocationPlan( }); // Generate allocation plan - TRegionReplicaSet newRegionGroup = + final TRegionReplicaSet newRegionGroup = regionGroupAllocator.generateOptimalRegionReplicasDistribution( availableDataNodeMap, freeDiskSpaceMap, diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/LoadCache.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/LoadCache.java index 5968ef36ae05..1886d7e76d4c 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/LoadCache.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/cache/LoadCache.java @@ -99,7 +99,7 @@ public LoadCache() { this.confirmedConfigNodeMap = new ConcurrentHashMap<>(); } - public void initHeartbeatCache(IManager configManager) { + public void initHeartbeatCache(final IManager configManager) { initNodeHeartbeatCache( configManager.getNodeManager().getRegisteredConfigNodes(), configManager.getNodeManager().getRegisteredDataNodes(), diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java index f410c68284ad..0fcd8f773475 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/partition/PartitionManager.java @@ -33,7 +33,6 @@ import org.apache.iotdb.commons.partition.DataPartitionTable; import org.apache.iotdb.commons.partition.SchemaPartitionTable; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; -import org.apache.iotdb.commons.path.PartialPath; import 
org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; @@ -172,12 +171,12 @@ private void setSeriesPartitionExecutor() { * @param req SchemaPartitionPlan with partitionSlotsMap * @return SchemaPartitionDataSet that contains only existing SchemaPartition */ - public SchemaPartitionResp getSchemaPartition(GetSchemaPartitionPlan req) { + public SchemaPartitionResp getSchemaPartition(final GetSchemaPartitionPlan req) { try { return (SchemaPartitionResp) getConsensusManager().read(req); - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn(CONSENSUS_READ_ERROR, e); - TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); + final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); res.setMessage(e.getMessage()); return new SchemaPartitionResp(res, false, Collections.emptyMap()); } @@ -190,12 +189,12 @@ public SchemaPartitionResp getSchemaPartition(GetSchemaPartitionPlan req) { * TTimeSlotList>> * @return DataPartitionDataSet that contains only existing DataPartition */ - public DataPartitionResp getDataPartition(GetDataPartitionPlan req) { + public DataPartitionResp getDataPartition(final GetDataPartitionPlan req) { try { return (DataPartitionResp) getConsensusManager().read(req); - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn(CONSENSUS_READ_ERROR, e); - TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); + final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); res.setMessage(e.getMessage()); return new DataPartitionResp(res, false, Collections.emptyMap()); } @@ -209,9 +208,9 @@ public DataPartitionResp getDataPartition(GetDataPartitionPlan req) { * finish. NOT_ENOUGH_DATA_NODE if the DataNodes is not enough to create new Regions. * STORAGE_GROUP_NOT_EXIST if some StorageGroup don't exist. */ - public SchemaPartitionResp getOrCreateSchemaPartition(GetOrCreateSchemaPartitionPlan req) { + public SchemaPartitionResp getOrCreateSchemaPartition(final GetOrCreateSchemaPartitionPlan req) { // Check if the related Databases exist - for (String database : req.getPartitionSlotsMap().keySet()) { + for (final String database : req.getPartitionSlotsMap().keySet()) { if (!isDatabaseExist(database)) { return new SchemaPartitionResp( new TSStatus(TSStatusCode.DATABASE_NOT_EXIST.getStatusCode()) @@ -245,14 +244,14 @@ public SchemaPartitionResp getOrCreateSchemaPartition(GetOrCreateSchemaPartition } // Filter unassigned SchemaPartitionSlots - Map> unassignedSchemaPartitionSlotsMap = + final Map> unassignedSchemaPartitionSlotsMap = partitionInfo.filterUnassignedSchemaPartitionSlots(req.getPartitionSlotsMap()); // Here we ensure that each StorageGroup has at least one SchemaRegion. // And if some StorageGroups own too many slots, extend SchemaRegion for them. 
// Map - Map unassignedSchemaPartitionSlotsCountMap = new ConcurrentHashMap<>(); + final Map unassignedSchemaPartitionSlotsCountMap = new ConcurrentHashMap<>(); unassignedSchemaPartitionSlotsMap.forEach( (storageGroup, unassignedSchemaPartitionSlots) -> unassignedSchemaPartitionSlotsCountMap.put( @@ -266,11 +265,11 @@ public SchemaPartitionResp getOrCreateSchemaPartition(GetOrCreateSchemaPartition return resp; } - Map assignedSchemaPartition; + final Map assignedSchemaPartition; try { assignedSchemaPartition = getLoadManager().allocateSchemaPartition(unassignedSchemaPartitionSlotsMap); - } catch (NoAvailableRegionGroupException e) { + } catch (final NoAvailableRegionGroupException e) { status = getConsensusManager().confirmLeader(); if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { // The allocation might fail due to leadership change @@ -286,7 +285,7 @@ public SchemaPartitionResp getOrCreateSchemaPartition(GetOrCreateSchemaPartition } // Cache allocating result only if the current ConfigNode still holds its leadership - CreateSchemaPartitionPlan createPlan = new CreateSchemaPartitionPlan(); + final CreateSchemaPartitionPlan createPlan = new CreateSchemaPartitionPlan(); createPlan.setAssignedSchemaPartition(assignedSchemaPartition); status = consensusWritePartitionResult(createPlan); @@ -300,18 +299,18 @@ public SchemaPartitionResp getOrCreateSchemaPartition(GetOrCreateSchemaPartition resp = getSchemaPartition(req); if (!resp.isAllPartitionsExist()) { // Count the fail rate - AtomicInteger totalSlotNum = new AtomicInteger(); + final AtomicInteger totalSlotNum = new AtomicInteger(); req.getPartitionSlotsMap() .forEach((database, partitionSlots) -> totalSlotNum.addAndGet(partitionSlots.size())); - AtomicInteger unassignedSlotNum = new AtomicInteger(); - Map> unassignedSchemaPartitionSlotsMap = + final AtomicInteger unassignedSlotNum = new AtomicInteger(); + final Map> unassignedSchemaPartitionSlotsMap = partitionInfo.filterUnassignedSchemaPartitionSlots(req.getPartitionSlotsMap()); unassignedSchemaPartitionSlotsMap.forEach( (database, unassignedSchemaPartitionSlots) -> unassignedSlotNum.addAndGet(unassignedSchemaPartitionSlots.size())); - String errMsg = + final String errMsg = String.format( "Lacked %d/%d SchemaPartition allocation result in the response of getOrCreateSchemaPartition method", unassignedSlotNum.get(), totalSlotNum.get()); @@ -491,10 +490,10 @@ private TSStatus consensusWritePartitionResult(ConfigPhysicalPlan plan) { * are not enough DataNodes; STORAGE_GROUP_NOT_EXIST when some StorageGroups don't exist */ private TSStatus extendRegionGroupIfNecessary( - Map unassignedPartitionSlotsCountMap, - TConsensusGroupType consensusGroupType) { + final Map unassignedPartitionSlotsCountMap, + final TConsensusGroupType consensusGroupType) { - TSStatus result = new TSStatus(); + final TSStatus result = new TSStatus(); try { if (TConsensusGroupType.SchemaRegion.equals(consensusGroupType)) { @@ -532,17 +531,18 @@ private TSStatus extendRegionGroupIfNecessary( } private TSStatus customExtendRegionGroupIfNecessary( - Map unassignedPartitionSlotsCountMap, TConsensusGroupType consensusGroupType) + final Map unassignedPartitionSlotsCountMap, + final TConsensusGroupType consensusGroupType) throws DatabaseNotExistsException, NotEnoughDataNodeException { // Map - Map allotmentMap = new ConcurrentHashMap<>(); + final Map allotmentMap = new ConcurrentHashMap<>(); - for (Map.Entry entry : unassignedPartitionSlotsCountMap.entrySet()) { + for (final Map.Entry entry : 
unassignedPartitionSlotsCountMap.entrySet()) { final String database = entry.getKey(); - int minRegionGroupNum = + final int minRegionGroupNum = getClusterSchemaManager().getMinRegionGroupNum(database, consensusGroupType); - int allocatedRegionGroupCount = + final int allocatedRegionGroupCount = partitionInfo.getRegionGroupCount(database, consensusGroupType); // Extend RegionGroups until allocatedRegionGroupCount == minRegionGroupNum @@ -555,11 +555,12 @@ private TSStatus customExtendRegionGroupIfNecessary( } private TSStatus autoExtendRegionGroupIfNecessary( - Map unassignedPartitionSlotsCountMap, TConsensusGroupType consensusGroupType) + final Map unassignedPartitionSlotsCountMap, + final TConsensusGroupType consensusGroupType) throws NotEnoughDataNodeException, DatabaseNotExistsException { // Map - Map allotmentMap = new ConcurrentHashMap<>(); + final Map allotmentMap = new ConcurrentHashMap<>(); for (Map.Entry entry : unassignedPartitionSlotsCountMap.entrySet()) { final String database = entry.getKey(); @@ -577,7 +578,7 @@ private TSStatus autoExtendRegionGroupIfNecessary( /* RegionGroup extension is required in the following cases */ // 1. The number of current RegionGroup of the Database is less than the minimum number - int minRegionGroupNum = + final int minRegionGroupNum = getClusterSchemaManager().getMinRegionGroupNum(database, consensusGroupType); if (allocatedRegionGroupCount < minRegionGroupNum // Ensure the number of RegionGroups is enough @@ -600,7 +601,7 @@ private TSStatus autoExtendRegionGroupIfNecessary( // The delta is equal to the smallest integer solution that satisfies the inequality: // slotCount / (allocatedRegionGroupCount + delta) < maxSlotCount / maxRegionGroupNum - int delta = + final int delta = Math.min( (int) (maxRegionGroupNum - allocatedRegionGroupCount), Math.max( @@ -623,10 +624,10 @@ private TSStatus autoExtendRegionGroupIfNecessary( } private TSStatus generateAndAllocateRegionGroups( - Map allotmentMap, TConsensusGroupType consensusGroupType) + final Map allotmentMap, final TConsensusGroupType consensusGroupType) throws NotEnoughDataNodeException, DatabaseNotExistsException { if (!allotmentMap.isEmpty()) { - CreateRegionGroupsPlan createRegionGroupsPlan = + final CreateRegionGroupsPlan createRegionGroupsPlan = getLoadManager().allocateRegionGroups(allotmentMap, consensusGroupType); LOGGER.info("[CreateRegionGroups] Starting to create the following RegionGroups:"); createRegionGroupsPlan.planLog(LOGGER); @@ -839,7 +840,7 @@ public List getAllRegionGroupIds(String database, TConsensusG * @param database The specified Database * @return True if the DatabaseSchema is exists and the Database is not pre-deleted */ - public boolean isDatabaseExist(String database) { + public boolean isDatabaseExist(final String database) { return partitionInfo.isDatabaseExisted(database); } @@ -849,17 +850,12 @@ public boolean isDatabaseExist(String database) { * @param databases the Databases to check * @return List of PartialPath the Databases that not exist */ - public List filterUnExistDatabases(List databases) { - List unExistDatabases = new ArrayList<>(); - if (databases == null) { - return unExistDatabases; - } - for (PartialPath database : databases) { - if (!isDatabaseExist(database.getFullPath())) { - unExistDatabases.add(database); - } - } - return unExistDatabases; + public List filterUnExistDatabases(final List databases) { + return Objects.isNull(databases) + ? 
Collections.emptyList() + : databases.stream() + .filter(database -> !isDatabaseExist(database)) + .collect(Collectors.toList()); } /** @@ -896,15 +892,17 @@ public long getAssignedTimePartitionSlotsCount(String database) { * are unavailable currently */ public List> getSortedRegionGroupSlotsCounter( - String database, TConsensusGroupType type) throws NoAvailableRegionGroupException { + final String database, final TConsensusGroupType type) + throws NoAvailableRegionGroupException { // Collect static data - List> regionGroupSlotsCounter = + final List> regionGroupSlotsCounter = partitionInfo.getRegionGroupSlotsCounter(database, type); // Filter RegionGroups that have Disabled status - List> result = new ArrayList<>(); - for (Pair slotsCounter : regionGroupSlotsCounter) { - RegionGroupStatus status = getLoadManager().getRegionGroupStatus(slotsCounter.getRight()); + final List> result = new ArrayList<>(); + for (final Pair slotsCounter : regionGroupSlotsCounter) { + final RegionGroupStatus status = + getLoadManager().getRegionGroupStatus(slotsCounter.getRight()); if (!RegionGroupStatus.Disabled.equals(status)) { result.add(slotsCounter); } @@ -914,7 +912,7 @@ public List> getSortedRegionGroupSlotsCounter( throw new NoAvailableRegionGroupException(type); } - Map regionGroupStatusMap = + final Map regionGroupStatusMap = getLoadManager() .getRegionGroupStatus(result.stream().map(Pair::getRight).collect(Collectors.toList())); result.sort( @@ -992,17 +990,17 @@ public SchemaNodeManagementResp getNodePathsPartition(GetNodePathsPartitionPlan } public void preDeleteDatabase( - String database, PreDeleteDatabasePlan.PreDeleteType preDeleteType) { + final String database, final PreDeleteDatabasePlan.PreDeleteType preDeleteType) { final PreDeleteDatabasePlan preDeleteDatabasePlan = new PreDeleteDatabasePlan(database, preDeleteType); try { getConsensusManager().write(preDeleteDatabasePlan); - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn(CONSENSUS_WRITE_ERROR, e); } } - public boolean isDatabasePreDeleted(String database) { + public boolean isDatabasePreDeleted(final String database) { return partitionInfo.isDatabasePreDeleted(database); } @@ -1012,16 +1010,17 @@ public boolean isDatabasePreDeleted(String database) { * @param deviceID IDeviceID * @return SeriesPartitionSlot */ - public TSeriesPartitionSlot getSeriesPartitionSlot(IDeviceID deviceID) { + public TSeriesPartitionSlot getSeriesPartitionSlot(final IDeviceID deviceID) { return executor.getSeriesPartitionSlot(deviceID); } - public RegionInfoListResp getRegionInfoList(GetRegionInfoListPlan req) { + public RegionInfoListResp getRegionInfoList(final GetRegionInfoListPlan req) { try { // Get static result - RegionInfoListResp regionInfoListResp = (RegionInfoListResp) getConsensusManager().read(req); + final RegionInfoListResp regionInfoListResp = + (RegionInfoListResp) getConsensusManager().read(req); // Get cached result - Map allLeadership = getLoadManager().getRegionLeaderMap(); + final Map allLeadership = getLoadManager().getRegionLeaderMap(); regionInfoListResp .getRegionInfoList() .forEach( @@ -1032,7 +1031,7 @@ public RegionInfoListResp getRegionInfoList(GetRegionInfoListPlan req) { regionInfo.getConsensusGroupId(), regionInfo.getDataNodeId()) .getStatus()); - String regionType = + final String regionType = regionInfo.getDataNodeId() == allLeadership.getOrDefault(regionInfo.getConsensusGroupId(), -1) ? 
RegionRoleType.Leader.toString() @@ -1050,11 +1049,11 @@ public RegionInfoListResp getRegionInfoList(GetRegionInfoListPlan req) { return regionInfoListResp; - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn(CONSENSUS_READ_ERROR, e); - TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); + final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); res.setMessage(e.getMessage()); - RegionInfoListResp resp = new RegionInfoListResp(); + final RegionInfoListResp resp = new RegionInfoListResp(); resp.setStatus(res); return resp; } @@ -1065,38 +1064,38 @@ public RegionInfoListResp getRegionInfoList(GetRegionInfoListPlan req) { * * @param regionGroupId The specified RegionGroup */ - public boolean isRegionGroupExists(TConsensusGroupId regionGroupId) { + public boolean isRegionGroupExists(final TConsensusGroupId regionGroupId) { return partitionInfo.isRegionGroupExisted(regionGroupId); } - public TSStatus addRegionLocation(AddRegionLocationPlan req) { + public TSStatus addRegionLocation(final AddRegionLocationPlan req) { try { return getConsensusManager().write(req); - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn(CONSENSUS_WRITE_ERROR, e); - TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); + final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); res.setMessage(e.getMessage()); return res; } } - public TSStatus removeRegionLocation(RemoveRegionLocationPlan req) { + public TSStatus removeRegionLocation(final RemoveRegionLocationPlan req) { try { return getConsensusManager().write(req); - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn(CONSENSUS_WRITE_ERROR, e); - TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); + final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); res.setMessage(e.getMessage()); return res; } } - public GetRegionIdResp getRegionId(TGetRegionIdReq req) { - GetRegionIdPlan plan = new GetRegionIdPlan(req.getType()); + public GetRegionIdResp getRegionId(final TGetRegionIdReq req) { + final GetRegionIdPlan plan = new GetRegionIdPlan(req.getType()); if (req.isSetDatabase()) { plan.setDatabase(req.getDatabase()); } else { - IDeviceID deviceID = + final IDeviceID deviceID = Deserializer.DEFAULT_DESERIALIZER.deserializeFrom(ByteBuffer.wrap(req.getDevice())); plan.setDatabase(getClusterSchemaManager().getDatabaseNameByDevice(deviceID)); plan.setSeriesSlotId(executor.getSeriesPartitionSlot(deviceID)); @@ -1118,7 +1117,7 @@ public GetRegionIdResp getRegionId(TGetRegionIdReq req) { try { return (GetRegionIdResp) getConsensusManager().read(plan); - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn(CONSENSUS_READ_ERROR, e); TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); res.setMessage(e.getMessage()); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java index 3faca89feab9..9a1f989db5ac 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/schema/ClusterSchemaManager.java @@ -115,8 +115,6 @@ import 
java.util.concurrent.locks.ReentrantLock; import java.util.stream.Collectors; -import static org.apache.iotdb.commons.schema.SchemaConstant.ROOT; - /** The ClusterSchemaManager Manages cluster schemaengine read and write requests. */ public class ClusterSchemaManager { @@ -155,17 +153,18 @@ public TSStatus setDatabase( final DatabaseSchemaPlan databaseSchemaPlan, final boolean isGeneratedByPipe) { TSStatus result; - if (getPartitionManager().isDatabasePreDeleted(databaseSchemaPlan.getSchema().getName())) { + final TDatabaseSchema schema = databaseSchemaPlan.getSchema(); + if (getPartitionManager().isDatabasePreDeleted(schema.getName())) { return RpcUtils.getStatus( TSStatusCode.METADATA_ERROR, - String.format( - "Some other task is deleting database %s", databaseSchemaPlan.getSchema().getName())); + String.format("Some other task is deleting database %s", schema.getName())); } createDatabaseLock.lock(); try { - clusterSchemaInfo.isDatabaseNameValid(databaseSchemaPlan.getSchema().getName()); - if (!databaseSchemaPlan.getSchema().getName().equals(SchemaConstant.SYSTEM_DATABASE)) { + clusterSchemaInfo.isDatabaseNameValid( + schema.getName(), schema.isSetIsTableModel() && schema.isIsTableModel()); + if (!schema.getName().equals(SchemaConstant.SYSTEM_DATABASE)) { clusterSchemaInfo.checkDatabaseLimit(); } // Cache DatabaseSchema @@ -176,16 +175,16 @@ public TSStatus setDatabase( ? new PipeEnrichedPlan(databaseSchemaPlan) : databaseSchemaPlan); // set ttl - if (databaseSchemaPlan.getSchema().isSetTTL()) { + if (schema.isSetTTL()) { result = configManager.getTTLManager().setTTL(databaseSchemaPlan, isGeneratedByPipe); } // Bind Database metrics PartitionMetrics.bindDatabaseRelatedMetricsWhenUpdate( MetricService.getInstance(), configManager, - databaseSchemaPlan.getSchema().getName(), - databaseSchemaPlan.getSchema().getDataReplicationFactor(), - databaseSchemaPlan.getSchema().getSchemaReplicationFactor()); + schema.getName(), + schema.getDataReplicationFactor(), + schema.getSchemaReplicationFactor()); // Adjust the maximum RegionGroup number of each Database adjustMaxRegionGroupNum(); } catch (final ConsensusException e) { @@ -269,7 +268,8 @@ public TSStatus alterDatabase( } /** Delete DatabaseSchema. */ - public TSStatus deleteDatabase(DeleteDatabasePlan deleteDatabasePlan, boolean isGeneratedByPipe) { + public TSStatus deleteDatabase( + final DeleteDatabasePlan deleteDatabasePlan, final boolean isGeneratedByPipe) { TSStatus result; try { result = @@ -278,7 +278,7 @@ public TSStatus deleteDatabase(DeleteDatabasePlan deleteDatabasePlan, boolean is isGeneratedByPipe ? 
new PipeEnrichedPlan(deleteDatabasePlan) : deleteDatabasePlan); - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn(CONSENSUS_WRITE_ERROR, e); result = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); result.setMessage(e.getMessage()); @@ -296,14 +296,14 @@ public TSStatus deleteDatabase(DeleteDatabasePlan deleteDatabasePlan, boolean is * * @return CountDatabaseResp */ - public CountDatabaseResp countMatchedDatabases(CountDatabasePlan countDatabasePlan) { + public CountDatabaseResp countMatchedDatabases(final CountDatabasePlan countDatabasePlan) { try { return (CountDatabaseResp) getConsensusManager().read(countDatabasePlan); - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn(CONSENSUS_READ_ERROR, e); - TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); + final TSStatus res = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); res.setMessage(e.getMessage()); - CountDatabaseResp response = new CountDatabaseResp(); + final CountDatabaseResp response = new CountDatabaseResp(); response.setStatus(res); return response; } @@ -374,7 +374,6 @@ public TShowDatabaseResp showDatabase(final GetDatabasePlan getDatabasePlan) { getMinRegionGroupNum(database, TConsensusGroupType.DataRegion)); databaseInfo.setMaxDataRegionNum( getMaxRegionGroupNum(database, TConsensusGroupType.DataRegion)); - databaseInfo.setIsTableModel(databaseSchema.isIsTableModel()); try { databaseInfo.setSchemaRegionNum( @@ -396,17 +395,17 @@ public TShowDatabaseResp showDatabase(final GetDatabasePlan getDatabasePlan) { } public Map getTTLInfoForUpgrading() { - List databases = getDatabaseNames(null); - Map infoMap = new ConcurrentHashMap<>(); - for (String database : databases) { + final List databases = getDatabaseNames(false); + final Map infoMap = new ConcurrentHashMap<>(); + for (final String database : databases) { try { final TDatabaseSchema databaseSchema = getDatabaseSchemaByName(database); - long ttl = databaseSchema.isSetTTL() ? databaseSchema.getTTL() : -1; + final long ttl = databaseSchema.isSetTTL() ? 
databaseSchema.getTTL() : -1; if (ttl < 0 || ttl == Long.MAX_VALUE) { continue; } infoMap.put(database, ttl); - } catch (DatabaseNotExistsException e) { + } catch (final DatabaseNotExistsException e) { LOGGER.warn("Database: {} doesn't exist", databases, e); } } @@ -458,18 +457,19 @@ public TSStatus setTimePartitionInterval( */ public synchronized void adjustMaxRegionGroupNum() { // Get all DatabaseSchemas - Map databaseSchemaMap = - getMatchedDatabaseSchemasByName(getDatabaseNames(null)); + // TODO + final Map databaseSchemaMap = + getMatchedDatabaseSchemasByName(getDatabaseNames(null), null); if (databaseSchemaMap.isEmpty()) { // Skip when there are no Databases return; } - int dataNodeNum = getNodeManager().getRegisteredDataNodeCount(); - int totalCpuCoreNum = getNodeManager().getDataNodeCpuCoreCount(); + final int dataNodeNum = getNodeManager().getRegisteredDataNodeCount(); + final int totalCpuCoreNum = getNodeManager().getDataNodeCpuCoreCount(); int databaseNum = databaseSchemaMap.size(); - for (TDatabaseSchema databaseSchema : databaseSchemaMap.values()) { + for (final TDatabaseSchema databaseSchema : databaseSchemaMap.values()) { if (!isDatabaseExist(databaseSchema.getName()) || databaseSchema.getName().equals(SchemaConstant.SYSTEM_DATABASE)) { // filter the pre deleted database and the system database @@ -477,8 +477,9 @@ public synchronized void adjustMaxRegionGroupNum() { } } - AdjustMaxRegionGroupNumPlan adjustMaxRegionGroupNumPlan = new AdjustMaxRegionGroupNumPlan(); - for (TDatabaseSchema databaseSchema : databaseSchemaMap.values()) { + final AdjustMaxRegionGroupNumPlan adjustMaxRegionGroupNumPlan = + new AdjustMaxRegionGroupNumPlan(); + for (final TDatabaseSchema databaseSchema : databaseSchemaMap.values()) { if (databaseSchema.getName().equals(SchemaConstant.SYSTEM_DATABASE)) { // filter the system database continue; @@ -488,17 +489,17 @@ public synchronized void adjustMaxRegionGroupNum() { // Adjust maxSchemaRegionGroupNum for each Database. // All Databases share the DataNodes equally. // The allocated SchemaRegionGroups will not be shrunk. - int allocatedSchemaRegionGroupCount; + final int allocatedSchemaRegionGroupCount; try { allocatedSchemaRegionGroupCount = getPartitionManager() .getRegionGroupCount(databaseSchema.getName(), TConsensusGroupType.SchemaRegion); - } catch (DatabaseNotExistsException e) { + } catch (final DatabaseNotExistsException e) { // ignore the pre deleted database continue; } - int maxSchemaRegionGroupNum = + final int maxSchemaRegionGroupNum = calcMaxRegionGroupNum( databaseSchema.getMinSchemaRegionGroupNum(), SCHEMA_REGION_PER_DATA_NODE, @@ -514,10 +515,10 @@ public synchronized void adjustMaxRegionGroupNum() { // Adjust maxDataRegionGroupNum for each Database. // All Databases share the DataNodes equally. // The allocated DataRegionGroups will not be shrunk. 
- int allocatedDataRegionGroupCount = + final int allocatedDataRegionGroupCount = getPartitionManager() .getRegionGroupCount(databaseSchema.getName(), TConsensusGroupType.DataRegion); - int maxDataRegionGroupNum = + final int maxDataRegionGroupNum = calcMaxRegionGroupNum( databaseSchema.getMinDataRegionGroupNum(), DATA_REGION_PER_DATA_NODE == 0 @@ -534,13 +535,13 @@ public synchronized void adjustMaxRegionGroupNum() { adjustMaxRegionGroupNumPlan.putEntry( databaseSchema.getName(), new Pair<>(maxSchemaRegionGroupNum, maxDataRegionGroupNum)); - } catch (DatabaseNotExistsException e) { + } catch (final DatabaseNotExistsException e) { LOGGER.warn("Adjust maxRegionGroupNum failed because Database doesn't exist", e); } } try { getConsensusManager().write(adjustMaxRegionGroupNumPlan); - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn(CONSENSUS_WRITE_ERROR, e); } } @@ -577,7 +578,7 @@ public static int calcMaxRegionGroupNum( * @param database The specified Database * @return True if the DatabaseSchema exists and the Database is not pre-deleted */ - public boolean isDatabaseExist(String database) { + public boolean isDatabaseExist(final String database) { return getPartitionManager().isDatabaseExist(database); } @@ -601,7 +602,7 @@ public List getDatabaseNames(final Boolean isTableModel) { * @return The specific DatabaseSchema * @throws DatabaseNotExistsException When the specific Database doesn't exist */ - public TDatabaseSchema getDatabaseSchemaByName(String database) + public TDatabaseSchema getDatabaseSchemaByName(final String database) throws DatabaseNotExistsException { if (!isDatabaseExist(database)) { throw new DatabaseNotExistsException(database); @@ -631,13 +632,13 @@ public String getDatabaseNameByDevice(final IDeviceID deviceID) { * @return the matched DatabaseSchemas */ public Map getMatchedDatabaseSchemasByName( - final List rawPathList) { + final List rawPathList, final Boolean isTableModel) { final Map result = new ConcurrentHashMap<>(); clusterSchemaInfo - .getMatchedDatabaseSchemasByName(rawPathList) + .getMatchedDatabaseSchemasByName(rawPathList, isTableModel) .forEach( (database, databaseSchema) -> { - if (isDatabaseExist(database)) { + if (isDatabaseExist(databaseSchema.getName())) { result.put(database, databaseSchema); } }); @@ -1174,9 +1175,7 @@ public synchronized Pair tableColumnCheckForColumnExtension( return new Pair<>( RpcUtils.getStatus( TSStatusCode.TABLE_NOT_EXISTS, - String.format( - "Table '%s.%s' does not exist", - database.substring(ROOT.length() + 1), tableName)), + String.format("Table '%s.%s' does not exist", database, tableName)), null); } @@ -1212,9 +1211,7 @@ public synchronized Pair tableColumnCheckForColumnRenaming( return new Pair<>( RpcUtils.getStatus( TSStatusCode.TABLE_NOT_EXISTS, - String.format( - "Table '%s.%s' does not exist", - database.substring(ROOT.length() + 1), tableName)), + String.format("Table '%s.%s' does not exist", database, tableName)), null); } @@ -1306,9 +1303,7 @@ public synchronized Pair updateTableProperties( return new Pair<>( RpcUtils.getStatus( TSStatusCode.TABLE_NOT_EXISTS, - String.format( - "Table '%s.%s' does not exist", - database.substring(ROOT.length() + 1), tableName)), + String.format("Table '%s.%s' does not exist", database, tableName)), null); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java 
index 29ce77ae5e29..804be5986ecc 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java @@ -765,13 +765,13 @@ private DataSet getSchemaNodeManagementPartition(ConfigPhysicalPlan req) { return schemaNodeManagementResp; } - private DataSet getRegionInfoList(ConfigPhysicalPlan req) { + private DataSet getRegionInfoList(final ConfigPhysicalPlan req) { final GetRegionInfoListPlan getRegionInfoListPlan = (GetRegionInfoListPlan) req; - TShowRegionReq showRegionReq = getRegionInfoListPlan.getShowRegionReq(); + final TShowRegionReq showRegionReq = getRegionInfoListPlan.getShowRegionReq(); if (showRegionReq != null && showRegionReq.isSetDatabases()) { final List storageGroups = showRegionReq.getDatabases(); final List matchedStorageGroups = - clusterSchemaInfo.getMatchedDatabaseSchemasByName(storageGroups).values().stream() + clusterSchemaInfo.getMatchedDatabaseSchemasByName(storageGroups, false).values().stream() .map(TDatabaseSchema::getName) .collect(Collectors.toList()); if (!matchedStorageGroups.isEmpty()) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/DatabasePartitionTable.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/DatabasePartitionTable.java index 1ff56a89bb0f..bb9e26e9dda3 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/DatabasePartitionTable.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/DatabasePartitionTable.java @@ -274,7 +274,7 @@ public int getAssignedSeriesPartitionSlotsCount() { * @return True if all the SeriesPartitionSlots are matched, false otherwise */ public boolean getSchemaPartition( - List partitionSlots, SchemaPartitionTable schemaPartition) { + final List partitionSlots, final SchemaPartitionTable schemaPartition) { return schemaPartitionTable.getSchemaPartition(partitionSlots, schemaPartition); } @@ -286,7 +286,8 @@ public boolean getSchemaPartition( * @return True if all the PartitionSlots are matched, false otherwise */ public boolean getDataPartition( - Map partitionSlots, DataPartitionTable dataPartition) { + final Map partitionSlots, + final DataPartitionTable dataPartition) { return dataPartitionTable.getDataPartition(partitionSlots, dataPartition); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java index 85a427cfab9a..492aceb2feca 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java @@ -29,6 +29,7 @@ import org.apache.iotdb.commons.partition.DataPartitionTable; import org.apache.iotdb.commons.partition.SchemaPartitionTable; import org.apache.iotdb.commons.snapshot.SnapshotProcessor; +import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.confignode.consensus.request.read.partition.CountTimeSlotListPlan; import org.apache.iotdb.confignode.consensus.request.read.partition.GetDataPartitionPlan; import org.apache.iotdb.confignode.consensus.request.read.partition.GetSchemaPartitionPlan; @@ -121,6 +122,8 @@ public class 
PartitionInfo implements SnapshotProcessor { private final AtomicInteger nextRegionGroupId; // Map + // For tree model databases: The databaseName is a partial path's full path with "root." + // For table model databases: The databaseName is a full name without "root." private final Map databasePartitionTables; /** For Region-Maintainer. */ @@ -169,9 +172,9 @@ public TSStatus updateDataNode(UpdateDataNodePlan updateDataNodePlan) { * @return {@link TSStatusCode#SUCCESS_STATUS} if the new DatabasePartitionTable is created * successfully. */ - public TSStatus createDatabase(DatabaseSchemaPlan plan) { - String databaseName = plan.getSchema().getName(); - DatabasePartitionTable databasePartitionTable = new DatabasePartitionTable(databaseName); + public TSStatus createDatabase(final DatabaseSchemaPlan plan) { + final String databaseName = plan.getSchema().getName(); + final DatabasePartitionTable databasePartitionTable = new DatabasePartitionTable(databaseName); databasePartitionTables.put(databaseName, databasePartitionTable); return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } @@ -278,11 +281,11 @@ public List getRegionMaintainEntryList() { * @param preDeleteDatabasePlan PreDeleteStorageGroupPlan * @return {@link TSStatusCode#SUCCESS_STATUS} */ - public TSStatus preDeleteDatabase(PreDeleteDatabasePlan preDeleteDatabasePlan) { + public TSStatus preDeleteDatabase(final PreDeleteDatabasePlan preDeleteDatabasePlan) { final PreDeleteDatabasePlan.PreDeleteType preDeleteType = preDeleteDatabasePlan.getPreDeleteType(); final String database = preDeleteDatabasePlan.getStorageGroup(); - DatabasePartitionTable databasePartitionTable = databasePartitionTables.get(database); + final DatabasePartitionTable databasePartitionTable = databasePartitionTables.get(database); if (databasePartitionTable == null) { return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } @@ -299,7 +302,7 @@ public TSStatus preDeleteDatabase(PreDeleteDatabasePlan preDeleteDatabasePlan) { return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } - public boolean isDatabasePreDeleted(String database) { + public boolean isDatabasePreDeleted(final String database) { DatabasePartitionTable databasePartitionTable = databasePartitionTables.get(database); return databasePartitionTable != null && !databasePartitionTable.isNotPreDeleted(); } @@ -309,7 +312,7 @@ public boolean isDatabasePreDeleted(String database) { * * @param plan DeleteDatabasePlan */ - public void deleteDatabase(DeleteDatabasePlan plan) { + public void deleteDatabase(final DeleteDatabasePlan plan) { // Clean the databaseTable cache databasePartitionTables.remove(plan.getName()); } @@ -320,10 +323,10 @@ public void deleteDatabase(DeleteDatabasePlan plan) { * @param plan SchemaPartitionPlan with partitionSlotsMap * @return SchemaPartitionDataSet that contains only existing SchemaPartition */ - public DataSet getSchemaPartition(GetSchemaPartitionPlan plan) { - AtomicBoolean isAllPartitionsExist = new AtomicBoolean(true); + public DataSet getSchemaPartition(final GetSchemaPartitionPlan plan) { + final AtomicBoolean isAllPartitionsExist = new AtomicBoolean(true); // TODO: Replace this map with new SchemaPartition - Map schemaPartition = new ConcurrentHashMap<>(); + final Map schemaPartition = new ConcurrentHashMap<>(); if (plan.getPartitionSlotsMap().isEmpty()) { // Return all SchemaPartitions when the queried PartitionSlots are empty @@ -457,7 +460,7 @@ public TConsensusGroupId getPredecessorDataPartition( * @param database The specified 
Database * @return True if the DatabaseSchema is exists and the Database is not pre-deleted */ - public boolean isDatabaseExisted(String database) { + public boolean isDatabaseExisted(final String database) { final DatabasePartitionTable databasePartitionTable = databasePartitionTables.get(database); return databasePartitionTable != null && databasePartitionTable.isNotPreDeleted(); } @@ -525,19 +528,25 @@ public DataSet getSchemaNodeManagementPartition(List matchedDatabases) { } /** Get Region information. */ - public DataSet getRegionInfoList(GetRegionInfoListPlan regionsInfoPlan) { - RegionInfoListResp regionResp = new RegionInfoListResp(); - List regionInfoList = new Vector<>(); + public DataSet getRegionInfoList(final GetRegionInfoListPlan regionsInfoPlan) { + final RegionInfoListResp regionResp = new RegionInfoListResp(); + final List regionInfoList = new Vector<>(); if (databasePartitionTables.isEmpty()) { regionResp.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); regionResp.setRegionInfoList(new ArrayList<>()); return regionResp; } - TShowRegionReq showRegionReq = regionsInfoPlan.getShowRegionReq(); + final TShowRegionReq showRegionReq = regionsInfoPlan.getShowRegionReq(); final List databases = showRegionReq != null ? showRegionReq.getDatabases() : null; + final Boolean isTableModel = + showRegionReq != null + ? showRegionReq.isSetIsTableModel() && showRegionReq.isIsTableModel() + : null; databasePartitionTables.forEach( (database, databasePartitionTable) -> { - if (databases != null && !databases.contains(database)) { + if (databases != null && !databases.contains(database) + || Boolean.TRUE.equals(isTableModel) && !PathUtils.isTableModelDatabase(database) + || Boolean.FALSE.equals(isTableModel) && PathUtils.isTableModelDatabase(database)) { return; } regionInfoList.addAll(databasePartitionTable.getRegionInfoList(regionsInfoPlan)); @@ -622,8 +631,8 @@ public String getRegionDatabase(TConsensusGroupId regionId) { * partitionSlotsMap */ public Map> filterUnassignedSchemaPartitionSlots( - Map> partitionSlotsMap) { - Map> result = new ConcurrentHashMap<>(); + final Map> partitionSlotsMap) { + final Map> result = new ConcurrentHashMap<>(); partitionSlotsMap.forEach( (database, partitionSlots) -> { @@ -809,7 +818,7 @@ public int countDataNodeScatterWidth( * @return Number of Regions currently owned by the specific database * @throws DatabaseNotExistsException When the specific database doesn't exist */ - public int getRegionGroupCount(String database, TConsensusGroupType type) + public int getRegionGroupCount(final String database, final TConsensusGroupType type) throws DatabaseNotExistsException { if (!isDatabaseExisted(database)) { throw new DatabaseNotExistsException(database); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java index 6256458beb34..d911e07e37f8 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfo.java @@ -79,7 +79,6 @@ import org.apache.iotdb.confignode.exception.DatabaseNotExistsException; import org.apache.iotdb.confignode.rpc.thrift.TDatabaseSchema; import org.apache.iotdb.confignode.rpc.thrift.TTableInfo; -import org.apache.iotdb.db.exception.metadata.DatabaseModelException; import 
org.apache.iotdb.db.exception.metadata.SchemaQuotaExceededException; import org.apache.iotdb.db.exception.sql.SemanticException; import org.apache.iotdb.db.schemaengine.template.Template; @@ -112,6 +111,7 @@ import static org.apache.iotdb.commons.conf.IoTDBConstant.ONE_LEVEL_PATH_WILDCARD; import static org.apache.iotdb.commons.conf.IoTDBConstant.TTL_INFINITE; +import static org.apache.iotdb.commons.path.PartialPath.getQualifiedDatabasePartialPath; import static org.apache.iotdb.commons.schema.SchemaConstant.ALL_MATCH_PATTERN; import static org.apache.iotdb.commons.schema.SchemaConstant.ALL_MATCH_SCOPE; import static org.apache.iotdb.commons.schema.SchemaConstant.ALL_TEMPLATE; @@ -129,9 +129,11 @@ public class ClusterSchemaInfo implements SnapshotProcessor { // Database read write lock private final ReentrantReadWriteLock databaseReadWriteLock; - private final ConfigMTree mTree; + private final ConfigMTree treeModelMTree; + private final ConfigMTree tableModelMTree; - private static final String SNAPSHOT_FILENAME = "cluster_schema.bin"; + private static final String TREE_SNAPSHOT_FILENAME = "cluster_schema.bin"; + private static final String TABLE_SNAPSHOT_FILENAME = "table_cluster_schema.bin"; private final String ERROR_NAME = "Error Database name"; @@ -143,7 +145,8 @@ public ClusterSchemaInfo() throws IOException { databaseReadWriteLock = new ReentrantReadWriteLock(); try { - mTree = new ConfigMTree(); + treeModelMTree = new ConfigMTree(); + tableModelMTree = new ConfigMTree(); templateTable = new TemplateTable(); templatePreSetTable = new TemplatePreSetTable(); } catch (MetadataException e) { @@ -168,7 +171,10 @@ public TSStatus createDatabase(final DatabaseSchemaPlan plan) { try { // Set Database final TDatabaseSchema databaseSchema = plan.getSchema(); - final PartialPath partialPathName = PartialPath.getDatabasePath(databaseSchema.getName()); + final PartialPath partialPathName = getQualifiedDatabasePartialPath(databaseSchema.getName()); + + final ConfigMTree mTree = + plan.getSchema().isIsTableModel() ? tableModelMTree : treeModelMTree; mTree.setStorageGroup(partialPathName); // Set DatabaseSchema @@ -198,22 +204,15 @@ public TSStatus alterDatabase(final DatabaseSchemaPlan plan) { databaseReadWriteLock.writeLock().lock(); try { final TDatabaseSchema alterSchema = plan.getSchema(); - final PartialPath partialPathName = new PartialPath(alterSchema.getName()); + final PartialPath partialPathName = + PartialPath.getQualifiedDatabasePartialPath(alterSchema.getName()); + + final ConfigMTree mTree = + plan.getSchema().isIsTableModel() ? 
tableModelMTree : treeModelMTree; final TDatabaseSchema currentSchema = mTree.getDatabaseNodeByDatabasePath(partialPathName).getAsMNode().getDatabaseSchema(); - // Model conflict detection - if (alterSchema.isIsTableModel() && !currentSchema.isIsTableModel()) { - final DatabaseModelException exception = - new DatabaseModelException(currentSchema.getName(), false); - return RpcUtils.getStatus(exception.getErrorCode(), exception.getMessage()); - } else if (!alterSchema.isIsTableModel() && currentSchema.isIsTableModel()) { - final DatabaseModelException exception = - new DatabaseModelException(currentSchema.getName(), true); - return RpcUtils.getStatus(exception.getErrorCode(), exception.getMessage()); - } - // TODO: Support alter other fields if (alterSchema.isSetMinSchemaRegionGroupNum()) { currentSchema.setMinSchemaRegionGroupNum(alterSchema.getMinSchemaRegionGroupNum()); @@ -280,7 +279,8 @@ public TSStatus deleteDatabase(final DeleteDatabasePlan plan) { databaseReadWriteLock.writeLock().lock(); try { // Delete Database - mTree.deleteDatabase(getQualifiedDatabasePartialPath(plan.getName())); + (PathUtils.isTableModelDatabase(plan.getName()) ? tableModelMTree : treeModelMTree) + .deleteDatabase(getQualifiedDatabasePartialPath(plan.getName())); result.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } catch (final MetadataException e) { @@ -301,13 +301,14 @@ public TSStatus deleteDatabase(final DeleteDatabasePlan plan) { * @throws MetadataException if other exceptions happen */ public void checkDatabaseLimit() throws MetadataException { - int limit = COMMON_CONFIG.getDatabaseLimitThreshold(); + final int limit = COMMON_CONFIG.getDatabaseLimitThreshold(); if (limit > 0) { databaseReadWriteLock.readLock().lock(); try { - int count = - mTree.getDatabaseNum(ALL_MATCH_PATTERN, ALL_MATCH_SCOPE, false) - - mTree.getDatabaseNum(SYSTEM_DATABASE_PATTERN, ALL_MATCH_SCOPE, false); + final int count = + treeModelMTree.getDatabaseNum(ALL_MATCH_PATTERN, ALL_MATCH_SCOPE, false) + - treeModelMTree.getDatabaseNum(SYSTEM_DATABASE_PATTERN, ALL_MATCH_SCOPE, false) + + tableModelMTree.getDatabaseNum(ALL_MATCH_PATTERN, ALL_MATCH_SCOPE, false); if (count >= limit) { throw new SchemaQuotaExceededException(limit); } @@ -325,7 +326,9 @@ public CountDatabaseResp countMatchedDatabases(final CountDatabasePlan plan) { databaseReadWriteLock.readLock().lock(); try { final PartialPath patternPath = new PartialPath(plan.getDatabasePattern()); - result.setCount(mTree.getDatabaseNum(patternPath, plan.getScope(), false)); + result.setCount( + (plan.isTableModel() ? tableModelMTree : treeModelMTree) + .getDatabaseNum(patternPath, plan.getScope(), false)); result.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); } catch (final MetadataException e) { LOGGER.error(ERROR_NAME, e); @@ -347,6 +350,7 @@ public DatabaseSchemaResp getMatchedDatabaseSchemas(final GetDatabasePlan plan) try { final Map schemaMap = new HashMap<>(); final PartialPath patternPath = new PartialPath(plan.getDatabasePattern()); + final ConfigMTree mTree = plan.isTableModel() ? 
tableModelMTree : treeModelMTree; final List matchedPaths = mTree.getMatchedDatabases(patternPath, plan.getScope(), false); for (final PartialPath path : matchedPaths) { @@ -372,8 +376,8 @@ public TSStatus setSchemaReplicationFactor(final SetSchemaReplicationFactorPlan databaseReadWriteLock.writeLock().lock(); try { final PartialPath path = getQualifiedDatabasePartialPath(plan.getDatabase()); - if (mTree.isDatabaseAlreadySet(path)) { - mTree + if (treeModelMTree.isDatabaseAlreadySet(path)) { + treeModelMTree .getDatabaseNodeByDatabasePath(path) .getAsMNode() .getDatabaseSchema() @@ -396,8 +400,8 @@ public TSStatus setDataReplicationFactor(final SetDataReplicationFactorPlan plan databaseReadWriteLock.writeLock().lock(); try { final PartialPath path = getQualifiedDatabasePartialPath(plan.getDatabase()); - if (mTree.isDatabaseAlreadySet(path)) { - mTree + if (treeModelMTree.isDatabaseAlreadySet(path)) { + treeModelMTree .getDatabaseNodeByDatabasePath(path) .getAsMNode() .getDatabaseSchema() @@ -420,8 +424,8 @@ public TSStatus setTimePartitionInterval(final SetTimePartitionIntervalPlan plan databaseReadWriteLock.writeLock().lock(); try { final PartialPath path = getQualifiedDatabasePartialPath(plan.getDatabase()); - if (mTree.isDatabaseAlreadySet(path)) { - mTree + if (treeModelMTree.isDatabaseAlreadySet(path)) { + treeModelMTree .getDatabaseNodeByDatabasePath(path) .getAsMNode() .getDatabaseSchema() @@ -452,7 +456,7 @@ public TSStatus adjustMaxRegionGroupCount(final AdjustMaxRegionGroupNumPlan plan for (final Map.Entry> entry : plan.getMaxRegionGroupNumMap().entrySet()) { final TDatabaseSchema databaseSchema = - mTree + treeModelMTree .getDatabaseNodeByDatabasePath(getQualifiedDatabasePartialPath(entry.getKey())) .getAsMNode() .getDatabaseSchema(); @@ -481,9 +485,18 @@ public TSStatus adjustMaxRegionGroupCount(final AdjustMaxRegionGroupNumPlan plan public List getDatabaseNames(final Boolean isTableModel) { databaseReadWriteLock.readLock().lock(); try { - return mTree.getAllDatabasePaths(isTableModel).stream() - .map(PartialPath::getFullPath) - .collect(Collectors.toList()); + final List results = new ArrayList<>(); + if (!Boolean.TRUE.equals(isTableModel)) { + treeModelMTree.getAllDatabasePaths(isTableModel).stream() + .map(PartialPath::getFullPath) + .forEach(results::add); + } + if (!Boolean.FALSE.equals(isTableModel)) { + tableModelMTree.getAllDatabasePaths(isTableModel).stream() + .map(path -> path.getNodes()[1]) + .forEach(results::add); + } + return results; } finally { databaseReadWriteLock.readLock().unlock(); } @@ -496,10 +509,12 @@ public List getDatabaseNames(final Boolean isTableModel) { * @throws MetadataException If the DatabaseName invalid i.e. the specified DatabaseName is * already exist, or it's a prefix of another DatabaseName */ - public void isDatabaseNameValid(final String databaseName) throws MetadataException { + public void isDatabaseNameValid(final String databaseName, final boolean isTableModel) + throws MetadataException { databaseReadWriteLock.readLock().lock(); try { - mTree.checkDatabaseAlreadySet(getQualifiedDatabasePartialPath(databaseName)); + (isTableModel ? 
tableModelMTree : treeModelMTree) + .checkDatabaseAlreadySet(getQualifiedDatabasePartialPath(databaseName)); } finally { databaseReadWriteLock.readLock().unlock(); } @@ -516,7 +531,7 @@ public TDatabaseSchema getMatchedDatabaseSchemaByName(final String database) throws DatabaseNotExistsException { databaseReadWriteLock.readLock().lock(); try { - return mTree + return (PathUtils.isTableModelDatabase(database) ? tableModelMTree : treeModelMTree) .getDatabaseNodeByDatabasePath(getQualifiedDatabasePartialPath(database)) .getAsMNode() .getDatabaseSchema(); @@ -534,19 +549,15 @@ public TDatabaseSchema getMatchedDatabaseSchemaByName(final String database) * @return All DatabaseSchemas that matches to the specific Database patterns */ public Map getMatchedDatabaseSchemasByName( - final List rawPathList) { + final List rawPathList, final Boolean isTableModel) { final Map schemaMap = new HashMap<>(); databaseReadWriteLock.readLock().lock(); try { - for (final String rawPath : rawPathList) { - final PartialPath patternPath = getQualifiedDatabasePartialPath(rawPath); - final List matchedPaths = - mTree.getMatchedDatabases(patternPath, ALL_MATCH_SCOPE, false); - for (final PartialPath path : matchedPaths) { - schemaMap.put( - path.getFullPath(), - mTree.getDatabaseNodeByPath(path).getAsMNode().getDatabaseSchema()); - } + if (!Boolean.FALSE.equals(isTableModel)) { + enrichSchemaMap(rawPathList, tableModelMTree, schemaMap); + } + if (!Boolean.TRUE.equals(isTableModel)) { + enrichSchemaMap(rawPathList, treeModelMTree, schemaMap); } } catch (final MetadataException e) { LOGGER.warn(ERROR_NAME, e); @@ -556,22 +567,40 @@ public Map getMatchedDatabaseSchemasByName( return schemaMap; } + private void enrichSchemaMap( + final List rawPathList, + final ConfigMTree mTree, + final Map schemaMap) + throws MetadataException { + for (final String rawPath : rawPathList) { + final PartialPath patternPath = getQualifiedDatabasePartialPath(rawPath); + final List matchedPaths = + mTree.getMatchedDatabases(patternPath, ALL_MATCH_SCOPE, false); + for (final PartialPath path : matchedPaths) { + schemaMap.put( + path.getFullPath(), mTree.getDatabaseNodeByPath(path).getAsMNode().getDatabaseSchema()); + } + } + } + /** * Only leader use this interface. Get the matched DatabaseSchemas. 
* * @param prefix prefix path such as root.a * @return All DatabaseSchemas that matches to the prefix path such as root.a.db1, root.a.db2 */ - public Map getMatchedDatabaseSchemasByPrefix(PartialPath prefix) { - Map schemaMap = new HashMap<>(); + public Map getMatchedDatabaseSchemasByPrefix(final PartialPath prefix) { + final Map schemaMap = new HashMap<>(); databaseReadWriteLock.readLock().lock(); try { - List matchedPaths = mTree.getMatchedDatabases(prefix, ALL_MATCH_SCOPE, true); - for (PartialPath path : matchedPaths) { + final List matchedPaths = + treeModelMTree.getMatchedDatabases(prefix, ALL_MATCH_SCOPE, true); + for (final PartialPath path : matchedPaths) { schemaMap.put( - path.getFullPath(), mTree.getDatabaseNodeByPath(path).getAsMNode().getDatabaseSchema()); + path.getFullPath(), + treeModelMTree.getDatabaseNodeByPath(path).getAsMNode().getDatabaseSchema()); } - } catch (MetadataException e) { + } catch (final MetadataException e) { LOGGER.warn(ERROR_NAME, e); } finally { databaseReadWriteLock.readLock().unlock(); @@ -591,8 +620,8 @@ public int getMinRegionGroupNum( databaseReadWriteLock.readLock().lock(); try { final TDatabaseSchema storageGroupSchema = - mTree - .getDatabaseNodeByDatabasePath(PartialPath.getDatabasePath(database)) + (PathUtils.isTableModelDatabase(database) ? tableModelMTree : treeModelMTree) + .getDatabaseNodeByDatabasePath(getQualifiedDatabasePartialPath(database)) .getAsMNode() .getDatabaseSchema(); switch (consensusGroupType) { @@ -622,8 +651,8 @@ public int getMaxRegionGroupNum( databaseReadWriteLock.readLock().lock(); try { final TDatabaseSchema storageGroupSchema = - mTree - .getDatabaseNodeByDatabasePath(PartialPath.getDatabasePath(database)) + (PathUtils.isTableModelDatabase(database) ? tableModelMTree : treeModelMTree) + .getDatabaseNodeByDatabasePath(getQualifiedDatabasePartialPath(database)) .getAsMNode() .getDatabaseSchema(); switch (consensusGroupType) { @@ -642,14 +671,17 @@ public int getMaxRegionGroupNum( } @Override - public boolean processTakeSnapshot(File snapshotDir) throws IOException { - return processMTreeTakeSnapshot(snapshotDir) + public boolean processTakeSnapshot(final File snapshotDir) throws IOException { + return processMTreeTakeSnapshot(snapshotDir, TREE_SNAPSHOT_FILENAME, treeModelMTree) + && processMTreeTakeSnapshot(snapshotDir, TABLE_SNAPSHOT_FILENAME, tableModelMTree) && templateTable.processTakeSnapshot(snapshotDir) && templatePreSetTable.processTakeSnapshot(snapshotDir); } - public boolean processMTreeTakeSnapshot(File snapshotDir) throws IOException { - File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME); + public boolean processMTreeTakeSnapshot( + final File snapshotDir, final String snapshotFileName, final ConfigMTree mTree) + throws IOException { + final File snapshotFile = new File(snapshotDir, snapshotFileName); if (snapshotFile.exists() && snapshotFile.isFile()) { LOGGER.error( "Failed to take snapshot, because snapshot file [{}] is already exist.", @@ -657,12 +689,12 @@ public boolean processMTreeTakeSnapshot(File snapshotDir) throws IOException { return false; } - File tmpFile = new File(snapshotFile.getAbsolutePath() + "-" + UUID.randomUUID()); + final File tmpFile = new File(snapshotFile.getAbsolutePath() + "-" + UUID.randomUUID()); databaseReadWriteLock.readLock().lock(); try { - FileOutputStream fileOutputStream = new FileOutputStream(tmpFile); - BufferedOutputStream outputStream = new BufferedOutputStream(fileOutputStream); + final FileOutputStream fileOutputStream = new FileOutputStream(tmpFile); 
+ final BufferedOutputStream outputStream = new BufferedOutputStream(fileOutputStream); try { // Take snapshot for MTree mTree.serialize(outputStream); @@ -688,14 +720,17 @@ public boolean processMTreeTakeSnapshot(File snapshotDir) throws IOException { } @Override - public void processLoadSnapshot(File snapshotDir) throws IOException { - processMTreeLoadSnapshot(snapshotDir); + public void processLoadSnapshot(final File snapshotDir) throws IOException { + processMTreeLoadSnapshot(snapshotDir, TREE_SNAPSHOT_FILENAME, treeModelMTree); + processMTreeLoadSnapshot(snapshotDir, TABLE_SNAPSHOT_FILENAME, tableModelMTree); templateTable.processLoadSnapshot(snapshotDir); templatePreSetTable.processLoadSnapshot(snapshotDir); } - public void processMTreeLoadSnapshot(File snapshotDir) throws IOException { - File snapshotFile = new File(snapshotDir, SNAPSHOT_FILENAME); + public void processMTreeLoadSnapshot( + final File snapshotDir, final String snapshotFileName, final ConfigMTree mTree) + throws IOException { + final File snapshotFile = new File(snapshotDir, snapshotFileName); if (!snapshotFile.exists() || !snapshotFile.isFile()) { LOGGER.error( "Failed to load snapshot,snapshot file [{}] is not exist.", @@ -703,8 +738,8 @@ public void processMTreeLoadSnapshot(File snapshotDir) throws IOException { return; } databaseReadWriteLock.writeLock().lock(); - try (FileInputStream fileInputStream = new FileInputStream(snapshotFile); - BufferedInputStream bufferedInputStream = new BufferedInputStream(fileInputStream)) { + try (final FileInputStream fileInputStream = new FileInputStream(snapshotFile); + final BufferedInputStream bufferedInputStream = new BufferedInputStream(fileInputStream)) { // Load snapshot of MTree mTree.clear(); mTree.deserialize(bufferedInputStream); @@ -719,7 +754,8 @@ public Pair, Set> getNodesListInGivenLevel( new Pair(new HashSet<>(), new HashSet<>()); databaseReadWriteLock.readLock().lock(); try { - matchedPathsInNextLevel = mTree.getNodesListInGivenLevel(partialPath, level, true, scope); + matchedPathsInNextLevel = + treeModelMTree.getNodesListInGivenLevel(partialPath, level, true, scope); } catch (MetadataException e) { LOGGER.error("Error get matched paths in given level.", e); } finally { @@ -734,7 +770,7 @@ public Pair, Set> getChildNodePathInNextLevel( new Pair<>(new HashSet<>(), new HashSet<>()); databaseReadWriteLock.readLock().lock(); try { - matchedPathsInNextLevel = mTree.getChildNodePathInNextLevel(partialPath, scope); + matchedPathsInNextLevel = treeModelMTree.getChildNodePathInNextLevel(partialPath, scope); } catch (MetadataException e) { LOGGER.error("Error get matched paths in next level.", e); } finally { @@ -797,7 +833,7 @@ public synchronized TemplateInfoResp checkTemplateSettable( } try { - mTree.checkTemplateOnPath(path); + treeModelMTree.checkTemplateOnPath(path); resp.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); resp.setTemplateList( Collections.singletonList( @@ -812,108 +848,110 @@ public synchronized TemplateInfoResp checkTemplateSettable( // Before execute this method, checkTemplateSettable method should be invoked first and the whole // process must be synchronized - public synchronized TSStatus setSchemaTemplate(SetSchemaTemplatePlan setSchemaTemplatePlan) { - PartialPath path; + public synchronized TSStatus setSchemaTemplate( + final SetSchemaTemplatePlan setSchemaTemplatePlan) { + final PartialPath path; try { path = new PartialPath(setSchemaTemplatePlan.getPath()); - } catch (IllegalPathException e) { + } catch (final 
IllegalPathException e) { LOGGER.error(e.getMessage()); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } try { - int templateId = templateTable.getTemplate(setSchemaTemplatePlan.getName()).getId(); - mTree.setTemplate(templateId, path); + final int templateId = templateTable.getTemplate(setSchemaTemplatePlan.getName()).getId(); + treeModelMTree.setTemplate(templateId, path); return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - } catch (MetadataException e) { + } catch (final MetadataException e) { return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } } public synchronized TSStatus preSetSchemaTemplate( - PreSetSchemaTemplatePlan preSetSchemaTemplatePlan) { - PartialPath path; + final PreSetSchemaTemplatePlan preSetSchemaTemplatePlan) { + final PartialPath path; try { path = new PartialPath(preSetSchemaTemplatePlan.getPath()); - } catch (IllegalPathException e) { + } catch (final IllegalPathException e) { LOGGER.error(e.getMessage()); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } try { - int templateId = templateTable.getTemplate(preSetSchemaTemplatePlan.getName()).getId(); + final int templateId = templateTable.getTemplate(preSetSchemaTemplatePlan.getName()).getId(); if (preSetSchemaTemplatePlan.isRollback()) { rollbackPreSetSchemaTemplate(templateId, path); } else { preSetSchemaTemplate(templateId, path); } return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - } catch (MetadataException e) { + } catch (final MetadataException e) { return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } } - private void preSetSchemaTemplate(int templateId, PartialPath templateSetPath) + private void preSetSchemaTemplate(final int templateId, final PartialPath templateSetPath) throws MetadataException { templatePreSetTable.preSetTemplate(templateId, templateSetPath); - mTree.setTemplate(templateId, templateSetPath); + treeModelMTree.setTemplate(templateId, templateSetPath); } - private void rollbackPreSetSchemaTemplate(int templateId, PartialPath templateSetPath) + private void rollbackPreSetSchemaTemplate(final int templateId, final PartialPath templateSetPath) throws MetadataException { try { - mTree.unsetTemplate(templateId, templateSetPath); - } catch (MetadataException ignore) { + treeModelMTree.unsetTemplate(templateId, templateSetPath); + } catch (final MetadataException ignore) { // node not exists or not set template } templatePreSetTable.removeSetTemplate(templateId, templateSetPath); } public synchronized TSStatus commitSetSchemaTemplate( - CommitSetSchemaTemplatePlan commitSetSchemaTemplatePlan) { - PartialPath path; + final CommitSetSchemaTemplatePlan commitSetSchemaTemplatePlan) { + final PartialPath path; try { path = new PartialPath(commitSetSchemaTemplatePlan.getPath()); - } catch (IllegalPathException e) { + } catch (final IllegalPathException e) { LOGGER.error(e.getMessage()); return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } try { - int templateId = templateTable.getTemplate(commitSetSchemaTemplatePlan.getName()).getId(); + final int templateId = + templateTable.getTemplate(commitSetSchemaTemplatePlan.getName()).getId(); if (commitSetSchemaTemplatePlan.isRollback()) { rollbackCommitSetSchemaTemplate(templateId, path); } else { commitSetSchemaTemplate(templateId, path); } return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - } catch (MetadataException e) { + } catch (final MetadataException e) { return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); } } - private void 
commitSetSchemaTemplate(int templateId, PartialPath templateSetPath) { + private void commitSetSchemaTemplate(final int templateId, final PartialPath templateSetPath) { templatePreSetTable.removeSetTemplate(templateId, templateSetPath); } - private void rollbackCommitSetSchemaTemplate(int templateId, PartialPath templateSetPath) - throws MetadataException { - mTree.unsetTemplate(templateId, templateSetPath); + private void rollbackCommitSetSchemaTemplate( + final int templateId, final PartialPath templateSetPath) throws MetadataException { + treeModelMTree.unsetTemplate(templateId, templateSetPath); } - public PathInfoResp getPathsSetTemplate(GetPathsSetTemplatePlan getPathsSetTemplatePlan) { - PathInfoResp pathInfoResp = new PathInfoResp(); + public PathInfoResp getPathsSetTemplate(final GetPathsSetTemplatePlan getPathsSetTemplatePlan) { + final PathInfoResp pathInfoResp = new PathInfoResp(); TSStatus status; try { - String templateName = getPathsSetTemplatePlan.getName(); - PathPatternTree scope = getPathsSetTemplatePlan.getScope(); - int templateId; + final String templateName = getPathsSetTemplatePlan.getName(); + final PathPatternTree scope = getPathsSetTemplatePlan.getScope(); + final int templateId; if (templateName.equals(ONE_LEVEL_PATH_WILDCARD)) { templateId = ALL_TEMPLATE; } else { templateId = templateTable.getTemplate(templateName).getId(); } - pathInfoResp.setPathList(mTree.getPathsSetOnTemplate(templateId, scope, false)); + pathInfoResp.setPathList(treeModelMTree.getPathsSetOnTemplate(templateId, scope, false)); status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } catch (MetadataException e) { status = RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); @@ -929,7 +967,7 @@ public AllTemplateSetInfoResp getAllTemplateSetInfo() { for (Template template : templateList) { id = template.getId(); try { - List pathList = mTree.getPathsSetOnTemplate(id, ALL_MATCH_SCOPE, true); + List pathList = treeModelMTree.getPathsSetOnTemplate(id, ALL_MATCH_SCOPE, true); if (!pathList.isEmpty()) { List> pathSetInfoList = new ArrayList<>(); for (String path : pathList) { @@ -964,7 +1002,8 @@ public TemplateSetInfoResp getTemplateSetInfo(final GetTemplateSetInfoPlan plan) try { final Map> allTemplateSetInfo = new HashMap<>(); for (final PartialPath pattern : plan.getPatternList()) { - final Map> templateSetInfo = mTree.getTemplateSetInfo(pattern); + final Map> templateSetInfo = + treeModelMTree.getTemplateSetInfo(pattern); if (templateSetInfo.isEmpty()) { continue; } @@ -1001,7 +1040,7 @@ public TemplateSetInfoResp getTemplateSetInfo(final GetTemplateSetInfoPlan plan) public TSStatus preUnsetSchemaTemplate(PreUnsetSchemaTemplatePlan plan) { try { - mTree.preUnsetTemplate(plan.getTemplateId(), plan.getPath()); + treeModelMTree.preUnsetTemplate(plan.getTemplateId(), plan.getPath()); return StatusUtils.OK; } catch (MetadataException e) { LOGGER.error(e.getMessage(), e); @@ -1011,7 +1050,7 @@ public TSStatus preUnsetSchemaTemplate(PreUnsetSchemaTemplatePlan plan) { public TSStatus rollbackUnsetSchemaTemplate(RollbackPreUnsetSchemaTemplatePlan plan) { try { - mTree.rollbackUnsetTemplate(plan.getTemplateId(), plan.getPath()); + treeModelMTree.rollbackUnsetTemplate(plan.getTemplateId(), plan.getPath()); return StatusUtils.OK; } catch (MetadataException e) { LOGGER.error(e.getMessage(), e); @@ -1021,7 +1060,7 @@ public TSStatus rollbackUnsetSchemaTemplate(RollbackPreUnsetSchemaTemplatePlan p public TSStatus unsetSchemaTemplate(UnsetSchemaTemplatePlan plan) { try { - 
mTree.unsetTemplate(plan.getTemplateId(), plan.getPath()); + treeModelMTree.unsetTemplate(plan.getTemplateId(), plan.getPath()); return StatusUtils.OK; } catch (MetadataException e) { LOGGER.error(e.getMessage(), e); @@ -1048,31 +1087,13 @@ public TSStatus extendSchemaTemplate(ExtendSchemaTemplatePlan extendSchemaTempla } } - public Map getMatchedDatabaseSchemasByOneName( - final String[] databasePathPattern) { - final Map schemaMap = new HashMap<>(); - databaseReadWriteLock.readLock().lock(); - try { - final PartialPath patternPath = new PartialPath(databasePathPattern); - final List matchedPaths = mTree.getBelongedDatabases(patternPath); - for (final PartialPath path : matchedPaths) { - schemaMap.put( - path.getFullPath(), mTree.getDatabaseNodeByPath(path).getAsMNode().getDatabaseSchema()); - } - } catch (final MetadataException e) { - LOGGER.warn(ERROR_NAME, e); - } finally { - databaseReadWriteLock.readLock().unlock(); - } - return schemaMap; - } - // region table management public TSStatus preCreateTable(final PreCreateTablePlan plan) { databaseReadWriteLock.writeLock().lock(); try { - mTree.preCreateTable(getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTable()); + tableModelMTree.preCreateTable( + getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTable()); return RpcUtils.SUCCESS_STATUS; } catch (final MetadataException e) { return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); @@ -1084,7 +1105,7 @@ public TSStatus preCreateTable(final PreCreateTablePlan plan) { public TSStatus rollbackCreateTable(final RollbackCreateTablePlan plan) { databaseReadWriteLock.writeLock().lock(); try { - mTree.rollbackCreateTable( + tableModelMTree.rollbackCreateTable( getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTableName()); return RpcUtils.SUCCESS_STATUS; } catch (final MetadataException e) { @@ -1097,7 +1118,7 @@ public TSStatus rollbackCreateTable(final RollbackCreateTablePlan plan) { public TSStatus commitCreateTable(final CommitCreateTablePlan plan) { databaseReadWriteLock.writeLock().lock(); try { - mTree.commitCreateTable( + tableModelMTree.commitCreateTable( getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTableName()); return RpcUtils.SUCCESS_STATUS; } catch (final MetadataException e) { @@ -1110,7 +1131,7 @@ public TSStatus commitCreateTable(final CommitCreateTablePlan plan) { public TSStatus preDeleteTable(final PreDeleteTablePlan plan) { databaseReadWriteLock.writeLock().lock(); try { - mTree.preDeleteTable( + tableModelMTree.preDeleteTable( getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTableName()); return RpcUtils.SUCCESS_STATUS; } catch (final MetadataException e) { @@ -1123,7 +1144,8 @@ public TSStatus preDeleteTable(final PreDeleteTablePlan plan) { public TSStatus dropTable(final CommitDeleteTablePlan plan) { databaseReadWriteLock.writeLock().lock(); try { - mTree.dropTable(getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTableName()); + tableModelMTree.dropTable( + getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTableName()); return RpcUtils.SUCCESS_STATUS; } catch (final MetadataException e) { return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); @@ -1138,7 +1160,7 @@ public ShowTableResp showTables(final ShowTablePlan plan) { return new ShowTableResp( StatusUtils.OK, plan.isDetails() - ? mTree + ? 
tableModelMTree .getAllTablesUnderSpecificDatabase( getQualifiedDatabasePartialPath(plan.getDatabase())) .stream() @@ -1152,7 +1174,7 @@ public ShowTableResp showTables(final ShowTablePlan plan) { return info; }) .collect(Collectors.toList()) - : mTree + : tableModelMTree .getAllUsingTablesUnderSpecificDatabase( getQualifiedDatabasePartialPath(plan.getDatabase())) .stream() @@ -1178,7 +1200,7 @@ public FetchTableResp fetchTables(final FetchTablePlan plan) { plan.getFetchTableMap().entrySet()) { result.put( database2Tables.getKey(), - mTree.getSpecificTablesUnderSpecificDatabase( + tableModelMTree.getSpecificTablesUnderSpecificDatabase( getQualifiedDatabasePartialPath(database2Tables.getKey()), database2Tables.getValue())); } @@ -1197,11 +1219,13 @@ public DescTableResp descTable(final DescTablePlan plan) { final PartialPath databasePath = getQualifiedDatabasePartialPath(plan.getDatabase()); if (plan.isDetails()) { final Pair> pair = - mTree.getTableSchemaDetails(databasePath, plan.getTableName()); + tableModelMTree.getTableSchemaDetails(databasePath, plan.getTableName()); return new DescTableResp(StatusUtils.OK, pair.getLeft(), pair.getRight()); } return new DescTableResp( - StatusUtils.OK, mTree.getUsingTableSchema(databasePath, plan.getTableName()), null); + StatusUtils.OK, + tableModelMTree.getUsingTableSchema(databasePath, plan.getTableName()), + null); } catch (final MetadataException e) { return new DescTableResp(RpcUtils.getStatus(e.getErrorCode(), e.getMessage()), null, null); } finally { @@ -1212,7 +1236,7 @@ public DescTableResp descTable(final DescTablePlan plan) { public Map> getAllUsingTables() { databaseReadWriteLock.readLock().lock(); try { - return mTree.getAllUsingTables(); + return tableModelMTree.getAllUsingTables(); } finally { databaseReadWriteLock.readLock().unlock(); } @@ -1221,7 +1245,7 @@ public Map> getAllUsingTables() { public Map> getAllPreCreateTables() { databaseReadWriteLock.readLock().lock(); try { - return mTree.getAllPreCreateTables(); + return tableModelMTree.getAllPreCreateTables(); } catch (final MetadataException e) { LOGGER.warn(e.getMessage(), e); throw new RuntimeException(e); @@ -1234,7 +1258,7 @@ public Optional getTsTableIfExists(final String database, final String throws MetadataException { databaseReadWriteLock.readLock().lock(); try { - return mTree.getTableIfExists(getQualifiedDatabasePartialPath(database), tableName); + return tableModelMTree.getTableIfExists(getQualifiedDatabasePartialPath(database), tableName); } finally { databaseReadWriteLock.readLock().unlock(); } @@ -1244,12 +1268,12 @@ public TSStatus addTableColumn(final AddTableColumnPlan plan) { databaseReadWriteLock.writeLock().lock(); try { if (plan.isRollback()) { - mTree.rollbackAddTableColumn( + tableModelMTree.rollbackAddTableColumn( getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTableName(), plan.getColumnSchemaList()); } else { - mTree.addTableColumn( + tableModelMTree.addTableColumn( getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTableName(), plan.getColumnSchemaList()); @@ -1267,7 +1291,7 @@ public TSStatus renameTableColumn(final RenameTableColumnPlan plan) { final String databaseName = PathUtils.qualifyDatabaseName(plan.getDatabase()); databaseReadWriteLock.writeLock().lock(); try { - mTree.renameTableColumn( + tableModelMTree.renameTableColumn( new PartialPath(databaseName), plan.getTableName(), plan.getOldName(), plan.getNewName()); return RpcUtils.SUCCESS_STATUS; } catch (final MetadataException e) { @@ -1281,7 +1305,7 @@ public TSStatus 
renameTableColumn(final RenameTableColumnPlan plan) { public TSStatus setTableProperties(final SetTablePropertiesPlan plan) { databaseReadWriteLock.writeLock().lock(); try { - mTree.setTableProperties( + tableModelMTree.setTableProperties( getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTableName(), plan.getProperties()); @@ -1298,7 +1322,7 @@ public TSStatus preDeleteColumn(final PreDeleteColumnPlan plan) { databaseReadWriteLock.writeLock().lock(); try { final TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - if (mTree.preDeleteColumn( + if (tableModelMTree.preDeleteColumn( getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTableName(), plan.getColumnName())) { @@ -1318,7 +1342,7 @@ public TSStatus preDeleteColumn(final PreDeleteColumnPlan plan) { public TSStatus commitDeleteColumn(final CommitDeleteColumnPlan plan) { databaseReadWriteLock.writeLock().lock(); try { - mTree.commitDeleteColumn( + tableModelMTree.commitDeleteColumn( getQualifiedDatabasePartialPath(plan.getDatabase()), plan.getTableName(), plan.getColumnName()); @@ -1331,15 +1355,10 @@ public TSStatus commitDeleteColumn(final CommitDeleteColumnPlan plan) { } } - private PartialPath getQualifiedDatabasePartialPath(final String database) - throws IllegalPathException { - return PartialPath.getDatabasePath(PathUtils.qualifyDatabaseName(database)); - } - // endregion @TestOnly public void clear() { - mTree.clear(); + treeModelMTree.clear(); } } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java index 5041be35a5e0..5cce781908a3 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/schema/ConfigMTree.java @@ -38,7 +38,6 @@ import org.apache.iotdb.confignode.persistence.schema.mnode.impl.ConfigTableNode; import org.apache.iotdb.db.exception.metadata.DatabaseAlreadySetException; import org.apache.iotdb.db.exception.metadata.DatabaseConflictException; -import org.apache.iotdb.db.exception.metadata.DatabaseModelException; import org.apache.iotdb.db.exception.metadata.DatabaseNotSetException; import org.apache.iotdb.db.exception.metadata.PathAlreadyExistException; import org.apache.iotdb.db.exception.metadata.PathNotExistException; @@ -162,8 +161,8 @@ public void setStorageGroup(final PartialPath path) throws MetadataException { } /** Delete a database */ - public void deleteDatabase(PartialPath path) throws MetadataException { - IDatabaseMNode databaseMNode = getDatabaseNodeByDatabasePath(path); + public void deleteDatabase(final PartialPath path) throws MetadataException { + final IDatabaseMNode databaseMNode = getDatabaseNodeByDatabasePath(path); IConfigMNode cur = databaseMNode.getParent(); // Suppose current system has root.a.b.sg1, root.a.sg2, and delete root.a.b.sg1 // delete the database node sg1 @@ -186,7 +185,8 @@ public void deleteDatabase(PartialPath path) throws MetadataException { * @param pathPattern a path pattern or a full path * @return a list contains all databases related to given path */ - public List getBelongedDatabases(PartialPath pathPattern) throws MetadataException { + public List getBelongedDatabases(final PartialPath pathPattern) + throws MetadataException { return collectDatabases(pathPattern, ALL_MATCH_SCOPE, false, true); } @@ -201,24 +201,24 @@ public 
List getBelongedDatabases(PartialPath pathPattern) throws Me * @return a list contains all database names under given path pattern */ public List getMatchedDatabases( - PartialPath pathPattern, PathPatternTree scope, boolean isPrefixMatch) + final PartialPath pathPattern, final PathPatternTree scope, final boolean isPrefixMatch) throws MetadataException { return collectDatabases(pathPattern, scope, isPrefixMatch, false); } private List collectDatabases( - PartialPath pathPattern, - PathPatternTree scope, - boolean isPrefixMatch, - boolean collectInternal) + final PartialPath pathPattern, + final PathPatternTree scope, + final boolean isPrefixMatch, + final boolean collectInternal) throws MetadataException { - List result = new LinkedList<>(); - try (DatabaseCollector collector = + final List result = new LinkedList<>(); + try (final DatabaseCollector collector = new DatabaseCollector, IConfigMNode>( root, pathPattern, store, isPrefixMatch, scope) { @Override - protected void collectDatabase(IDatabaseMNode node) { + protected void collectDatabase(final IDatabaseMNode node) { result.add(node.getPartialPath()); } }) { @@ -486,9 +486,6 @@ public void checkTemplateOnPath(final PartialPath path) throws MetadataException if (cur.getSchemaTemplateId() != NON_TEMPLATE) { throw new MetadataException("Template already exists on " + cur.getFullPath()); } - if (cur.isDatabase() && cur.getDatabaseSchema().isIsTableModel()) { - throw new DatabaseModelException(cur.getFullPath(), true); - } } checkTemplateOnSubtree(cur); @@ -651,9 +648,6 @@ private IConfigMNode getNodeSetTemplate(int templateId, PartialPath path) public void preCreateTable(final PartialPath database, final TsTable table) throws MetadataException { final IConfigMNode databaseNode = getDatabaseNodeByDatabasePath(database).getAsMNode(); - if (!databaseNode.getDatabaseSchema().isIsTableModel()) { - throw new DatabaseModelException(database.getFullPath(), false); - } final IConfigMNode node = databaseNode.getChild(table.getTableName()); if (node == null) { final ConfigTableNode tableNode = @@ -770,7 +764,7 @@ public Map> getAllUsingTables() { public Map> getAllPreCreateTables() throws MetadataException { final Map> result = new HashMap<>(); final List databaseList = getAllDatabasePaths(true); - for (PartialPath databasePath : databaseList) { + for (final PartialPath databasePath : databaseList) { final String database = databasePath.getFullPath().substring(ROOT.length() + 1); final IConfigMNode databaseNode = getDatabaseNodeByDatabasePath(databasePath).getAsMNode(); for (final IConfigMNode child : databaseNode.getChildren().values()) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/ConfigNodeProcedureEnv.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/ConfigNodeProcedureEnv.java index d9ec20316a75..f5cee035c2ca 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/ConfigNodeProcedureEnv.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/ConfigNodeProcedureEnv.java @@ -137,8 +137,8 @@ public ConfigManager getConfigManager() { * @param isGeneratedByPipe whether the deletion is triggered by pipe request * @return tsStatus */ - public TSStatus deleteDatabaseConfig(String name, boolean isGeneratedByPipe) { - DeleteDatabasePlan deleteDatabasePlan = new DeleteDatabasePlan(name); + public TSStatus deleteDatabaseConfig(final String name, final boolean isGeneratedByPipe) { + final DeleteDatabasePlan 
deleteDatabasePlan = new DeleteDatabasePlan(name); return getClusterSchemaManager().deleteDatabase(deleteDatabasePlan, isGeneratedByPipe); } @@ -149,7 +149,7 @@ public TSStatus deleteDatabaseConfig(String name, boolean isGeneratedByPipe) { * @param deleteSgName database name */ public void preDeleteDatabase( - PreDeleteDatabasePlan.PreDeleteType preDeleteType, String deleteSgName) { + final PreDeleteDatabasePlan.PreDeleteType preDeleteType, final String deleteSgName) { getPartitionManager().preDeleteDatabase(deleteSgName, preDeleteType); } @@ -159,7 +159,7 @@ public void preDeleteDatabase( * @throws IOException IOE * @throws TException Thrift IOE */ - public boolean invalidateCache(String storageGroupName) throws IOException, TException { + public boolean invalidateCache(final String storageGroupName) throws IOException, TException { List allDataNodes = getNodeManager().getRegisteredDataNodes(); TInvalidateCacheReq invalidateCacheReq = new TInvalidateCacheReq(); invalidateCacheReq.setStorageGroup(true); @@ -369,7 +369,8 @@ public void createConfigNodeHeartbeatCache(int nodeId) { * @return Those RegionReplicas that failed to create */ public Map doRegionCreation( - TConsensusGroupType consensusGroupType, CreateRegionGroupsPlan createRegionGroupsPlan) { + final TConsensusGroupType consensusGroupType, + final CreateRegionGroupsPlan createRegionGroupsPlan) { // Prepare clientHandler DataNodeAsyncRequestContext clientHandler; @@ -395,8 +396,8 @@ public Map doRegionCreation( Map failedRegions = new HashMap<>(); for (List regionReplicaSets : createRegionGroupsPlan.getRegionGroupMap().values()) { - for (TRegionReplicaSet regionReplicaSet : regionReplicaSets) { - for (TDataNodeLocation dataNodeLocation : regionReplicaSet.getDataNodeLocations()) { + for (final TRegionReplicaSet regionReplicaSet : regionReplicaSets) { + for (final TDataNodeLocation dataNodeLocation : regionReplicaSet.getDataNodeLocations()) { if (responseMap.get(requestId).getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { failedRegions .computeIfAbsent( diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/CreateRegionGroupsProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/CreateRegionGroupsProcedure.java index e573a35c2312..2bcaee129d58 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/CreateRegionGroupsProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/region/CreateRegionGroupsProcedure.java @@ -69,17 +69,18 @@ public CreateRegionGroupsProcedure() { } public CreateRegionGroupsProcedure( - TConsensusGroupType consensusGroupType, CreateRegionGroupsPlan createRegionGroupsPlan) { + final TConsensusGroupType consensusGroupType, + final CreateRegionGroupsPlan createRegionGroupsPlan) { this.consensusGroupType = consensusGroupType; this.createRegionGroupsPlan = createRegionGroupsPlan; } @TestOnly public CreateRegionGroupsProcedure( - TConsensusGroupType consensusGroupType, - CreateRegionGroupsPlan createRegionGroupsPlan, - CreateRegionGroupsPlan persistPlan, - Map failedRegionReplicaSets) { + final TConsensusGroupType consensusGroupType, + final CreateRegionGroupsPlan createRegionGroupsPlan, + final CreateRegionGroupsPlan persistPlan, + final Map failedRegionReplicaSets) { this.consensusGroupType = consensusGroupType; this.createRegionGroupsPlan = createRegionGroupsPlan; this.persistPlan = persistPlan; @@ -87,7 +88,8 @@ public 
CreateRegionGroupsProcedure( } @Override - protected Flow executeFromState(ConfigNodeProcedureEnv env, CreateRegionGroupsState state) { + protected Flow executeFromState( + final ConfigNodeProcedureEnv env, final CreateRegionGroupsState state) { switch (state) { case CREATE_REGION_GROUPS: failedRegionReplicaSets = env.doRegionCreation(consensusGroupType, createRegionGroupsPlan); @@ -95,7 +97,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, CreateRegionGroupsSt break; case SHUNT_REGION_REPLICAS: persistPlan = new CreateRegionGroupsPlan(); - OfferRegionMaintainTasksPlan offerPlan = new OfferRegionMaintainTasksPlan(); + final OfferRegionMaintainTasksPlan offerPlan = new OfferRegionMaintainTasksPlan(); // Filter those RegionGroups that created successfully createRegionGroupsPlan .getRegionGroupMap() @@ -112,7 +114,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, CreateRegionGroupsSt "[CreateRegionGroups] All replicas of RegionGroup: {} are created successfully!", regionReplicaSet.getRegionId()); } else { - TRegionReplicaSet failedRegionReplicas = + final TRegionReplicaSet failedRegionReplicas = failedRegionReplicaSets.get(regionReplicaSet.getRegionId()); if (failedRegionReplicas.getDataNodeLocationsSize() @@ -161,15 +163,15 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, CreateRegionGroupsSt env.persistRegionGroup(persistPlan); try { env.getConfigManager().getConsensusManager().write(offerPlan); - } catch (ConsensusException e) { + } catch (final ConsensusException e) { LOGGER.warn("Failed in the write API executing the consensus layer due to: ", e); } setNextState(CreateRegionGroupsState.ACTIVATE_REGION_GROUPS); break; case ACTIVATE_REGION_GROUPS: - long currentTime = System.nanoTime(); + final long currentTime = System.nanoTime(); // Build RegionGroupCache immediately to make these successfully built RegionGroup available - Map>> + final Map>> activateRegionGroupMap = new TreeMap<>(); createRegionGroupsPlan .getRegionGroupMap() @@ -182,13 +184,14 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, CreateRegionGroupsSt if (failedRegionReplicas == null || failedRegionReplicas.getDataNodeLocationsSize() <= (regionReplicaSet.getDataNodeLocationsSize() - 1) / 2) { - Set failedDataNodeIds = + final Set failedDataNodeIds = failedRegionReplicas == null ? 
new TreeSet<>() : failedRegionReplicas.getDataNodeLocations().stream() .map(TDataNodeLocation::getDataNodeId) .collect(Collectors.toSet()); - Map activateSampleMap = new TreeMap<>(); + final Map activateSampleMap = + new TreeMap<>(); regionReplicaSet .getDataNodeLocations() .forEach( @@ -230,18 +233,18 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, CreateRegionGroupsSt @Override protected void rollbackState( - ConfigNodeProcedureEnv configNodeProcedureEnv, - CreateRegionGroupsState createRegionGroupsState) { + final ConfigNodeProcedureEnv configNodeProcedureEnv, + final CreateRegionGroupsState createRegionGroupsState) { // Do nothing } @Override - protected CreateRegionGroupsState getState(int stateId) { + protected CreateRegionGroupsState getState(final int stateId) { return CreateRegionGroupsState.values()[stateId]; } @Override - protected int getStateId(CreateRegionGroupsState createRegionGroupsState) { + protected int getStateId(final CreateRegionGroupsState createRegionGroupsState) { return createRegionGroupsState.ordinal(); } @@ -251,7 +254,7 @@ protected CreateRegionGroupsState getInitialState() { } @Override - public void serialize(DataOutputStream stream) throws IOException { + public void serialize(final DataOutputStream stream) throws IOException { // Must serialize CREATE_REGION_GROUPS.getTypeCode() firstly stream.writeShort(ProcedureType.CREATE_REGION_GROUPS.getTypeCode()); super.serialize(stream); @@ -267,7 +270,7 @@ public void serialize(DataOutputStream stream) throws IOException { } @Override - public void deserialize(ByteBuffer byteBuffer) { + public void deserialize(final ByteBuffer byteBuffer) { super.deserialize(byteBuffer); this.consensusGroupType = TConsensusGroupType.findByValue(byteBuffer.getInt()); try { @@ -275,30 +278,30 @@ public void deserialize(ByteBuffer byteBuffer) { failedRegionReplicaSets.clear(); int failedRegionsSize = byteBuffer.getInt(); while (failedRegionsSize-- > 0) { - TConsensusGroupId groupId = + final TConsensusGroupId groupId = ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(byteBuffer); - TRegionReplicaSet replica = + final TRegionReplicaSet replica = ThriftCommonsSerDeUtils.deserializeTRegionReplicaSet(byteBuffer); failedRegionReplicaSets.put(groupId, replica); } if (byteBuffer.hasRemaining()) { persistPlan.deserializeForProcedure(byteBuffer); } - } catch (Exception e) { + } catch (final Exception e) { LOGGER.error("Deserialize meets error in CreateRegionGroupsProcedure", e); throw new RuntimeException(e); } } @Override - public boolean equals(Object o) { + public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } - CreateRegionGroupsProcedure that = (CreateRegionGroupsProcedure) o; + final CreateRegionGroupsProcedure that = (CreateRegionGroupsProcedure) o; return consensusGroupType == that.consensusGroupType && createRegionGroupsPlan.equals(that.createRegionGroupsPlan) && persistPlan.equals(that.persistPlan) diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterLogicalViewProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterLogicalViewProcedure.java index 7db6c583aaec..b0210944d111 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterLogicalViewProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/AlterLogicalViewProcedure.java @@ -193,7 
+193,7 @@ private TConsensusGroupId getBelongedSchemaRegion( patternTree.appendFullPath(viewPath); patternTree.constructTree(); final Map> schemaPartitionTable = - env.getConfigManager().getSchemaPartition(patternTree, false).schemaPartitionTable; + env.getConfigManager().getSchemaPartition(patternTree).schemaPartitionTable; if (schemaPartitionTable.isEmpty()) { throw new ProcedureException(new ViewNotExistException(viewPath.getFullPath())); } else { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeactivateTemplateProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeactivateTemplateProcedure.java index ba987a454f33..cffa43f3f73c 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeactivateTemplateProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeactivateTemplateProcedure.java @@ -223,7 +223,7 @@ private void invalidateCache(final ConfigNodeProcedureEnv env) { private void deleteData(final ConfigNodeProcedureEnv env) { final Map relatedDataRegionGroup = - env.getConfigManager().getRelatedDataRegionGroup(timeSeriesPatternTree, false); + env.getConfigManager().getRelatedDataRegionGroup(timeSeriesPatternTree); // target timeSeries has no data or no target timeSeries, return directly if (!relatedDataRegionGroup.isEmpty() && !timeSeriesPatternTree.isEmpty()) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteTimeSeriesProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteTimeSeriesProcedure.java index 2ea3640877ff..d95b35f62863 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteTimeSeriesProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/DeleteTimeSeriesProcedure.java @@ -235,7 +235,7 @@ private void executeDeleteData( } final Map relatedDataRegionGroup = - env.getConfigManager().getRelatedDataRegionGroup(patternTree, false); + env.getConfigManager().getRelatedDataRegionGroup(patternTree); // Target timeSeries has no data if (relatedDataRegionGroup.isEmpty()) { diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java index 26a71130e39a..4ef4f0c11635 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/CreateTableProcedure.java @@ -19,16 +19,10 @@ package org.apache.iotdb.confignode.procedure.impl.schema.table; -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; -import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; -import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.path.PathPatternTree; import org.apache.iotdb.commons.schema.table.TsTable; -import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import 
org.apache.iotdb.confignode.consensus.request.write.table.CommitCreateTablePlan; import org.apache.iotdb.confignode.consensus.request.write.table.PreCreateTablePlan; import org.apache.iotdb.confignode.consensus.request.write.table.RollbackCreateTablePlan; @@ -38,31 +32,22 @@ import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.apache.iotdb.confignode.procedure.impl.StateMachineProcedure; -import org.apache.iotdb.confignode.procedure.impl.schema.DataNodeRegionTaskExecutor; import org.apache.iotdb.confignode.procedure.impl.schema.SchemaUtils; import org.apache.iotdb.confignode.procedure.state.schema.CreateTableState; import org.apache.iotdb.confignode.procedure.store.ProcedureType; import org.apache.iotdb.confignode.rpc.thrift.TDatabaseSchema; -import org.apache.iotdb.mpp.rpc.thrift.TCheckTimeSeriesExistenceReq; -import org.apache.iotdb.mpp.rpc.thrift.TCheckTimeSeriesExistenceResp; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.tsfile.utils.ReadWriteIOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Set; -import static org.apache.iotdb.commons.conf.IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD; -import static org.apache.iotdb.commons.schema.SchemaConstant.ROOT; import static org.apache.iotdb.rpc.TSStatusCode.TABLE_ALREADY_EXISTS; public class CreateTableProcedure @@ -101,11 +86,6 @@ protected Flow executeFromState(final ConfigNodeProcedureEnv env, final CreateTa LOGGER.info("Pre release table {}.{}", database, table.getTableName()); preReleaseTable(env); break; - case VALIDATE_TIMESERIES_EXISTENCE: - LOGGER.info( - "Validate timeseries existence for table {}.{}", database, table.getTableName()); - validateTimeSeriesExistence(env); - break; case COMMIT_CREATE: LOGGER.info("Commit create table {}.{}", database, table.getTableName()); commitCreateTable(env); @@ -138,9 +118,7 @@ private void checkTableExistence(final ConfigNodeProcedureEnv env) { setFailure( new ProcedureException( new IoTDBException( - String.format( - "Table '%s.%s' already exists.", - database.substring(ROOT.length() + 1), table.getTableName()), + String.format("Table '%s.%s' already exists.", database, table.getTableName()), TABLE_ALREADY_EXISTS.getStatusCode()))); } else { final TDatabaseSchema schema = @@ -182,101 +160,6 @@ private void preReleaseTable(final ConfigNodeProcedureEnv env) { return; } - setNextState(CreateTableState.VALIDATE_TIMESERIES_EXISTENCE); - } - - private void validateTimeSeriesExistence(final ConfigNodeProcedureEnv env) { - final PathPatternTree patternTree = new PathPatternTree(); - final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - final DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - final PartialPath path; - try { - path = new PartialPath(new String[] {ROOT, database.substring(5), table.getTableName()}); - patternTree.appendPathPattern(path); - patternTree.appendPathPattern(path.concatAsMeasurementPath(MULTI_LEVEL_PATH_WILDCARD)); - patternTree.serialize(dataOutputStream); - } catch (final IOException e) { - LOGGER.warn("failed to serialize request for table {}.{}", database, table.getTableName(), e); - } - final ByteBuffer patternTreeBytes = 
ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - - final Map relatedSchemaRegionGroup = - env.getConfigManager().getRelatedSchemaRegionGroup(patternTree); - - if (relatedSchemaRegionGroup.isEmpty()) { - setNextState(CreateTableState.COMMIT_CREATE); - return; - } - - final List respList = new ArrayList<>(); - DataNodeRegionTaskExecutor - regionTask = - new DataNodeRegionTaskExecutor< - TCheckTimeSeriesExistenceReq, TCheckTimeSeriesExistenceResp>( - env, - relatedSchemaRegionGroup, - false, - CnToDnAsyncRequestType.CHECK_TIMESERIES_EXISTENCE, - ((dataNodeLocation, consensusGroupIdList) -> - new TCheckTimeSeriesExistenceReq(patternTreeBytes, consensusGroupIdList))) { - - @Override - protected List processResponseOfOneDataNode( - final TDataNodeLocation dataNodeLocation, - final List consensusGroupIdList, - final TCheckTimeSeriesExistenceResp response) { - respList.add(response); - final List failedRegionList = new ArrayList<>(); - if (response.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - return failedRegionList; - } - - if (response.getStatus().getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) { - final List subStatus = response.getStatus().getSubStatus(); - for (int i = 0; i < subStatus.size(); i++) { - if (subStatus.get(i).getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() - && subStatus.get(i).getCode() - != TSStatusCode.TIMESERIES_ALREADY_EXIST.getStatusCode()) { - failedRegionList.add(consensusGroupIdList.get(i)); - } - } - } else { - failedRegionList.addAll(consensusGroupIdList); - } - return failedRegionList; - } - - @Override - protected void onAllReplicasetFailure( - final TConsensusGroupId consensusGroupId, - final Set dataNodeLocationSet) { - setFailure( - new ProcedureException( - new MetadataException( - String.format( - "Create table %s.%s failed when [check timeseries existence on DataNode] because all replicaset of schemaRegion %s failed. 
%s", - database, - table.getTableName(), - consensusGroupId.id, - dataNodeLocationSet)))); - interruptTask(); - } - }; - regionTask.execute(); - if (isFailed()) { - return; - } - - for (final TCheckTimeSeriesExistenceResp resp : respList) { - if (resp.isExists()) { - setFailure( - new ProcedureException( - new MetadataException( - String.format( - "Timeseries already exists under root.%s.%s", - database, table.getTableName())))); - } - } setNextState(CreateTableState.COMMIT_CREATE); } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DeleteDevicesProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DeleteDevicesProcedure.java index fdb3a12b1d68..b039526d7179 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DeleteDevicesProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DeleteDevicesProcedure.java @@ -25,8 +25,6 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.path.PathPatternTree; import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; @@ -48,7 +46,6 @@ import javax.annotation.Nonnull; -import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; @@ -59,8 +56,6 @@ import java.util.Objects; import java.util.Set; -import static org.apache.iotdb.commons.conf.IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD; -import static org.apache.iotdb.commons.schema.SchemaConstant.ROOT; import static org.apache.iotdb.confignode.procedure.state.schema.DeleteDevicesState.CHECK_TABLE_EXISTENCE; import static org.apache.iotdb.confignode.procedure.state.schema.DeleteDevicesState.CLEAN_DATANODE_SCHEMA_CACHE; import static org.apache.iotdb.confignode.procedure.state.schema.DeleteDevicesState.CONSTRUCT_BLACK_LIST; @@ -74,9 +69,6 @@ public class DeleteDevicesProcedure extends AbstractAlterOrDropTableProcedure relatedSchemaRegionGroup = - env.getConfigManager().getRelatedSchemaRegionGroup(patternTree, true); + env.getConfigManager().getRelatedSchemaRegionGroup4TableModel(database); if (relatedSchemaRegionGroup.isEmpty()) { deletedDevicesNum = 0; @@ -274,7 +251,7 @@ private void deleteData(final ConfigNodeProcedureEnv env) { new TableRegionTaskExecutor<>( "delete data for table device", env, - env.getConfigManager().getRelatedDataRegionGroup(patternTree, true), + env.getConfigManager().getRelatedDataRegionGroup4TableModel(database), CnToDnAsyncRequestType.DELETE_DATA_FOR_TABLE_DEVICE, (dataNodeLocation, consensusGroupIdList) -> new TTableDeviceDeletionWithPatternOrModReq( @@ -287,7 +264,7 @@ private void deleteDeviceSchema(final ConfigNodeProcedureEnv env) { new TableRegionTaskExecutor<>( "roll back table device black list", env, - env.getConfigManager().getRelatedSchemaRegionGroup(patternTree, true), + env.getConfigManager().getRelatedSchemaRegionGroup4TableModel(database), CnToDnAsyncRequestType.DELETE_TABLE_DEVICE_IN_BLACK_LIST, (dataNodeLocation, consensusGroupIdList) -> new TTableDeviceDeletionWithPatternOrModReq( @@ -303,7 +280,7 @@ 
protected void rollbackState( new TableRegionTaskExecutor<>( "roll back table device black list", env, - env.getConfigManager().getRelatedSchemaRegionGroup(patternTree, true), + env.getConfigManager().getRelatedSchemaRegionGroup4TableModel(database), CnToDnAsyncRequestType.ROLLBACK_TABLE_DEVICE_BLACK_LIST, (dataNodeLocation, consensusGroupIdList) -> new TTableDeviceDeletionWithPatternOrModReq( diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableColumnProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableColumnProcedure.java index aa4568995406..42523cd02a8b 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableColumnProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableColumnProcedure.java @@ -25,8 +25,6 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.path.PathPatternTree; import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; @@ -47,7 +45,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; @@ -55,9 +52,6 @@ import java.util.Map; import java.util.Objects; -import static org.apache.iotdb.commons.conf.IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD; -import static org.apache.iotdb.commons.schema.SchemaConstant.ROOT; - public class DropTableColumnProcedure extends AbstractAlterOrDropTableProcedure { @@ -179,23 +173,10 @@ private void invalidateCache(final ConfigNodeProcedureEnv env) { } private void executeOnRegions(final ConfigNodeProcedureEnv env) { - final PathPatternTree patternTree = new PathPatternTree(); - final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - final DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - final PartialPath path; - try { - path = new PartialPath(new String[] {ROOT, database.substring(5), tableName}); - patternTree.appendPathPattern(path); - patternTree.appendPathPattern(path.concatAsMeasurementPath(MULTI_LEVEL_PATH_WILDCARD)); - patternTree.serialize(dataOutputStream); - } catch (final IOException e) { - LOGGER.warn("failed to serialize request for table {}.{}", database, table.getTableName(), e); - } - final Map relatedRegionGroup = isAttributeColumn - ? env.getConfigManager().getRelatedSchemaRegionGroup(patternTree, true) - : env.getConfigManager().getRelatedDataRegionGroup(patternTree, true); + ? 
env.getConfigManager().getRelatedSchemaRegionGroup4TableModel(database) + : env.getConfigManager().getRelatedDataRegionGroup4TableModel(database); if (!relatedRegionGroup.isEmpty()) { new TableRegionTaskExecutor<>( diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableProcedure.java index 4a8acb42cdad..1513287b24bc 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/schema/table/DropTableProcedure.java @@ -25,8 +25,6 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.path.PathPatternTree; import org.apache.iotdb.confignode.client.async.CnToDnAsyncRequestType; import org.apache.iotdb.confignode.client.async.CnToDnInternalServiceAsyncRequestManager; import org.apache.iotdb.confignode.client.async.handlers.DataNodeAsyncRequestContext; @@ -46,23 +44,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Map; -import java.util.Objects; - -import static org.apache.iotdb.commons.conf.IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD; -import static org.apache.iotdb.commons.schema.SchemaConstant.ROOT; public class DropTableProcedure extends AbstractAlterOrDropTableProcedure { private static final Logger LOGGER = LoggerFactory.getLogger(DropTableProcedure.class); - // Transient - private PathPatternTree patternTree; - public DropTableProcedure() { super(); } @@ -154,21 +144,8 @@ private void invalidateCache(final ConfigNodeProcedureEnv env) { } private void deleteData(final ConfigNodeProcedureEnv env) { - patternTree = new PathPatternTree(); - final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - final DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - final PartialPath path; - try { - path = new PartialPath(new String[] {ROOT, database.substring(5), tableName}); - patternTree.appendPathPattern(path); - patternTree.appendPathPattern(path.concatAsMeasurementPath(MULTI_LEVEL_PATH_WILDCARD)); - patternTree.serialize(dataOutputStream); - } catch (final IOException e) { - LOGGER.warn("failed to serialize request for table {}.{}", database, table.getTableName(), e); - } - final Map relatedDataRegionGroup = - env.getConfigManager().getRelatedDataRegionGroup(patternTree, true); + env.getConfigManager().getRelatedDataRegionGroup4TableModel(database); if (!relatedDataRegionGroup.isEmpty()) { new TableRegionTaskExecutor<>( @@ -186,24 +163,8 @@ private void deleteData(final ConfigNodeProcedureEnv env) { } private void deleteSchema(final ConfigNodeProcedureEnv env) { - if (Objects.isNull(patternTree)) { - patternTree = new PathPatternTree(); - final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - final DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - final PartialPath path; - try { - path = new PartialPath(new String[] {ROOT, database.substring(5), tableName}); - patternTree.appendPathPattern(path); - 
patternTree.appendPathPattern(path.concatAsMeasurementPath(MULTI_LEVEL_PATH_WILDCARD)); - patternTree.serialize(dataOutputStream); - } catch (final IOException e) { - LOGGER.warn( - "failed to serialize request for table {}.{}", database, table.getTableName(), e); - } - } - final Map relatedSchemaRegionGroup = - env.getConfigManager().getRelatedSchemaRegionGroup(patternTree, true); + env.getConfigManager().getRelatedSchemaRegionGroup4TableModel(database); if (!relatedSchemaRegionGroup.isEmpty()) { new TableRegionTaskExecutor<>( diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/CreateTableState.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/CreateTableState.java index 9d09a09cfb88..be765853ed05 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/CreateTableState.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/schema/CreateTableState.java @@ -23,7 +23,6 @@ public enum CreateTableState { CHECK_TABLE_EXISTENCE, PRE_CREATE, PRE_RELEASE, - VALIDATE_TIMESERIES_EXISTENCE, COMMIT_CREATE, COMMIT_RELEASE, } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java index ce360da47cfc..ecfe6455d4b4 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java @@ -529,63 +529,70 @@ public TSStatus alterDatabase(final TDatabaseSchema databaseSchema) { } @Override - public TSStatus deleteDatabase(TDeleteDatabaseReq tDeleteReq) { + public TSStatus deleteDatabase(final TDeleteDatabaseReq tDeleteReq) { return configManager.deleteDatabases( new TDeleteDatabasesReq(Collections.singletonList(tDeleteReq.getPrefixPath())) .setIsGeneratedByPipe(tDeleteReq.isIsGeneratedByPipe())); } @Override - public TSStatus deleteDatabases(TDeleteDatabasesReq tDeleteReq) { + public TSStatus deleteDatabases(final TDeleteDatabasesReq tDeleteReq) { return configManager.deleteDatabases(tDeleteReq); } @Override - public TSStatus setTTL(TSetTTLReq req) throws TException { + public TSStatus setTTL(final TSetTTLReq req) throws TException { return configManager.setTTL(new SetTTLPlan(req.getPathPattern(), req.getTTL())); } @Override - public TSStatus setSchemaReplicationFactor(TSetSchemaReplicationFactorReq req) throws TException { + public TSStatus setSchemaReplicationFactor(final TSetSchemaReplicationFactorReq req) + throws TException { return configManager.setSchemaReplicationFactor( new SetSchemaReplicationFactorPlan(req.getDatabase(), req.getSchemaReplicationFactor())); } @Override - public TSStatus setDataReplicationFactor(TSetDataReplicationFactorReq req) throws TException { + public TSStatus setDataReplicationFactor(final TSetDataReplicationFactorReq req) + throws TException { return configManager.setDataReplicationFactor( new SetDataReplicationFactorPlan(req.getDatabase(), req.getDataReplicationFactor())); } @Override - public TSStatus setTimePartitionInterval(TSetTimePartitionIntervalReq req) throws TException { + public TSStatus setTimePartitionInterval(final TSetTimePartitionIntervalReq req) + throws TException { return configManager.setTimePartitionInterval( new 
SetTimePartitionIntervalPlan(req.getDatabase(), req.getTimePartitionInterval())); } @Override - public TCountDatabaseResp countMatchedDatabases(TGetDatabaseReq req) { - PathPatternTree scope = + public TCountDatabaseResp countMatchedDatabases(final TGetDatabaseReq req) { + final PathPatternTree scope = req.getScopePatternTree() == null ? SchemaConstant.ALL_MATCH_SCOPE : PathPatternTree.deserialize(ByteBuffer.wrap(req.getScopePatternTree())); - CountDatabasePlan plan = new CountDatabasePlan(req.getDatabasePathPattern(), scope); - CountDatabaseResp countDatabaseResp = + final CountDatabasePlan plan = + new CountDatabasePlan( + req.getDatabasePathPattern(), scope, req.isSetIsTableModel() && req.isIsTableModel()); + final CountDatabaseResp countDatabaseResp = (CountDatabaseResp) configManager.countMatchedDatabases(plan); - TCountDatabaseResp resp = new TCountDatabaseResp(); + final TCountDatabaseResp resp = new TCountDatabaseResp(); countDatabaseResp.convertToRPCCountStorageGroupResp(resp); return resp; } @Override - public TDatabaseSchemaResp getMatchedDatabaseSchemas(TGetDatabaseReq req) { - PathPatternTree scope = + public TDatabaseSchemaResp getMatchedDatabaseSchemas(final TGetDatabaseReq req) { + final PathPatternTree scope = req.getScopePatternTree() == null ? SchemaConstant.ALL_MATCH_SCOPE : PathPatternTree.deserialize(ByteBuffer.wrap(req.getScopePatternTree())); - GetDatabasePlan plan = new GetDatabasePlan(req.getDatabasePathPattern(), scope); - DatabaseSchemaResp databaseSchemaResp = + final GetDatabasePlan plan = + new GetDatabasePlan( + req.getDatabasePathPattern(), scope, req.isSetIsTableModel() && req.isIsTableModel()); + final DatabaseSchemaResp databaseSchemaResp = (DatabaseSchemaResp) configManager.getMatchedDatabaseSchemas(plan); return databaseSchemaResp.convertToRPCStorageGroupSchemaResp(); @@ -614,29 +621,25 @@ public TSStatus callSpecialProcedure(TTestOperation operation) { @Override public TSchemaPartitionTableResp getSchemaPartitionTable(final TSchemaPartitionReq req) { - final PathPatternTree patternTree = - PathPatternTree.deserialize(ByteBuffer.wrap(req.getPathPatternTree())); return configManager.getSchemaPartition( - patternTree, req.isSetIsTableModel() && req.isIsTableModel()); + PathPatternTree.deserialize(ByteBuffer.wrap(req.getPathPatternTree()))); } @Override public TSchemaPartitionTableResp getSchemaPartitionTableWithSlots( - Map> dbSlotMap) { + final Map> dbSlotMap) { return configManager.getSchemaPartition(dbSlotMap); } @Override - public TSchemaPartitionTableResp getOrCreateSchemaPartitionTable(TSchemaPartitionReq req) { - PathPatternTree patternTree = - PathPatternTree.deserialize(ByteBuffer.wrap(req.getPathPatternTree())); + public TSchemaPartitionTableResp getOrCreateSchemaPartitionTable(final TSchemaPartitionReq req) { return configManager.getOrCreateSchemaPartition( - patternTree, req.isSetIsTableModel() && req.isIsTableModel()); + PathPatternTree.deserialize(ByteBuffer.wrap(req.getPathPatternTree()))); } @Override public TSchemaPartitionTableResp getOrCreateSchemaPartitionTableWithSlots( - Map> dbSlotMap) { + final Map> dbSlotMap) { return configManager.getOrCreateSchemaPartition(dbSlotMap); } @@ -909,18 +912,16 @@ public TSStatus merge() throws TException { } @Override - public TSStatus flush(TFlushReq req) throws TException { + public TSStatus flush(final TFlushReq req) throws TException { if (req.storageGroups != null) { - List noExistSg = - configManager - .getPartitionManager() - 
.filterUnExistDatabases(PartialPath.fromStringList(req.storageGroups)); + final List noExistSg = + configManager.getPartitionManager().filterUnExistDatabases(req.storageGroups); if (!noExistSg.isEmpty()) { - StringBuilder sb = new StringBuilder(); - noExistSg.forEach(storageGroup -> sb.append(storageGroup.getFullPath()).append(",")); + final StringBuilder sb = new StringBuilder(); + noExistSg.forEach(storageGroup -> sb.append(storageGroup).append(",")); return RpcUtils.getStatus( TSStatusCode.DATABASE_NOT_EXIST, - "storageGroup " + sb.subSequence(0, sb.length() - 1) + " does not exist"); + "Database " + sb.subSequence(0, sb.length() - 1) + " does not exist"); } } return configManager.flush(req); @@ -983,10 +984,10 @@ public TGetDataNodeLocationsResp getRunningDataNodeLocations() { } @Override - public TShowRegionResp showRegion(TShowRegionReq showRegionReq) { - GetRegionInfoListPlan getRegionInfoListPlan = new GetRegionInfoListPlan(showRegionReq); - RegionInfoListResp dataSet = configManager.showRegion(getRegionInfoListPlan); - TShowRegionResp showRegionResp = new TShowRegionResp(); + public TShowRegionResp showRegion(final TShowRegionReq showRegionReq) { + final GetRegionInfoListPlan getRegionInfoListPlan = new GetRegionInfoListPlan(showRegionReq); + final RegionInfoListResp dataSet = configManager.showRegion(getRegionInfoListPlan); + final TShowRegionResp showRegionResp = new TShowRegionResp(); showRegionResp.setStatus(dataSet.getStatus()); showRegionResp.setRegionInfoList(dataSet.getRegionInfoList()); return showRegionResp; @@ -1266,7 +1267,7 @@ public TGetModelInfoResp getModelInfo(TGetModelInfoReq req) { } @Override - public TSStatus setSpaceQuota(TSetSpaceQuotaReq req) throws TException { + public TSStatus setSpaceQuota(final TSetSpaceQuotaReq req) throws TException { return configManager.setSpaceQuota(req); } diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/PartitionInfoTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/PartitionInfoTest.java index c15cefff6579..8a605628aafa 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/PartitionInfoTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/PartitionInfoTest.java @@ -203,7 +203,7 @@ public void testShowRegion() { // Create a SchemaRegion CreateRegionGroupsPlan createRegionGroupsPlan = new CreateRegionGroupsPlan(); - TRegionReplicaSet schemaRegionReplicaSet = + final TRegionReplicaSet schemaRegionReplicaSet = generateTRegionReplicaSet( testFlag.SchemaPartition.getFlag(), generateTConsensusGroupId( @@ -213,7 +213,7 @@ public void testShowRegion() { // Create a DataRegion createRegionGroupsPlan = new CreateRegionGroupsPlan(); - TRegionReplicaSet dataRegionReplicaSet = + final TRegionReplicaSet dataRegionReplicaSet = generateTRegionReplicaSet( testFlag.DataPartition.getFlag(), generateTConsensusGroupId( @@ -221,11 +221,11 @@ public void testShowRegion() { createRegionGroupsPlan.addRegionGroup("root.test" + i, dataRegionReplicaSet); partitionInfo.createRegionGroups(createRegionGroupsPlan); } - GetRegionInfoListPlan regionReq = new GetRegionInfoListPlan(); - TShowRegionReq showRegionReq = new TShowRegionReq(); + final GetRegionInfoListPlan regionReq = new GetRegionInfoListPlan(); + final TShowRegionReq showRegionReq = new TShowRegionReq(); showRegionReq.setConsensusGroupType(null); regionReq.setShowRegionReq(showRegionReq); - RegionInfoListResp regionInfoList1 = + final RegionInfoListResp 
regionInfoList1 = (RegionInfoListResp) partitionInfo.getRegionInfoList(regionReq); Assert.assertEquals(20, regionInfoList1.getRegionInfoList().size()); regionInfoList1 @@ -233,7 +233,7 @@ public void testShowRegion() { .forEach((regionInfo) -> Assert.assertEquals("127.0.0.1", regionInfo.getClientRpcIp())); showRegionReq.setConsensusGroupType(TConsensusGroupType.SchemaRegion); - RegionInfoListResp regionInfoList2 = + final RegionInfoListResp regionInfoList2 = (RegionInfoListResp) partitionInfo.getRegionInfoList(regionReq); Assert.assertEquals(10, regionInfoList2.getRegionInfoList().size()); regionInfoList2 @@ -244,7 +244,7 @@ public void testShowRegion() { TConsensusGroupType.SchemaRegion, regionInfo.getConsensusGroupId().getType())); showRegionReq.setConsensusGroupType(TConsensusGroupType.DataRegion); - RegionInfoListResp regionInfoList3 = + final RegionInfoListResp regionInfoList3 = (RegionInfoListResp) partitionInfo.getRegionInfoList(regionReq); Assert.assertEquals(10, regionInfoList3.getRegionInfoList().size()); regionInfoList3 @@ -255,7 +255,7 @@ public void testShowRegion() { TConsensusGroupType.DataRegion, regionInfo.getConsensusGroupId().getType())); showRegionReq.setConsensusGroupType(null); showRegionReq.setDatabases(Collections.singletonList("root.test1")); - RegionInfoListResp regionInfoList4 = + final RegionInfoListResp regionInfoList4 = (RegionInfoListResp) partitionInfo.getRegionInfoList(regionReq); Assert.assertEquals(10, regionInfoList4.getRegionInfoList().size()); regionInfoList4 diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfoTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfoTest.java index 0e16ae342195..96dd26d9304f 100644 --- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfoTest.java +++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/persistence/schema/ClusterSchemaInfoTest.java @@ -112,7 +112,7 @@ public void testSnapshot() throws IOException, IllegalPathException { GetDatabasePlan getStorageGroupReq = new GetDatabasePlan( - Arrays.asList(PathUtils.splitPathToDetachedNodes("root.**")), ALL_MATCH_SCOPE); + Arrays.asList(PathUtils.splitPathToDetachedNodes("root.**")), ALL_MATCH_SCOPE, false); Map reloadResult = clusterSchemaInfo.getMatchedDatabaseSchemas(getStorageGroupReq).getSchemaMap(); Assert.assertEquals(testMap, reloadResult); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/DatabaseModelException.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/DatabaseModelException.java deleted file mode 100644 index 0ddf1174d371..000000000000 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/exception/metadata/DatabaseModelException.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.exception.metadata;
-
-import org.apache.iotdb.commons.exception.MetadataException;
-import org.apache.iotdb.commons.utils.PathUtils;
-import org.apache.iotdb.rpc.TSStatusCode;
-
-import java.util.List;
-import java.util.stream.Collectors;
-
-public class DatabaseModelException extends MetadataException {
-  public DatabaseModelException(final String path, final boolean isTableModel) {
-    super(
-        "The database "
-            + (isTableModel
-                ? PathUtils.qualifyDatabaseName(path)
-                : PathUtils.unQualifyDatabaseName(path))
-            + " is a "
-            + (isTableModel ? "table" : "tree")
-            + " model database.",
-        TSStatusCode.DATABASE_MODEL.getStatusCode());
-  }
-
-  public DatabaseModelException(final List<String> paths, final boolean isTableModel) {
-    super(
-        "The databases "
-            + (isTableModel
-                ? paths.stream().map(PathUtils::qualifyDatabaseName).collect(Collectors.toList())
-                : paths.stream().map(PathUtils::unQualifyDatabaseName).collect(Collectors.toList()))
-            + " are all "
-            + (isTableModel ? "table" : "tree")
-            + " model databases.",
-        TSStatusCode.DATABASE_MODEL.getStatusCode());
-  }
-}
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/PipeInsertionEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/PipeInsertionEvent.java
index cb9182aff586..c80dd8bceff7 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/PipeInsertionEvent.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/PipeInsertionEvent.java
@@ -23,6 +23,7 @@
 import org.apache.iotdb.commons.pipe.datastructure.pattern.TablePattern;
 import org.apache.iotdb.commons.pipe.datastructure.pattern.TreePattern;
 import org.apache.iotdb.commons.pipe.event.EnrichedEvent;
+import org.apache.iotdb.commons.utils.PathUtils;
 
 public abstract class PipeInsertionEvent extends EnrichedEvent {
 
@@ -97,10 +98,7 @@ public String getTreeModelDatabaseName() {
 
   public String getTableModelDatabaseName() {
     return tableModelDatabaseName == null
-        ? tableModelDatabaseName =
-            treeModelDatabaseName != null && treeModelDatabaseName.startsWith("root.")
-                ? treeModelDatabaseName.substring(5)
-                : treeModelDatabaseName
+        ? tableModelDatabaseName = PathUtils.unQualifyDatabaseName(treeModelDatabaseName)
         : tableModelDatabaseName;
   }
 
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileAndDeletionExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileAndDeletionExtractor.java
index 011bfcc389ab..1f9f13846f96 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileAndDeletionExtractor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileAndDeletionExtractor.java
@@ -321,9 +321,7 @@ public void customize(
     final String databaseName = dataRegion.getDatabaseName();
     if (Objects.nonNull(databaseName)) {
       isDbNameCoveredByPattern =
-          treePattern.coversDb(databaseName)
-              // The database name is prefixed with "root."
-              && tablePattern.coversDb(databaseName.substring(5));
+          treePattern.coversDb(databaseName) && tablePattern.coversDb(databaseName);
     }
   }
 
@@ -660,8 +658,7 @@ private boolean mayTsFileResourceOverlappedWithPattern(final TsFileResource reso
     return isTableModel
         ? (tablePattern.isTableModelDataAllowedToBeCaptured()
-            // The database name in resource is prefixed with "root."
-            && tablePattern.matchesDatabase(resource.getDatabaseName().substring(5))
+            && tablePattern.matchesDatabase(resource.getDatabaseName())
             && tablePattern.matchesTable(deviceID.getTableName()))
         : (treePattern.isTreeModelDataAllowedToBeCaptured()
             && treePattern.mayOverlapWithDevice(deviceID));
@@ -678,7 +675,7 @@ private void detectModel(final TsFileResource resource, final IDeviceID deviceID
     isDbNameCoveredByPattern =
         isTableModel
             ? tablePattern.isTableModelDataAllowedToBeCaptured()
-                && tablePattern.coversDb(databaseName.substring(5))
+                && tablePattern.coversDb(databaseName)
             : treePattern.isTreeModelDataAllowedToBeCaptured()
                 && treePattern.coversDb(databaseName);
   }
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java
index 568e863acf40..6957ccfdbc36 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java
@@ -221,9 +221,7 @@ public void customize(
     final String databaseName = dataRegion.getDatabaseName();
     if (databaseName != null) {
       isDbNameCoveredByPattern =
-          treePattern.coversDb(databaseName)
-              // The database name is prefixed with "root."
- && tablePattern.coversDb(databaseName.substring(5)); + treePattern.coversDb(databaseName) && tablePattern.coversDb(databaseName); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java index cc46030e6c3a..7ed2bad12ece 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java @@ -127,8 +127,6 @@ import static org.apache.iotdb.db.exception.metadata.DatabaseNotSetException.DATABASE_NOT_SET; import static org.apache.iotdb.db.utils.ErrorHandlingUtils.getRootCause; -import static org.apache.iotdb.db.utils.constant.SqlConstant.ROOT; -import static org.apache.tsfile.common.constant.TsFileConstant.PATH_SEPARATOR_CHAR; public class IoTDBDataNodeReceiver extends IoTDBFileReceiver { @@ -885,8 +883,7 @@ private void autoCreateDatabaseIfNecessary(final String database) { return; } - final TDatabaseSchema schema = - new TDatabaseSchema(new TDatabaseSchema(ROOT + PATH_SEPARATOR_CHAR + database)); + final TDatabaseSchema schema = new TDatabaseSchema(new TDatabaseSchema(database)); schema.setIsTableModel(true); final CreateDBTask task = new CreateDBTask(schema, true); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/ConfigNodeClient.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/ConfigNodeClient.java index e9bfb6ce6472..a3ff327e9ef5 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/ConfigNodeClient.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/client/ConfigNodeClient.java @@ -604,7 +604,7 @@ public TSStatus setTimePartitionInterval(TSetTimePartitionIntervalReq req) throw } @Override - public TSchemaPartitionTableResp getSchemaPartitionTable(TSchemaPartitionReq req) + public TSchemaPartitionTableResp getSchemaPartitionTable(final TSchemaPartitionReq req) throws TException { return executeRemoteCallWithRetry( () -> client.getSchemaPartitionTable(req), resp -> !updateConfigNodeLeader(resp.status)); @@ -612,7 +612,7 @@ public TSchemaPartitionTableResp getSchemaPartitionTable(TSchemaPartitionReq req @Override public TSchemaPartitionTableResp getSchemaPartitionTableWithSlots( - Map> dbSlotMap) throws TException { + final Map> dbSlotMap) throws TException { return executeRemoteCallWithRetry( () -> client.getSchemaPartitionTableWithSlots(dbSlotMap), resp -> !updateConfigNodeLeader(resp.status)); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java index 1e9f30a2dec0..9e0da2bc828d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeInternalRPCServiceImpl.java @@ -568,10 +568,10 @@ public TSStatus invalidateSchemaCache(final TInvalidateCacheReq req) { DataNodeSchemaLockManager.getInstance().takeWriteLock(SchemaLockType.VALIDATE_VS_DELETION); TreeDeviceSchemaCacheManager.getInstance().takeWriteLock(); try { + final String database = req.getFullPath(); // req.getFullPath() is a database path - 
ClusterTemplateManager.getInstance().invalid(req.getFullPath()); + ClusterTemplateManager.getInstance().invalid(database); // clear table related cache - final String database = req.getFullPath().substring(5); DataNodeTableCache.getInstance().invalid(database); tableDeviceSchemaCache.invalidate(database); LOGGER.info("Schema cache of {} has been invalidated", req.getFullPath()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeRegionManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeRegionManager.java index 6581a77735d3..a2dddacda48c 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeRegionManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/protocol/thrift/impl/DataNodeRegionManager.java @@ -29,7 +29,6 @@ import org.apache.iotdb.commons.consensus.SchemaRegionId; import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.consensus.common.Peer; import org.apache.iotdb.consensus.exception.ConsensusException; import org.apache.iotdb.consensus.exception.ConsensusGroupAlreadyExistException; @@ -110,8 +109,7 @@ public TSStatus createSchemaRegion( final SchemaRegionId schemaRegionId = new SchemaRegionId(regionReplicaSet.getRegionId().getId()); try { - final PartialPath storageGroupPartitionPath = PartialPath.getDatabasePath(storageGroup); - schemaEngine.createSchemaRegion(storageGroupPartitionPath, schemaRegionId); + schemaEngine.createSchemaRegion(storageGroup, schemaRegionId); schemaRegionLockMap.put(schemaRegionId, new ReentrantReadWriteLock(false)); final List peers = new ArrayList<>(); for (final TDataNodeLocation dataNodeLocation : regionReplicaSet.getDataNodeLocations()) { @@ -172,20 +170,20 @@ public TSStatus createDataRegion(TRegionReplicaSet regionReplicaSet, String stor return tsStatus; } - public TSStatus createNewRegion(ConsensusGroupId regionId, String storageGroup) { - TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + public TSStatus createNewRegion(final ConsensusGroupId regionId, final String storageGroup) { + final TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); LOGGER.info("start to create new region {}", regionId); try { if (regionId instanceof DataRegionId) { - DataRegionId dataRegionId = (DataRegionId) regionId; + final DataRegionId dataRegionId = (DataRegionId) regionId; storageEngine.createDataRegion(dataRegionId, storageGroup); dataRegionLockMap.putIfAbsent(dataRegionId, new ReentrantReadWriteLock(false)); } else { SchemaRegionId schemaRegionId = (SchemaRegionId) regionId; - schemaEngine.createSchemaRegion(new PartialPath(storageGroup), schemaRegionId); + schemaEngine.createSchemaRegion(storageGroup, schemaRegionId); schemaRegionLockMap.putIfAbsent(schemaRegionId, new ReentrantReadWriteLock(false)); } - } catch (Exception e) { + } catch (final Exception e) { LOGGER.error("create new region {} error", regionId, e); status.setCode(TSStatusCode.CREATE_REGION_ERROR.getStatusCode()); status.setMessage("create new region " + regionId + "error, exception:" + e.getMessage()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analysis.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analysis.java index 1f8905cc666f..770b20ebcf50 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analysis.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/Analysis.java @@ -436,12 +436,12 @@ public Expression getGlobalTimePredicate() { return globalTimePredicate; } - public void setGlobalTimePredicate(Expression timeFilter) { + public void setGlobalTimePredicate(final Expression timeFilter) { this.globalTimePredicate = timeFilter; } @Override - public TimePredicate getCovertedTimePredicate() { + public TimePredicate getConvertedTimePredicate() { return globalTimePredicate == null ? null : new TreeModelTimePredicate(globalTimePredicate); } @@ -450,11 +450,11 @@ public DatasetHeader getRespDatasetHeader() { return respDatasetHeader; } - public void setRespDatasetHeader(DatasetHeader respDatasetHeader) { + public void setRespDatasetHeader(final DatasetHeader respDatasetHeader) { this.respDatasetHeader = respDatasetHeader; } - public TSDataType getType(Expression expression) { + public TSDataType getType(final Expression expression) { // NULL_Operand needn't check if (expression.getExpressionType().equals(ExpressionType.NULL)) { return null; @@ -466,13 +466,13 @@ public TSDataType getType(Expression expression) { return deviceTemplate.getSchemaMap().get(seriesOperand.getPath().getMeasurement()).getType(); } - TSDataType type = expressionTypes.get(NodeRef.of(expression)); + final TSDataType type = expressionTypes.get(NodeRef.of(expression)); checkArgument(type != null, "Expression is not analyzed: %s", expression); return type; } @Override - public boolean canSkipExecute(MPPQueryContext context) { + public boolean canSkipExecute(final MPPQueryContext context) { return isFinishQueryAfterAnalyze() || (context.getQueryType() == QueryType.READ && !hasDataSource()); } @@ -486,8 +486,8 @@ private boolean hasDataSource() { } @Override - public TsBlock constructResultForMemorySource(MPPQueryContext context) { - StatementMemorySource memorySource = + public TsBlock constructResultForMemorySource(final MPPQueryContext context) { + final StatementMemorySource memorySource = new StatementMemorySourceVisitor() .process(getTreeStatement(), new StatementMemorySourceContext(context, this)); setRespDatasetHeader(memorySource.getDatasetHeader()); @@ -515,7 +515,8 @@ public Map> getCrossGroupByExpressions() { return crossGroupByExpressions; } - public void setCrossGroupByExpressions(Map> crossGroupByExpressions) { + public void setCrossGroupByExpressions( + final Map> crossGroupByExpressions) { this.crossGroupByExpressions = crossGroupByExpressions; } @@ -523,7 +524,7 @@ public FillDescriptor getFillDescriptor() { return fillDescriptor; } - public void setFillDescriptor(FillDescriptor fillDescriptor) { + public void setFillDescriptor(final FillDescriptor fillDescriptor) { this.fillDescriptor = fillDescriptor; } @@ -531,7 +532,7 @@ public boolean hasValueFilter() { return hasValueFilter; } - public void setHasValueFilter(boolean hasValueFilter) { + public void setHasValueFilter(final boolean hasValueFilter) { this.hasValueFilter = hasValueFilter; } @@ -539,7 +540,7 @@ public Expression getWhereExpression() { return whereExpression; } - public void setWhereExpression(Expression whereExpression) { + public void setWhereExpression(final Expression whereExpression) { this.whereExpression = whereExpression; } @@ -547,7 +548,7 @@ public Map getDeviceToWhereExpression() { return deviceToWhereExpression; } - public void setDeviceToWhereExpression(Map deviceToWhereExpression) { + public void 
setDeviceToWhereExpression(final Map deviceToWhereExpression) { this.deviceToWhereExpression = deviceToWhereExpression; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeUtils.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeUtils.java index a59c614902d3..2b60abcfb736 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeUtils.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeUtils.java @@ -74,8 +74,6 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_ROOT; -import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; import static org.apache.iotdb.db.queryengine.plan.execution.config.TableConfigTaskVisitor.DATABASE_NOT_SPECIFIED; public class AnalyzeUtils { @@ -115,7 +113,8 @@ public static InsertBaseStatement analyzeInsert( return realStatement; } - public static String getDatabaseName(InsertBaseStatement statement, MPPQueryContext context) { + public static String getDatabaseName( + final InsertBaseStatement statement, final MPPQueryContext context) { if (statement.getDatabaseName().isPresent()) { return statement.getDatabaseName().get(); } @@ -125,34 +124,22 @@ public static String getDatabaseName(InsertBaseStatement statement, MPPQueryCont return null; } - public static String getDatabaseNameForTableWithRoot( - InsertBaseStatement statement, MPPQueryContext context) { - if (statement.getDatabaseName().isPresent()) { - return PATH_ROOT + PATH_SEPARATOR + statement.getDatabaseName().get(); - } - if (context != null && context.getDatabaseName().isPresent()) { - return PATH_ROOT + PATH_SEPARATOR + context.getDatabaseName().get(); - } - return null; - } - public static List computeTableDataPartitionParams( - InsertBaseStatement statement, MPPQueryContext context) { + final InsertBaseStatement statement, final MPPQueryContext context) { if (statement instanceof InsertTabletStatement) { - InsertTabletStatement insertTabletStatement = (InsertTabletStatement) statement; - Map> timePartitionSlotMap = new HashMap<>(); + final InsertTabletStatement insertTabletStatement = (InsertTabletStatement) statement; + final Map> timePartitionSlotMap = new HashMap<>(); for (int i = 0; i < insertTabletStatement.getRowCount(); i++) { timePartitionSlotMap .computeIfAbsent(insertTabletStatement.getTableDeviceID(i), id -> new HashSet<>()) .add(insertTabletStatement.getTimePartitionSlot(i)); } - return computeDataPartitionParams( - timePartitionSlotMap, getDatabaseNameForTableWithRoot(statement, context)); + return computeDataPartitionParams(timePartitionSlotMap, getDatabaseName(statement, context)); } else if (statement instanceof InsertMultiTabletsStatement) { - InsertMultiTabletsStatement insertMultiTabletsStatement = + final InsertMultiTabletsStatement insertMultiTabletsStatement = (InsertMultiTabletsStatement) statement; - Map> timePartitionSlotMap = new HashMap<>(); - for (InsertTabletStatement insertTabletStatement : + final Map> timePartitionSlotMap = new HashMap<>(); + for (final InsertTabletStatement insertTabletStatement : insertMultiTabletsStatement.getInsertTabletStatementList()) { for (int i = 0; i < insertTabletStatement.getRowCount(); i++) { timePartitionSlotMap @@ -160,26 +147,24 @@ public static List computeTableDataPartitionParams( .add(insertTabletStatement.getTimePartitionSlot(i)); } } - return computeDataPartitionParams( - 
timePartitionSlotMap, getDatabaseNameForTableWithRoot(statement, context)); + return computeDataPartitionParams(timePartitionSlotMap, getDatabaseName(statement, context)); } else if (statement instanceof InsertRowStatement) { - InsertRowStatement insertRowStatement = (InsertRowStatement) statement; + final InsertRowStatement insertRowStatement = (InsertRowStatement) statement; return computeDataPartitionParams( Collections.singletonMap( insertRowStatement.getTableDeviceID(), Collections.singleton(insertRowStatement.getTimePartitionSlot())), - getDatabaseNameForTableWithRoot(statement, context)); + getDatabaseName(statement, context)); } else if (statement instanceof InsertRowsStatement) { - InsertRowsStatement insertRowsStatement = (InsertRowsStatement) statement; - Map> timePartitionSlotMap = new HashMap<>(); - for (InsertRowStatement insertRowStatement : + final InsertRowsStatement insertRowsStatement = (InsertRowsStatement) statement; + final Map> timePartitionSlotMap = new HashMap<>(); + for (final InsertRowStatement insertRowStatement : insertRowsStatement.getInsertRowStatementList()) { timePartitionSlotMap .computeIfAbsent(insertRowStatement.getTableDeviceID(), id -> new HashSet<>()) .add(insertRowStatement.getTimePartitionSlot()); } - return computeDataPartitionParams( - timePartitionSlotMap, getDatabaseNameForTableWithRoot(statement, context)); + return computeDataPartitionParams(timePartitionSlotMap, getDatabaseName(statement, context)); } throw new UnsupportedOperationException("computeDataPartitionParams for " + statement); } @@ -305,12 +290,12 @@ public static InsertBaseStatement removeLogicalView( /** get analysis according to statement and params */ public static void analyzeDataPartition( - IAnalysis analysis, - List dataPartitionQueryParams, - String userName, - DataPartitionQueryFunc partitionQueryFunc) { + final IAnalysis analysis, + final List dataPartitionQueryParams, + final String userName, + final DataPartitionQueryFunc partitionQueryFunc) { - DataPartition dataPartition = + final DataPartition dataPartition = partitionQueryFunc.queryDataPartition(dataPartitionQueryParams, userName); if (dataPartition.isEmpty()) { analysis.setFinishQueryAfterAnalyze(true); @@ -323,20 +308,20 @@ public static void analyzeDataPartition( analysis.setDataPartitionInfo(dataPartition); } - public static void analyzeDelete(Delete node, MPPQueryContext queryContext) { + public static void analyzeDelete(final Delete node, final MPPQueryContext queryContext) { queryContext.setQueryType(QueryType.WRITE); validateSchema(node, queryContext); - try (ConfigNodeClient configNodeClient = + try (final ConfigNodeClient configNodeClient = ConfigNodeClientManager.getInstance().borrowClient(ConfigNodeInfo.CONFIG_REGION_ID); ) { // TODO: may use time and db/table to filter - TRegionRouteMapResp latestRegionRouteMap = configNodeClient.getLatestRegionRouteMap(); - Set replicaSets = new HashSet<>(); + final TRegionRouteMapResp latestRegionRouteMap = configNodeClient.getLatestRegionRouteMap(); + final Set replicaSets = new HashSet<>(); latestRegionRouteMap.getRegionRouteMap().entrySet().stream() .filter(e -> e.getKey().getType() == TConsensusGroupType.DataRegion) .forEach(e -> replicaSets.add(e.getValue())); node.setReplicaSets(replicaSets); - } catch (Exception e) { + } catch (final Exception e) { throw new IoTDBRuntimeException(e, TSStatusCode.CAN_NOT_CONNECT_CONFIGNODE.getStatusCode()); } } @@ -589,7 +574,7 @@ private static IDPredicate getIdPredicate( public interface DataPartitionQueryFunc { DataPartition 
queryDataPartition( - List dataPartitionQueryParams, String userName); + final List dataPartitionQueryParams, final String userName); } public interface DataPartitionQueryParamComputation { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java index 224d22520a0a..bc8959eb5009 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java @@ -45,7 +45,6 @@ import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.exception.ainode.GetModelInfoException; -import org.apache.iotdb.db.exception.metadata.table.TableAlreadyExistsException; import org.apache.iotdb.db.exception.metadata.template.TemplateIncompatibleException; import org.apache.iotdb.db.exception.metadata.view.UnsupportedViewException; import org.apache.iotdb.db.exception.sql.SemanticException; @@ -160,7 +159,6 @@ import org.apache.iotdb.db.queryengine.plan.statement.sys.ExplainStatement; import org.apache.iotdb.db.queryengine.plan.statement.sys.ShowQueriesStatement; import org.apache.iotdb.db.queryengine.plan.statement.sys.ShowVersionStatement; -import org.apache.iotdb.db.schemaengine.table.DataNodeTableCache; import org.apache.iotdb.db.schemaengine.template.Template; import org.apache.iotdb.db.storageengine.load.config.LoadTsFileConfigurator; import org.apache.iotdb.db.storageengine.load.metrics.LoadTsFileCostMetricsSet; @@ -2603,7 +2601,6 @@ public Analysis visitCreateTimeseries( Analysis analysis = new Analysis(); analysis.setRealStatement(createTimeSeriesStatement); - checkIsTableCompatible(createTimeSeriesStatement.getPath(), context, true); checkIsTemplateCompatible( createTimeSeriesStatement.getPath(), createTimeSeriesStatement.getAlias(), context, true); @@ -2659,19 +2656,6 @@ private void checkIsTemplateCompatible( } } - private void checkIsTableCompatible( - final PartialPath timeSeriesPath, final MPPQueryContext context, final boolean takeLock) { - if (takeLock) { - DataNodeSchemaLockManager.getInstance() - .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TABLE); - } - final Pair tableInfo = - DataNodeTableCache.getInstance().checkTableCreateAndPreCreateOnGivenPath(timeSeriesPath); - if (tableInfo != null) { - throw new SemanticException(new TableAlreadyExistsException(tableInfo.left, tableInfo.right)); - } - } - private void analyzeSchemaProps(final Map props) { if (props == null || props.isEmpty()) { return; @@ -2721,7 +2705,6 @@ public Analysis visitCreateAlignedTimeseries( Analysis analysis = new Analysis(); analysis.setRealStatement(createAlignedTimeSeriesStatement); - checkIsTableCompatible(createAlignedTimeSeriesStatement.getDevicePath(), context, true); checkIsTemplateCompatible( createAlignedTimeSeriesStatement.getDevicePath(), createAlignedTimeSeriesStatement.getMeasurements(), @@ -2750,7 +2733,6 @@ public Analysis visitInternalCreateTimeseries( Analysis analysis = new Analysis(); analysis.setRealStatement(internalCreateTimeSeriesStatement); - checkIsTableCompatible(internalCreateTimeSeriesStatement.getDevicePath(), context, true); checkIsTemplateCompatible( internalCreateTimeSeriesStatement.getDevicePath(), internalCreateTimeSeriesStatement.getMeasurements(), @@ -2788,7 +2770,6 @@ public Analysis visitInternalCreateMultiTimeSeries( 
.takeReadLock(context, SchemaLockType.TIMESERIES_VS_TEMPLATE); for (final Map.Entry> entry : internalCreateMultiTimeSeriesStatement.getDeviceMap().entrySet()) { - checkIsTableCompatible(entry.getKey(), context, false); checkIsTemplateCompatible( entry.getKey(), entry.getValue().right.getMeasurements(), null, context, false); pathPatternTree.appendFullPath(entry.getKey().concatNode(ONE_LEVEL_PATH_WILDCARD)); @@ -2820,7 +2801,6 @@ public Analysis visitCreateMultiTimeSeries( DataNodeSchemaLockManager.getInstance() .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TEMPLATE); for (int i = 0; i < timeseriesPathList.size(); i++) { - checkIsTableCompatible(timeseriesPathList.get(i), context, false); checkIsTemplateCompatible( timeseriesPathList.get(i), aliasList == null ? null : aliasList.get(i), context, false); } @@ -4087,7 +4067,6 @@ private void checkTargetPathsInCreateLogicalView( DataNodeSchemaLockManager.getInstance() .takeReadLock(context, SchemaLockType.TIMESERIES_VS_TEMPLATE); for (final PartialPath path : createLogicalViewStatement.getTargetPathList()) { - checkIsTableCompatible(path, context, false); checkIsTemplateCompatible(path, null, context, false); } } catch (final Exception e) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java index 53b1c9420461..d08d4ca013b9 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/ClusterPartitionFetcher.java @@ -25,18 +25,14 @@ import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.client.IClientManager; import org.apache.iotdb.commons.client.exception.ClientManagerException; -import org.apache.iotdb.commons.conf.IoTDBConstant; import org.apache.iotdb.commons.consensus.ConfigRegionId; -import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.exception.IoTDBException; import org.apache.iotdb.commons.partition.DataPartition; import org.apache.iotdb.commons.partition.DataPartitionQueryParam; import org.apache.iotdb.commons.partition.SchemaNodeManagementPartition; import org.apache.iotdb.commons.partition.SchemaPartition; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; -import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.path.PathPatternTree; -import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionReq; import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionTableResp; import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementReq; @@ -46,8 +42,6 @@ import org.apache.iotdb.confignode.rpc.thrift.TTimeSlotList; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.metadata.DatabaseModelException; -import org.apache.iotdb.db.exception.sql.SemanticException; import org.apache.iotdb.db.exception.sql.StatementAnalyzeException; import org.apache.iotdb.db.protocol.client.ConfigNodeClient; import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager; @@ -59,6 +53,8 @@ import org.apache.thrift.TException; import org.apache.tsfile.file.metadata.IDeviceID; +import javax.annotation.Nullable; + import java.io.IOException; import java.util.ArrayList; 
import java.util.Collections; @@ -67,6 +63,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; @@ -101,11 +98,6 @@ private ClusterPartitionFetcher() { @Override public SchemaPartition getSchemaPartition(final PathPatternTree patternTree) { - return getSchemaPartitionWithModel(patternTree, false); - } - - private SchemaPartition getSchemaPartitionWithModel( - final PathPatternTree patternTree, final boolean isTableModel) { try (final ConfigNodeClient client = configNodeClientManager.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { patternTree.constructTree(); @@ -115,7 +107,7 @@ private SchemaPartition getSchemaPartitionWithModel( SchemaPartition schemaPartition = partitionCache.getSchemaPartition(storageGroupToDeviceMap); if (null == schemaPartition) { final TSchemaPartitionTableResp schemaPartitionTableResp = - client.getSchemaPartitionTable(constructSchemaPartitionReq(patternTree, isTableModel)); + client.getSchemaPartitionTable(constructSchemaPartitionReq(patternTree)); if (schemaPartitionTableResp.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { schemaPartition = parseSchemaPartitionTableResp(schemaPartitionTableResp); @@ -129,7 +121,7 @@ private SchemaPartition getSchemaPartitionWithModel( } } return schemaPartition; - } catch (final ClientManagerException | TException | DatabaseModelException e) { + } catch (final ClientManagerException | TException e) { throw new StatementAnalyzeException( "An error occurred when executing getSchemaPartition():" + e.getMessage()); } @@ -147,7 +139,7 @@ public SchemaPartition getOrCreateSchemaPartition( SchemaPartition schemaPartition = partitionCache.getSchemaPartition(storageGroupToDeviceMap); if (null == schemaPartition) { final TSchemaPartitionTableResp schemaPartitionTableResp = - client.getOrCreateSchemaPartitionTable(constructSchemaPartitionReq(patternTree, false)); + client.getOrCreateSchemaPartitionTable(constructSchemaPartitionReq(patternTree)); if (schemaPartitionTableResp.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { schemaPartition = parseSchemaPartitionTableResp(schemaPartitionTableResp); @@ -161,7 +153,7 @@ public SchemaPartition getOrCreateSchemaPartition( } } return schemaPartition; - } catch (final ClientManagerException | TException | DatabaseModelException e) { + } catch (final ClientManagerException | TException e) { throw new StatementAnalyzeException( "An error occurred when executing getOrCreateSchemaPartition():" + e.getMessage()); } @@ -288,7 +280,7 @@ public DataPartition getOrCreateDataPartition( dataPartitionTableResp.getStatus().getCode())); } } - } catch (final ClientManagerException | TException | DatabaseModelException e) { + } catch (final ClientManagerException | TException e) { throw new StatementAnalyzeException( "An error occurred when executing getOrCreateDataPartition():" + e.getMessage()); } @@ -311,28 +303,34 @@ public SchemaPartition getOrCreateSchemaPartition( return getOrCreateSchemaPartition(database, deviceIDs, true, userName); } + // deviceIDs is null when searching the whole database, in which case we skip the cache and + // fetch the configNode directly @Override public SchemaPartition getSchemaPartition( - final String database, final List deviceIDs) { + final String database, final @Nullable List deviceIDs) { return getOrCreateSchemaPartition(database, deviceIDs, false, null); } private SchemaPartition getOrCreateSchemaPartition( final String
database, - final List deviceIDs, + final @Nullable List deviceIDs, final boolean isAutoCreate, final String userName) { try (final ConfigNodeClient client = configNodeClientManager.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { partitionCache.checkAndAutoCreateDatabase(database, isAutoCreate, userName); SchemaPartition schemaPartition = - partitionCache.getSchemaPartition(Collections.singletonMap(database, deviceIDs)); + Objects.nonNull(deviceIDs) + ? partitionCache.getSchemaPartition(Collections.singletonMap(database, deviceIDs)) + : null; if (null == schemaPartition) { final List partitionSlots = - deviceIDs.stream() - .map(partitionExecutor::getSeriesPartitionSlot) - .distinct() - .collect(Collectors.toList()); + Objects.nonNull(deviceIDs) + ? deviceIDs.stream() + .map(partitionExecutor::getSeriesPartitionSlot) + .distinct() + .collect(Collectors.toList()) + : Collections.emptyList(); final TSchemaPartitionTableResp schemaPartitionTableResp = isAutoCreate ? client.getOrCreateSchemaPartitionTableWithSlots( @@ -351,32 +349,19 @@ private SchemaPartition getOrCreateSchemaPartition( schemaPartitionTableResp.getStatus().getCode())); } } return schemaPartition; - } catch (final ClientManagerException | TException | DatabaseModelException e) { + } catch (final ClientManagerException | TException e) { throw new StatementAnalyzeException( "An error occurred when executing getSchemaPartition():" + e.getMessage()); } } - @Override - public SchemaPartition getSchemaPartition(final String database) { - final PathPatternTree patternTree = new PathPatternTree(); - try { - patternTree.appendPathPattern( - PartialPath.getDatabasePath(database) - .concatNode(IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD)); - } catch (final IllegalPathException e) { - throw new SemanticException(e); - } - return getSchemaPartitionWithModel(patternTree, true); - } - /** split data partition query param by database */ private Map> splitDataPartitionQueryParam( final List dataPartitionQueryParams, final boolean isAutoCreate, - final String userName) - throws DatabaseModelException { + final String userName) { final List deviceIDs = new ArrayList<>(); for (final DataPartitionQueryParam dataPartitionQueryParam : dataPartitionQueryParams) { deviceIDs.add(dataPartitionQueryParam.getDeviceID()); @@ -404,11 +389,10 @@ private Map> splitDataPartitionQueryParam( return result; } - private TSchemaPartitionReq constructSchemaPartitionReq( - final PathPatternTree patternTree, final boolean isTableModel) { + private TSchemaPartitionReq constructSchemaPartitionReq(final PathPatternTree patternTree) { try { - return new TSchemaPartitionReq(patternTree.serialize()).setIsTableModel(isTableModel); - } catch (IOException e) { + return new TSchemaPartitionReq(patternTree.serialize()); + } catch (final IOException e) { throw new StatementAnalyzeException("An error occurred when serializing pattern tree"); } } @@ -425,7 +409,7 @@ private TSchemaNodeManagementReq constructSchemaNodeManagementPartitionReq( schemaNodeManagementReq.setLevel(level); } return schemaNodeManagementReq; - } catch (IOException e) { + } catch (final IOException e) { throw new StatementAnalyzeException("An error occurred when serializing pattern tree"); } } @@ -473,8 +457,7 @@ private TDataPartitionReq constructDataPartitionReq( k, new TTimeSlotList( new ArrayList<>(v.timeSlotList), v.needLeftAll, v.needRightAll))); - partitionSlotsMap.put( - PathUtils.qualifyDatabaseName(entry.getKey()), deviceToTimePartitionMap);
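[Illustration only, not part of the patch] A minimal caller-side sketch of the nullable-deviceIDs contract introduced in getSchemaPartition above; the fetcher handle, the database name and the device-ID list are assumed names for the example:
    // Passing a concrete device-ID list may be answered from the partition cache;
    // passing null means "the whole database": the cache is skipped and an empty
    // partition-slot list is sent to the ConfigNode.
    final IPartitionFetcher fetcher = ClusterPartitionFetcher.getInstance();
    final SchemaPartition perDevice = fetcher.getSchemaPartition("db1", deviceIds); // deviceIds: a List of IDeviceID
    final SchemaPartition wholeDb = fetcher.getSchemaPartition("db1", null);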
+ partitionSlotsMap.put(entry.getKey(), deviceToTimePartitionMap); } return new TDataPartitionReq(partitionSlotsMap); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IAnalysis.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IAnalysis.java index e03f75db230f..4f48961691d7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IAnalysis.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IAnalysis.java @@ -72,7 +72,7 @@ default void setRealStatement(Statement realStatement) {} void addEndPointToRedirectNodeList(TEndPoint endPoint); - TimePredicate getCovertedTimePredicate(); + TimePredicate getConvertedTimePredicate(); void setDatabaseName(String databaseName); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IPartitionFetcher.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IPartitionFetcher.java index f90e349568db..75a59bd43594 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IPartitionFetcher.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/IPartitionFetcher.java @@ -28,6 +28,8 @@ import org.apache.tsfile.file.metadata.IDeviceID; +import javax.annotation.Nullable; + import java.util.List; import java.util.Map; @@ -75,7 +77,7 @@ DataPartition getOrCreateDataPartition( * @param userName */ DataPartition getOrCreateDataPartition( - List dataPartitionQueryParams, String userName); + final List dataPartitionQueryParams, final String userName); /** Get schema partition and matched nodes according to path pattern tree. */ default SchemaNodeManagementPartition getSchemaNodeManagementPartition( @@ -113,14 +115,6 @@ SchemaPartition getOrCreateSchemaPartition( * *
The device id shall be [table, seg1, ....] */ - SchemaPartition getSchemaPartition(final String database, final List deviceIDs); - - /** - * For data query with partial device id conditions. - * - *
The database shall start with "root.". Concat this to a user-provided db name if necessary. - * - *
The device id shall be [table, seg1, ....] - */ - SchemaPartition getSchemaPartition(final String database); + SchemaPartition getSchemaPartition( + final String database, final @Nullable List deviceIDs); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java index e5d11d74f785..2ca3c4ce749f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/partition/PartitionCache.java @@ -48,7 +48,6 @@ import org.apache.iotdb.db.auth.AuthorityChecker; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.metadata.DatabaseModelException; import org.apache.iotdb.db.exception.sql.StatementAnalyzeException; import org.apache.iotdb.db.protocol.client.ConfigNodeClient; import org.apache.iotdb.db.protocol.client.ConfigNodeClientManager; @@ -75,7 +74,6 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; public class PartitionCache { @@ -90,7 +88,7 @@ public class PartitionCache { private final SeriesPartitionExecutor partitionExecutor; /** the cache of database */ - private final Map databaseCache = new HashMap<>(); + private final Set databaseCache = new HashSet<>(); /** database -> schemaPartitionTable */ private final Cache schemaPartitionCache; @@ -141,8 +139,7 @@ public Map> getDatabaseToDevice( final List deviceIDs, final boolean tryToFetch, final boolean isAutoCreate, - final String userName) - throws DatabaseModelException { + final String userName) { final DatabaseCacheResult> result = new DatabaseCacheResult>() { @Override @@ -166,8 +163,7 @@ public Map getDeviceToDatabase( final List deviceIDs, final boolean tryToFetch, final boolean isAutoCreate, - final String userName) - throws DatabaseModelException { + final String userName) { final DatabaseCacheResult result = new DatabaseCacheResult() { @Override @@ -185,14 +181,10 @@ public void put(final IDeviceID device, final String databaseName) { * @param deviceID the path of device * @return database name, return {@code null} if cache miss */ - private String getDatabaseName(final IDeviceID deviceID) throws DatabaseModelException { - for (final Map.Entry entry : databaseCache.entrySet()) { - final String database = entry.getKey(); + private String getDatabaseName(final IDeviceID deviceID) { + for (final String database : databaseCache) { if (PathUtils.isStartWith(deviceID, database)) { - if (Boolean.TRUE.equals(entry.getValue())) { - throw new DatabaseModelException(database, true); - } - return entry.getKey(); + return database; } } return null; @@ -204,16 +196,10 @@ private String getDatabaseName(final IDeviceID deviceID) throws DatabaseModelExc * @param database name * @return {@code true} if this database exists */ - private boolean containsDatabase(final String database) throws DatabaseModelException { + private boolean containsDatabase(final String database) { try { databaseCacheLock.readLock().lock(); - if (databaseCache.containsKey(database)) { - if (Boolean.FALSE.equals(databaseCache.get(database))) { - throw new DatabaseModelException(PathUtils.unQualifyDatabaseName(database), false); - } - return 
true; - } - return false; + return databaseCache.contains(database); } finally { databaseCacheLock.readLock().unlock(); } @@ -227,7 +213,7 @@ private boolean containsDatabase(final String database) throws DatabaseModelExce */ private void fetchDatabaseAndUpdateCache( final DatabaseCacheResult result, final List deviceIDs) - throws ClientManagerException, TException, DatabaseModelException { + throws ClientManagerException, TException { databaseCacheLock.writeLock().lock(); try (final ConfigNodeClient client = configNodeClientManager.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { @@ -235,16 +221,13 @@ private void fetchDatabaseAndUpdateCache( getDatabaseMap(result, deviceIDs, true); if (!result.isSuccess()) { final TGetDatabaseReq req = - new TGetDatabaseReq(ROOT_PATH, SchemaConstant.ALL_MATCH_SCOPE_BINARY); + new TGetDatabaseReq(ROOT_PATH, SchemaConstant.ALL_MATCH_SCOPE_BINARY) + .setIsTableModel(false); final TDatabaseSchemaResp databaseSchemaResp = client.getMatchedDatabaseSchemas(req); if (databaseSchemaResp.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { // update all database into cache - updateDatabaseCache( - databaseSchemaResp.getDatabaseSchemaMap().entrySet().stream() - .collect( - Collectors.toMap( - Map.Entry::getKey, entry -> entry.getValue().isIsTableModel()))); + updateDatabaseCache(databaseSchemaResp.getDatabaseSchemaMap().keySet()); getDatabaseMap(result, deviceIDs, true); } } @@ -259,15 +242,12 @@ private void fetchDatabaseAndUpdateCache() throws ClientManagerException, TExcep try (final ConfigNodeClient client = configNodeClientManager.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { final TGetDatabaseReq req = - new TGetDatabaseReq(ROOT_PATH, SchemaConstant.ALL_MATCH_SCOPE_BINARY); + new TGetDatabaseReq(ROOT_PATH, SchemaConstant.ALL_MATCH_SCOPE_BINARY) + .setIsTableModel(true); final TDatabaseSchemaResp databaseSchemaResp = client.getMatchedDatabaseSchemas(req); if (databaseSchemaResp.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { // update all database into cache - updateDatabaseCache( - databaseSchemaResp.getDatabaseSchemaMap().entrySet().stream() - .collect( - Collectors.toMap( - Map.Entry::getKey, entry -> entry.getValue().isIsTableModel()))); + updateDatabaseCache(databaseSchemaResp.getDatabaseSchemaMap().keySet()); } } finally { databaseCacheLock.writeLock().unlock(); @@ -309,7 +289,7 @@ private void createDatabaseAndUpdateCache( } // Try to create databases one by one until done or one database fail - final Map successFullyCreatedDatabase = new HashMap<>(); + final Set successFullyCreatedDatabase = new HashSet<>(); for (final String databaseName : databaseNamesNeedCreated) { final long startTime = System.nanoTime(); try { @@ -333,7 +313,7 @@ private void createDatabaseAndUpdateCache( final TSStatus tsStatus = client.setDatabase(databaseSchema); if (TSStatusCode.SUCCESS_STATUS.getStatusCode() == tsStatus.getCode() || TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode() == tsStatus.getCode()) { - successFullyCreatedDatabase.put(databaseName, false); + successFullyCreatedDatabase.add(databaseName); // In tree model, if the user creates a conflict database concurrently, for instance, // the database created by user is root.db.ss.a, the auto-creation failed database is // root.db, we wait till "getOrCreatePartition" to judge if the time series (like @@ -392,7 +372,7 @@ private void createDatabaseAndUpdateCache(final String database, final String us if (TSStatusCode.SUCCESS_STATUS.getStatusCode() == tsStatus.getCode() 
|| TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode() == tsStatus.getCode()) { // Try to update cache by databases successfully created - updateDatabaseCache(Collections.singletonMap(database, true)); + updateDatabaseCache(Collections.singleton(database)); } else { logger.warn( "[{} Cache] failed to create database {}", CacheMetrics.DATABASE_CACHE_NAME, database); @@ -414,8 +394,7 @@ private void createDatabaseAndUpdateCache(final String database, final String us private void getDatabaseMap( final DatabaseCacheResult result, final List deviceIDs, - final boolean failFast) - throws DatabaseModelException { + final boolean failFast) { try { databaseCacheLock.readLock().lock(); // reset result before try @@ -464,8 +443,7 @@ private void getDatabaseCacheResult( final List deviceIDs, final boolean tryToFetch, final boolean isAutoCreate, - final String userName) - throws DatabaseModelException { + final String userName) { if (!isAutoCreate) { // TODO: avoid IDeviceID contains "*" // miss when deviceId contains * @@ -498,8 +476,7 @@ private void getDatabaseCacheResult( } public void checkAndAutoCreateDatabase( - final String database, final boolean isAutoCreate, final String userName) - throws DatabaseModelException { + final String database, final boolean isAutoCreate, final String userName) { boolean isExisted = containsDatabase(database); if (!isExisted) { try { @@ -522,10 +499,10 @@ public void checkAndAutoCreateDatabase( * * @param databaseNames the database names that need to update */ - public void updateDatabaseCache(final Map databaseNames) { + public void updateDatabaseCache(final Set databaseNames) { databaseCacheLock.writeLock().lock(); try { - databaseCache.putAll(databaseNames); + databaseCache.addAll(databaseNames); } finally { databaseCacheLock.writeLock().unlock(); } @@ -637,21 +614,23 @@ public void invalidReplicaSetCache() { * @param databaseToDeviceMap database to devices map * @return SchemaPartition of databaseToDeviceMap */ - public SchemaPartition getSchemaPartition(Map> databaseToDeviceMap) { + public SchemaPartition getSchemaPartition( + final Map> databaseToDeviceMap) { schemaPartitionCacheLock.readLock().lock(); try { if (databaseToDeviceMap.isEmpty()) { cacheMetrics.record(false, CacheMetrics.SCHEMA_PARTITION_CACHE_NAME); return null; } - Map> schemaPartitionMap = + final Map> schemaPartitionMap = new HashMap<>(); // check cache for each database - for (Map.Entry> entry : databaseToDeviceMap.entrySet()) { - String databaseName = entry.getKey(); - Map regionReplicaSetMap = + for (final Map.Entry> entry : databaseToDeviceMap.entrySet()) { + final String databaseName = entry.getKey(); + final Map regionReplicaSetMap = schemaPartitionMap.computeIfAbsent(databaseName, k -> new HashMap<>()); - SchemaPartitionTable schemaPartitionTable = schemaPartitionCache.getIfPresent(databaseName); + final SchemaPartitionTable schemaPartitionTable = + schemaPartitionCache.getIfPresent(databaseName); if (null == schemaPartitionTable) { // if database not find, then return cache miss. 
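[Illustration only, not part of the patch] With the model flag dropped, the database cache changed above is a plain set of names, so an update reduces to a set insertion; the partitionCache handle and the database names are assumed for the example:
    // Tree-model names keep the "root." prefix, table-model names are stored as-is;
    // containsDatabase(...) is now a plain membership test with no model check.
    partitionCache.updateDatabaseCache(Collections.singleton("root.tree_db"));
    partitionCache.updateDatabaseCache(Collections.singleton("table_db"));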
logger.debug( @@ -661,11 +640,11 @@ public SchemaPartition getSchemaPartition(Map> databaseT cacheMetrics.record(false, CacheMetrics.SCHEMA_PARTITION_CACHE_NAME); return null; } - Map map = + final Map map = schemaPartitionTable.getSchemaPartitionMap(); // check cache for each device - for (IDeviceID device : entry.getValue()) { - TSeriesPartitionSlot seriesPartitionSlot = + for (final IDeviceID device : entry.getValue()) { + final TSeriesPartitionSlot seriesPartitionSlot = partitionExecutor.getSeriesPartitionSlot(device); if (!map.containsKey(seriesPartitionSlot)) { // if one device not find, then return cache miss. @@ -676,8 +655,8 @@ public SchemaPartition getSchemaPartition(Map> databaseT cacheMetrics.record(false, CacheMetrics.SCHEMA_PARTITION_CACHE_NAME); return null; } - TConsensusGroupId consensusGroupId = map.get(seriesPartitionSlot); - TRegionReplicaSet regionReplicaSet = getRegionReplicaSet(consensusGroupId); + final TConsensusGroupId consensusGroupId = map.get(seriesPartitionSlot); + final TRegionReplicaSet regionReplicaSet = getRegionReplicaSet(consensusGroupId); regionReplicaSetMap.put(seriesPartitionSlot, regionReplicaSet); } } @@ -734,18 +713,18 @@ public SchemaPartition getSchemaPartition(String database) { * @param schemaPartitionTable database to SeriesPartitionSlot to ConsensusGroupId map */ public void updateSchemaPartitionCache( - Map> schemaPartitionTable) { + final Map> schemaPartitionTable) { schemaPartitionCacheLock.writeLock().lock(); try { - for (Map.Entry> entry1 : + for (final Map.Entry> entry1 : schemaPartitionTable.entrySet()) { - String databaseName = entry1.getKey(); + final String databaseName = entry1.getKey(); SchemaPartitionTable result = schemaPartitionCache.getIfPresent(databaseName); if (null == result) { result = new SchemaPartitionTable(); schemaPartitionCache.put(databaseName, result); } - Map seriesPartitionSlotTConsensusGroupIdMap = + final Map seriesPartitionSlotTConsensusGroupIdMap = result.getSchemaPartitionMap(); seriesPartitionSlotTConsensusGroupIdMap.putAll(entry1.getValue()); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileToTableModelAnalyzer.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileToTableModelAnalyzer.java index 33c1f73e6fc5..2975365ec94f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileToTableModelAnalyzer.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/load/LoadTsFileToTableModelAnalyzer.java @@ -60,8 +60,6 @@ import java.util.concurrent.ExecutionException; import static org.apache.iotdb.db.queryengine.plan.execution.config.TableConfigTaskVisitor.validateDatabaseName; -import static org.apache.iotdb.db.utils.constant.SqlConstant.ROOT; -import static org.apache.tsfile.common.constant.TsFileConstant.PATH_SEPARATOR_CHAR; public class LoadTsFileToTableModelAnalyzer extends LoadTsFileAnalyzer { private static final Logger LOGGER = @@ -192,8 +190,7 @@ private void autoCreateDatabaseIfAbsent(final String database) throws VerifyMeta } final CreateDBTask task = - new CreateDBTask( - new TDatabaseSchema(ROOT + PATH_SEPARATOR_CHAR + database).setIsTableModel(true), true); + new CreateDBTask(new TDatabaseSchema(database).setIsTableModel(true), true); try { final ListenableFuture future = task.execute(ClusterConfigTaskExecutor.getInstance()); diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/QueryExecution.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/QueryExecution.java index c1e6d725455e..88e53be6a42f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/QueryExecution.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/QueryExecution.java @@ -289,11 +289,11 @@ public void doLogicalPlan() { // Generate the distributed plan and split it into fragments public void doDistributedPlan() { - long startTime = System.nanoTime(); + final long startTime = System.nanoTime(); this.distributedPlan = planner.doDistributionPlan(analysis, logicalPlan); if (analysis.isQuery()) { - long distributionPlanCost = System.nanoTime() - startTime; + final long distributionPlanCost = System.nanoTime() - startTime; context.setDistributionPlanCost(distributionPlanCost); QUERY_PLAN_COST_METRIC_SET.recordPlanCost( TREE_TYPE, DISTRIBUTION_PLANNER, distributionPlanCost); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/TableConfigTaskVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/TableConfigTaskVisitor.java index 0b5c6f2379ba..7454c724e252 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/TableConfigTaskVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/TableConfigTaskVisitor.java @@ -177,9 +177,7 @@ import static org.apache.iotdb.db.queryengine.plan.execution.config.metadata.relational.CreateDBTask.TTL_KEY; import static org.apache.iotdb.db.queryengine.plan.relational.type.InternalTypeManager.getTSDataType; import static org.apache.iotdb.db.queryengine.plan.relational.type.TypeSignatureTranslator.toTypeSignature; -import static org.apache.iotdb.db.utils.constant.SqlConstant.ROOT; import static org.apache.tsfile.common.constant.TsFileConstant.PATH_SEPARATOR; -import static org.apache.tsfile.common.constant.TsFileConstant.PATH_SEPARATOR_CHAR; public class TableConfigTaskVisitor extends AstVisitor { @@ -228,7 +226,7 @@ private IConfigTask visitDatabaseStatement( accessControl.checkCanCreateDatabase(context.getSession().getUserName(), dbName); - schema.setName(ROOT + PATH_SEPARATOR_CHAR + dbName); + schema.setName(dbName); for (final Property property : node.getProperties()) { final String key = property.getName().getValue().toLowerCase(Locale.ENGLISH); @@ -511,7 +509,6 @@ protected IConfigTask visitSetProperties( public static void validateDatabaseName(final String dbName) throws SemanticException { // Check database length here - // We need to calculate the database name without "root." 
if (dbName.contains(PATH_SEPARATOR) || !IoTDBConfig.STORAGE_GROUP_PATTERN.matcher(dbName).matches() || dbName.length() > MAX_DATABASE_NAME_LENGTH) { @@ -628,7 +625,7 @@ protected IConfigTask visitDescribeTable( } @Override - protected IConfigTask visitFlush(Flush node, MPPQueryContext context) { + protected IConfigTask visitFlush(final Flush node, final MPPQueryContext context) { context.setQueryType(QueryType.WRITE); accessControl.checkUserHasMaintainPrivilege(context.getSession().getUserName()); return new FlushTask(((FlushStatement) node.getInnerTreeStatement())); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java index 0ee63cd981a6..5e4d68b084c1 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java @@ -139,7 +139,6 @@ import org.apache.iotdb.db.conf.IoTDBDescriptor; import org.apache.iotdb.db.exception.BatchProcessException; import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.metadata.DatabaseModelException; import org.apache.iotdb.db.exception.metadata.PathNotExistException; import org.apache.iotdb.db.exception.metadata.SchemaQuotaExceededException; import org.apache.iotdb.db.exception.sql.SemanticException; @@ -318,7 +317,6 @@ import static org.apache.iotdb.commons.schema.SchemaConstant.ALL_RESULT_NODES; import static org.apache.iotdb.db.protocol.client.ConfigNodeClient.MSG_RECONNECTION_FAIL; import static org.apache.iotdb.db.utils.constant.SqlConstant.ROOT; -import static org.apache.tsfile.common.constant.TsFileConstant.PATH_SEPARATOR_CHAR; public class ClusterConfigTaskExecutor implements IConfigTaskExecutor { @@ -436,7 +434,8 @@ public SettableFuture showDatabase( // Send request to some API server final TGetDatabaseReq req = new TGetDatabaseReq( - databasePathPattern, showDatabaseStatement.getAuthorityScope().serialize()); + databasePathPattern, showDatabaseStatement.getAuthorityScope().serialize()) + .setIsTableModel(false); final TShowDatabaseResp resp = client.showDatabase(req); // build TSBlock showDatabaseStatement.buildTSBlock(resp.getDatabaseInfoMap(), future); @@ -1083,22 +1082,23 @@ public SettableFuture merge(boolean onCluster) { } @Override - public SettableFuture flush(TFlushReq tFlushReq, boolean onCluster) { - SettableFuture future = SettableFuture.create(); + public SettableFuture flush( + final TFlushReq tFlushReq, final boolean onCluster) { + final SettableFuture future = SettableFuture.create(); TSStatus tsStatus = new TSStatus(); if (onCluster) { - try (ConfigNodeClient client = + try (final ConfigNodeClient client = CONFIG_NODE_CLIENT_MANAGER.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { // Send request to some API server tsStatus = client.flush(tFlushReq); - } catch (ClientManagerException | TException e) { + } catch (final ClientManagerException | TException e) { future.setException(e); } } else { try { StorageEngine.getInstance().operateFlush(tFlushReq); tsStatus = RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); - } catch (Exception e) { + } catch (final Exception e) { tsStatus = RpcUtils.getStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR, e.getMessage()); } } @@ -1479,7 +1479,7 
@@ public SettableFuture showRegion( final ShowRegionStatement showRegionStatement, final boolean isTableModel) { final SettableFuture future = SettableFuture.create(); TShowRegionResp showRegionResp = new TShowRegionResp(); - final TShowRegionReq showRegionReq = new TShowRegionReq(); + final TShowRegionReq showRegionReq = new TShowRegionReq().setIsTableModel(isTableModel); showRegionReq.setConsensusGroupType(showRegionStatement.getRegionType()); if (showRegionStatement.getStorageGroups() == null) { showRegionReq.setDatabases(null); @@ -3051,7 +3051,8 @@ public SettableFuture showDatabases( CONFIG_NODE_CLIENT_MANAGER.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { // Send request to some API server final TGetDatabaseReq req = - new TGetDatabaseReq(databasePathPattern, ALL_MATCH_SCOPE.serialize()); + new TGetDatabaseReq(databasePathPattern, ALL_MATCH_SCOPE.serialize()) + .setIsTableModel(true); final TShowDatabaseResp resp = client.showDatabase(req); // build TSBlock ShowDBTask.buildTSBlock(resp.getDatabaseInfoMap(), future, showDB.isDetails(), canSeenDB); @@ -3080,16 +3081,10 @@ public SettableFuture useDatabase( CONFIG_NODE_CLIENT_MANAGER.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { // Send request to some API server final TGetDatabaseReq req = - new TGetDatabaseReq(databasePathPattern, ALL_MATCH_SCOPE.serialize()); + new TGetDatabaseReq(databasePathPattern, ALL_MATCH_SCOPE.serialize()) + .setIsTableModel(true); final TShowDatabaseResp resp = client.showDatabase(req); if (!resp.getDatabaseInfoMap().isEmpty()) { - if (!resp.getDatabaseInfoMap() - .get(PathUtils.qualifyDatabaseName(useDB.getDatabaseId().getValue())) - .isIsTableModel()) { - throw new IoTDBException( - new DatabaseModelException(useDB.getDatabaseId().getValue(), false).getMessage(), - TSStatusCode.DATABASE_MODEL.getStatusCode()); - } clientSession.setDatabaseName(useDB.getDatabaseId().getValue()); future.set(new ConfigTaskResult(TSStatusCode.SUCCESS_STATUS)); } else { @@ -3098,22 +3093,17 @@ public SettableFuture useDatabase( String.format("Unknown database %s", useDB.getDatabaseId().getValue()), TSStatusCode.DATABASE_NOT_EXIST.getStatusCode())); } - } catch (final IOException | ClientManagerException | TException | IoTDBException e) { + } catch (final IOException | ClientManagerException | TException e) { future.setException(e); } return future; } - private String transformDBName(final String dbName) { - return ROOT + PATH_SEPARATOR_CHAR + dbName; - } - @Override public SettableFuture dropDatabase(final DropDB dropDB) { final SettableFuture future = SettableFuture.create(); final TDeleteDatabasesReq req = - new TDeleteDatabasesReq( - Collections.singletonList(transformDBName(dropDB.getDbName().getValue()))) + new TDeleteDatabasesReq(Collections.singletonList(dropDB.getDbName().getValue())) .setIsTableModel(true); try (final ConfigNodeClient client = CONFIG_NODE_CLIENT_MANAGER.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) { @@ -3164,8 +3154,7 @@ public SettableFuture createDatabase( } else { future.setException( new IoTDBException( - String.format( - "Database %s already exists", databaseSchema.getName().substring(5)), + String.format("Database %s already exists", databaseSchema.getName()), TSStatusCode.DATABASE_ALREADY_EXISTS.getStatusCode())); } } else { @@ -3579,7 +3568,7 @@ public SettableFuture deleteDevice( final TDeleteTableDeviceReq req = new TDeleteTableDeviceReq( - PathUtils.qualifyDatabaseName(deleteDevice.getDatabase()), + deleteDevice.getDatabase(), deleteDevice.getTableName(), queryId, 
ByteBuffer.wrap(patternStream.toByteArray()), diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/AbstractAlterOrDropTableTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/AbstractAlterOrDropTableTask.java index 809bd8cbfd48..66312ea1441a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/AbstractAlterOrDropTableTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/AbstractAlterOrDropTableTask.java @@ -19,8 +19,6 @@ package org.apache.iotdb.db.queryengine.plan.execution.config.metadata.relational; -import org.apache.iotdb.commons.utils.PathUtils; - abstract class AbstractAlterOrDropTableTask extends AbstractTableTask { protected final String queryId; @@ -32,7 +30,7 @@ protected AbstractAlterOrDropTableTask( final String tableName, final String queryId, final boolean tableIfExists) { - super(PathUtils.qualifyDatabaseName(database), tableName); + super(database, tableName); this.queryId = queryId; this.tableIfExists = tableIfExists; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/CreateTableTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/CreateTableTask.java index 963440330463..45c5bfcf9f26 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/CreateTableTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/CreateTableTask.java @@ -20,7 +20,6 @@ package org.apache.iotdb.db.queryengine.plan.execution.config.metadata.relational; import org.apache.iotdb.commons.schema.table.TsTable; -import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.db.queryengine.plan.execution.config.ConfigTaskResult; import org.apache.iotdb.db.queryengine.plan.execution.config.IConfigTask; import org.apache.iotdb.db.queryengine.plan.execution.config.executor.IConfigTaskExecutor; @@ -35,8 +34,7 @@ public class CreateTableTask implements IConfigTask { private final boolean ifNotExists; - public CreateTableTask(final TsTable table, String database, final boolean ifNotExists) { - database = PathUtils.qualifyDatabaseName(database); + public CreateTableTask(final TsTable table, final String database, final boolean ifNotExists) { this.table = table; this.database = database; this.ifNotExists = ifNotExists; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/ShowDBTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/ShowDBTask.java index 8873e34272b4..b1cf77601fe6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/ShowDBTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/metadata/relational/ShowDBTask.java @@ -84,7 +84,7 @@ private static void buildTSBlockForNonDetails( final TsBlockBuilder builder = new TsBlockBuilder(outputDataTypes); for (final Map.Entry entry : storageGroupInfoMap.entrySet()) { - final String dbName = entry.getKey().substring(5); + final String dbName = entry.getKey(); if 
(!canSeenDB.apply(dbName)) { continue; } @@ -123,7 +123,7 @@ private static void buildTSBlockForDetails( final TsBlockBuilder builder = new TsBlockBuilder(outputDataTypes); for (final Map.Entry entry : storageGroupInfoMap.entrySet()) { - final String dbName = entry.getKey().substring(5); + final String dbName = entry.getKey(); if (!canSeenDB.apply(dbName)) { continue; } @@ -144,12 +144,8 @@ private static void buildTSBlockForDetails( builder.getColumnBuilder(2).writeInt(storageGroupInfo.getSchemaReplicationFactor()); builder.getColumnBuilder(3).writeInt(storageGroupInfo.getDataReplicationFactor()); builder.getColumnBuilder(4).writeLong(storageGroupInfo.getTimePartitionInterval()); - builder - .getColumnBuilder(5) - .writeBinary( - new Binary( - storageGroupInfo.isIsTableModel() ? "TABLE" : "TREE", - TSFileConfig.STRING_CHARSET)); + builder.getColumnBuilder(5).writeInt(storageGroupInfo.getSchemaRegionNum()); + builder.getColumnBuilder(6).writeInt(storageGroupInfo.getDataRegionNum()); builder.declarePosition(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/sys/FlushTask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/sys/FlushTask.java index 69df99a92581..b4b58224f900 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/sys/FlushTask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/sys/FlushTask.java @@ -20,7 +20,6 @@ package org.apache.iotdb.db.queryengine.plan.execution.config.sys; import org.apache.iotdb.common.rpc.thrift.TFlushReq; -import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.db.queryengine.plan.execution.config.ConfigTaskResult; import org.apache.iotdb.db.queryengine.plan.execution.config.IConfigTask; import org.apache.iotdb.db.queryengine.plan.execution.config.executor.IConfigTaskExecutor; @@ -28,27 +27,20 @@ import com.google.common.util.concurrent.ListenableFuture; -import java.util.ArrayList; -import java.util.List; - public class FlushTask implements IConfigTask { private final FlushStatement flushStatement; - public FlushTask(FlushStatement flushStatement) { + public FlushTask(final FlushStatement flushStatement) { this.flushStatement = flushStatement; } @Override - public ListenableFuture execute(IConfigTaskExecutor configTaskExecutor) + public ListenableFuture execute(final IConfigTaskExecutor configTaskExecutor) throws InterruptedException { - TFlushReq tFlushReq = new TFlushReq(); - List storageGroups = new ArrayList<>(); - if (flushStatement.getStorageGroups() != null) { - for (PartialPath partialPath : flushStatement.getStorageGroups()) { - storageGroups.add(partialPath.getFullPath()); - } - tFlushReq.setStorageGroups(storageGroups); + final TFlushReq tFlushReq = new TFlushReq(); + if (flushStatement.getDatabases() != null) { + tFlushReq.setStorageGroups(flushStatement.getDatabases()); } if (flushStatement.isSeq() != null) { tFlushReq.setIsSeq(flushStatement.isSeq().toString()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java index 60bfcb81cd0a..207ff7522a73 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java @@ -3329,20 +3329,20 @@ private 
String parseAttributeValue(IoTDBSqlParser.AttributeValueContext ctx) { // Flush @Override - public Statement visitFlush(IoTDBSqlParser.FlushContext ctx) { - FlushStatement flushStatement = new FlushStatement(StatementType.FLUSH); - List storageGroups = null; + public Statement visitFlush(final IoTDBSqlParser.FlushContext ctx) { + final FlushStatement flushStatement = new FlushStatement(StatementType.FLUSH); + List storageGroups = null; if (ctx.boolean_literal() != null) { flushStatement.setSeq(Boolean.parseBoolean(ctx.boolean_literal().getText())); } flushStatement.setOnCluster(ctx.LOCAL() == null); if (ctx.prefixPath(0) != null) { storageGroups = new ArrayList<>(); - for (IoTDBSqlParser.PrefixPathContext prefixPathContext : ctx.prefixPath()) { - storageGroups.add(parsePrefixPath(prefixPathContext)); + for (final IoTDBSqlParser.PrefixPathContext prefixPathContext : ctx.prefixPath()) { + storageGroups.add(parsePrefixPath(prefixPathContext).getFullPath()); } } - flushStatement.setStorageGroups(storageGroups); + flushStatement.setDatabases(storageGroups); return flushStatement; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/WriteFragmentParallelPlanner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/WriteFragmentParallelPlanner.java index d43b18c3241f..53c4d3210d2f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/WriteFragmentParallelPlanner.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/WriteFragmentParallelPlanner.java @@ -74,7 +74,7 @@ public List parallelPlan() { new FragmentInstance( new PlanFragment(fragment.getId(), split), fragment.getId().genFragmentInstanceId(), - analysis.getCovertedTimePredicate(), + analysis.getConvertedTimePredicate(), queryContext.getQueryType(), queryContext.getTimeOut(), queryContext.getSession()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertTabletNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertTabletNode.java index ed88d5cf191a..703217434535 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertTabletNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/plan/node/write/RelationalInsertTabletNode.java @@ -184,11 +184,11 @@ protected InsertTabletNode getEmptySplit(int count) { } protected Map> splitByReplicaSet( - Map deviceIDSplitInfoMap, IAnalysis analysis) { - Map> splitMap = new HashMap<>(); - Map endPointMap = new HashMap<>(); + final Map deviceIDSplitInfoMap, final IAnalysis analysis) { + final Map> splitMap = new HashMap<>(); + final Map endPointMap = new HashMap<>(); - for (Map.Entry entry : deviceIDSplitInfoMap.entrySet()) { + for (final Map.Entry entry : deviceIDSplitInfoMap.entrySet()) { final IDeviceID deviceID = entry.getKey(); final PartitionSplitInfo splitInfo = entry.getValue(); final List replicaSets = @@ -206,15 +206,15 @@ protected Map> splitByReplicaSet( .get(0) .getClientRpcEndPoint()); for (int i = 0; i < replicaSets.size(); i++) { - List subRanges = + final List subRanges = splitMap.computeIfAbsent(replicaSets.get(i), x -> new ArrayList<>()); subRanges.add(splitInfo.ranges.get(2 * i)); subRanges.add(splitInfo.ranges.get(2 * i + 1)); } } - List 
redirectNodeList = new ArrayList<>(times.length); + final List redirectNodeList = new ArrayList<>(times.length); for (int i = 0; i < times.length; i++) { - IDeviceID deviceId = getDeviceID(i); + final IDeviceID deviceId = getDeviceID(i); redirectNodeList.add(endPointMap.get(deviceId)); } analysis.setRedirectNodeList(redirectNodeList); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/Analysis.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/Analysis.java index f5f769a3d280..c056c9a3ff9e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/Analysis.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/Analysis.java @@ -642,37 +642,37 @@ public RelationType getOutputDescriptor() { return getOutputDescriptor(root); } - public RelationType getOutputDescriptor(Node node) { + public RelationType getOutputDescriptor(final Node node) { return getScope(node).getRelationType(); } - public void addSourceColumns(Field field, Set sourceColumn) { + public void addSourceColumns(final Field field, final Set sourceColumn) { originColumnDetails.putAll(field, sourceColumn); } - public Set getSourceColumns(Field field) { + public Set getSourceColumns(final Field field) { return ImmutableSet.copyOf(originColumnDetails.get(field)); } - public void addExpressionFields(Expression expression, Collection fields) { + public void addExpressionFields(final Expression expression, final Collection fields) { fieldLineage.putAll(NodeRef.of(expression), fields); } - public Set getExpressionSourceColumns(Expression expression) { + public Set getExpressionSourceColumns(final Expression expression) { return fieldLineage.get(NodeRef.of(expression)).stream() .flatMap(field -> getSourceColumns(field).stream()) .collect(toImmutableSet()); } - public void setRelationName(Relation relation, QualifiedName name) { + public void setRelationName(final Relation relation, final QualifiedName name) { relationNames.put(NodeRef.of(relation), name); } - public QualifiedName getRelationName(Relation relation) { + public QualifiedName getRelationName(final Relation relation) { return relationNames.get(NodeRef.of(relation)); } - public void addAliased(Relation relation) { + public void addAliased(final Relation relation) { aliasedRelations.add(NodeRef.of(relation)); } @@ -681,19 +681,21 @@ public boolean isAliased(Relation relation) { } public void addTableSchema( - QualifiedObjectName qualifiedObjectName, Map tableColumnSchema) { + final QualifiedObjectName qualifiedObjectName, + final Map tableColumnSchema) { tableColumnSchemas.put(qualifiedObjectName, tableColumnSchema); } - public Map getTableColumnSchema(QualifiedObjectName qualifiedObjectName) { + public Map getTableColumnSchema( + final QualifiedObjectName qualifiedObjectName) { return tableColumnSchemas.get(qualifiedObjectName); } - public void addPredicateCoercions(Map, PredicateCoercions> coercions) { + public void addPredicateCoercions(final Map, PredicateCoercions> coercions) { predicateCoercions.putAll(coercions); } - public PredicateCoercions getPredicateCoercions(Expression expression) { + public PredicateCoercions getPredicateCoercions(final Expression expression) { return predicateCoercions.get(NodeRef.of(expression)); } @@ -709,7 +711,7 @@ public boolean hasSortNode() { return hasSortNode; } - public void setSortNode(boolean hasSortNode) { + public void setSortNode(final boolean 
hasSortNode) { this.hasSortNode = hasSortNode; } @@ -717,7 +719,7 @@ public boolean isEmptyDataSource() { return emptyDataSource; } - public void setEmptyDataSource(boolean emptyDataSource) { + public void setEmptyDataSource(final boolean emptyDataSource) { this.emptyDataSource = emptyDataSource; } @@ -732,12 +734,12 @@ public TSStatus getFailStatus() { } @Override - public void setFailStatus(TSStatus failStatus) { + public void setFailStatus(final TSStatus failStatus) { this.failStatus = failStatus; } @Override - public boolean canSkipExecute(MPPQueryContext context) { + public boolean canSkipExecute(final MPPQueryContext context) { return isFinishQueryAfterAnalyze(); } @@ -746,7 +748,7 @@ public void setFinishQueryAfterAnalyze() { } @Override - public void setFinishQueryAfterAnalyze(boolean finishQueryAfterAnalyze) { + public void setFinishQueryAfterAnalyze(final boolean finishQueryAfterAnalyze) { this.finishQueryAfterAnalyze = finishQueryAfterAnalyze; } @@ -755,14 +757,14 @@ public boolean isFinishQueryAfterAnalyze() { } @Override - public void setDataPartitionInfo(DataPartition dataPartition) { + public void setDataPartitionInfo(final DataPartition dataPartition) { this.dataPartition = dataPartition; } @Override - public TsBlock constructResultForMemorySource(MPPQueryContext context) { + public TsBlock constructResultForMemorySource(final MPPQueryContext context) { requireNonNull(getStatement(), "root statement is analysis is null"); - StatementMemorySource memorySource = + final StatementMemorySource memorySource = new TableModelStatementMemorySourceVisitor() .process(getStatement(), new TableModelStatementMemorySourceContext(context, this)); setRespDatasetHeader(memorySource.getDatasetHeader()); @@ -813,11 +815,11 @@ public DataPartition getDataPartitionInfo() { return dataPartition; } - public void setDataPartition(DataPartition dataPartition) { + public void setDataPartition(final DataPartition dataPartition) { this.dataPartition = dataPartition; } - public void upsertDataPartition(DataPartition targetDataPartition) { + public void upsertDataPartition(final DataPartition targetDataPartition) { if (this.dataPartition == null) { this.dataPartition = targetDataPartition; } else { @@ -831,12 +833,12 @@ public List getRedirectNodeList() { } @Override - public void setRedirectNodeList(List redirectNodeList) { + public void setRedirectNodeList(final List redirectNodeList) { this.redirectNodeList = redirectNodeList; } @Override - public void addEndPointToRedirectNodeList(TEndPoint endPoint) { + public void addEndPointToRedirectNodeList(final TEndPoint endPoint) { if (redirectNodeList == null) { redirectNodeList = new ArrayList<>(); } @@ -844,7 +846,7 @@ public void addEndPointToRedirectNodeList(TEndPoint endPoint) { } public List getDataRegionReplicaSetWithTimeFilter( - String database, IDeviceID deviceId, Filter timeFilter) { + final String database, final IDeviceID deviceId, final Filter timeFilter) { if (dataPartition == null) { return Collections.singletonList(NOT_ASSIGNED); } else { @@ -853,7 +855,7 @@ public List getDataRegionReplicaSetWithTimeFilter( } @Override - public TimePredicate getCovertedTimePredicate() { + public TimePredicate getConvertedTimePredicate() { return null; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/TableMetadataImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/TableMetadataImpl.java index c974eff54141..289d20598ec7 100644 --- 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/TableMetadataImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/TableMetadataImpl.java @@ -66,8 +66,6 @@ import java.util.Optional; import java.util.stream.Collectors; -import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_ROOT; -import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; import static org.apache.iotdb.commons.schema.table.InformationSchemaTable.INFORMATION_SCHEMA; import static org.apache.tsfile.read.common.type.BinaryType.TEXT; import static org.apache.tsfile.read.common.type.BooleanType.BOOLEAN; @@ -723,25 +721,25 @@ public void validateDeviceSchema( @Override public DataPartition getOrCreateDataPartition( - List dataPartitionQueryParams, String userName) { + final List dataPartitionQueryParams, final String userName) { return partitionFetcher.getOrCreateDataPartition(dataPartitionQueryParams, userName); } @Override public SchemaPartition getOrCreateSchemaPartition( - String database, List deviceIDList, String userName) { - return partitionFetcher.getOrCreateSchemaPartition( - PATH_ROOT + PATH_SEPARATOR + database, deviceIDList, userName); + final String database, final List deviceIDList, final String userName) { + return partitionFetcher.getOrCreateSchemaPartition(database, deviceIDList, userName); } @Override - public SchemaPartition getSchemaPartition(String database, List deviceIDList) { - return partitionFetcher.getSchemaPartition(PATH_ROOT + PATH_SEPARATOR + database, deviceIDList); + public SchemaPartition getSchemaPartition( + final String database, final List deviceIDList) { + return partitionFetcher.getSchemaPartition(database, deviceIDList); } @Override - public SchemaPartition getSchemaPartition(String database) { - return partitionFetcher.getSchemaPartition(PATH_ROOT + PATH_SEPARATOR + database); + public SchemaPartition getSchemaPartition(final String database) { + return partitionFetcher.getSchemaPartition(database, null); } @Override diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/fetcher/cache/TableDeviceSchemaCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/fetcher/cache/TableDeviceSchemaCache.java index 946210300591..5fb056671b26 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/fetcher/cache/TableDeviceSchemaCache.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/metadata/fetcher/cache/TableDeviceSchemaCache.java @@ -489,46 +489,51 @@ long getRequestCount() { return dualKeyCache.stats().requestCount(); } - // This database is with "root" - void invalidateLastCache(final @Nonnull String qualifiedDatabase) { - final String database = PathUtils.unQualifyDatabaseName(qualifiedDatabase); + void invalidateLastCache(final @Nonnull String database) { readWriteLock.writeLock().lock(); try { - dualKeyCache.update( - tableId -> - tableId.belongTo(database) - || Objects.isNull(tableId.getDatabase()) - && tableId.getTableName().startsWith(qualifiedDatabase), - deviceID -> true, - entry -> -entry.invalidateLastCache()); - dualKeyCache.update( - tableId -> - Objects.isNull(tableId.getDatabase()) - && qualifiedDatabase.startsWith(tableId.getTableName()), - deviceID -> deviceID.matchDatabaseName(qualifiedDatabase), - entry -> -entry.invalidateLastCache()); + if 
(PathUtils.isTableModelDatabase(database)) { + dualKeyCache.update( + tableId -> tableId.belongTo(database), + deviceID -> true, + entry -> -entry.invalidateLastCache()); + } else { + dualKeyCache.update( + tableId -> + Objects.isNull(tableId.getDatabase()) + && tableId.getTableName().startsWith(database), + deviceID -> true, + entry -> -entry.invalidateLastCache()); + dualKeyCache.update( + tableId -> + Objects.isNull(tableId.getDatabase()) + && database.startsWith(tableId.getTableName()), + deviceID -> deviceID.matchDatabaseName(database), + entry -> -entry.invalidateLastCache()); + } } finally { readWriteLock.writeLock().unlock(); } } - // This database is without "root" public void invalidate(final @Nonnull String database) { - final String qualifiedDatabase = PathUtils.qualifyDatabaseName(database); readWriteLock.writeLock().lock(); try { - dualKeyCache.invalidate( - tableId -> - tableId.belongTo(database) - || Objects.isNull(tableId.getDatabase()) - && tableId.getTableName().startsWith(qualifiedDatabase), - deviceID -> true); - dualKeyCache.invalidate( - tableId -> - Objects.isNull(tableId.getDatabase()) - && qualifiedDatabase.startsWith(tableId.getTableName()), - deviceID -> deviceID.matchDatabaseName(qualifiedDatabase)); + if (PathUtils.isTableModelDatabase(database)) { + dualKeyCache.invalidate(tableId -> tableId.belongTo(database), deviceID -> true); + } else { + dualKeyCache.invalidate( + tableId -> + Objects.isNull(tableId.getDatabase()) + && tableId.getTableName().startsWith(database), + deviceID -> true); + dualKeyCache.invalidate( + tableId -> + Objects.isNull(tableId.getDatabase()) + && database.startsWith(tableId.getTableName()), + deviceID -> deviceID.matchDatabaseName(database)); + } } finally { readWriteLock.writeLock().unlock(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/RelationPlanner.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/RelationPlanner.java index 33bf0a6d5be7..4fda32333cde 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/RelationPlanner.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/RelationPlanner.java @@ -681,8 +681,9 @@ protected RelationPlan visitInsertRow(InsertRow node, Void context) { insertNode, analysis.getRootScope(), Collections.emptyList(), outerContext); } - protected RelationalInsertRowNode fromInsertRowStatement(InsertRowStatement insertRowStatement) { - RelationalInsertRowNode insertNode = + protected RelationalInsertRowNode fromInsertRowStatement( + final InsertRowStatement insertRowStatement) { + final RelationalInsertRowNode insertNode = new RelationalInsertRowNode( idAllocator.genPlanNodeId(), insertRowStatement.getDevicePath(), diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/distribute/TableDistributedPlanGenerator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/distribute/TableDistributedPlanGenerator.java index c392fb357a42..dbab02fcf2c6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/distribute/TableDistributedPlanGenerator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/distribute/TableDistributedPlanGenerator.java @@ -24,7 +24,6 @@ import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import 
org.apache.iotdb.commons.partition.SchemaPartition; import org.apache.iotdb.commons.schema.table.column.TsTableColumnCategory; -import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.db.queryengine.common.MPPQueryContext; import org.apache.iotdb.db.queryengine.common.QueryId; import org.apache.iotdb.db.queryengine.plan.planner.distribution.NodeDistribution; @@ -112,22 +111,22 @@ public class TableDistributedPlanGenerator private final DataNodeLocationSupplierFactory.DataNodeLocationSupplier dataNodeLocationSupplier; public TableDistributedPlanGenerator( - MPPQueryContext queryContext, - Analysis analysis, - SymbolAllocator symbolAllocator, - DataNodeLocationSupplierFactory.DataNodeLocationSupplier dataNodeLocationSupplier) { + final MPPQueryContext queryContext, + final Analysis analysis, + final SymbolAllocator symbolAllocator, + final DataNodeLocationSupplierFactory.DataNodeLocationSupplier dataNodeLocationSupplier) { this.queryId = queryContext.getQueryId(); this.analysis = analysis; this.symbolAllocator = symbolAllocator; this.dataNodeLocationSupplier = dataNodeLocationSupplier; } - public List genResult(PlanNode node, PlanContext context) { - List res = node.accept(this, context); + public List genResult(final PlanNode node, final PlanContext context) { + final List res = node.accept(this, context); if (res.size() == 1) { return res; } else if (res.size() > 1) { - CollectNode collectNode = + final CollectNode collectNode = new CollectNode(queryId.genPlanNodeId(), res.get(0).getOutputSymbols()); res.forEach(collectNode::addChild); return Collections.singletonList(collectNode); @@ -138,34 +137,35 @@ public List genResult(PlanNode node, PlanContext context) { @Override public List visitPlan( - PlanNode node, TableDistributedPlanGenerator.PlanContext context) { + final PlanNode node, final TableDistributedPlanGenerator.PlanContext context) { if (node instanceof WritePlanNode) { return Collections.singletonList(node); } - List> children = + final List> children = node.getChildren().stream() .map(child -> child.accept(this, context)) .collect(toImmutableList()); - PlanNode newNode = node.clone(); - for (List planNodes : children) { + final PlanNode newNode = node.clone(); + for (final List planNodes : children) { planNodes.forEach(newNode::addChild); } return Collections.singletonList(newNode); } @Override - public List visitExplainAnalyze(ExplainAnalyzeNode node, PlanContext context) { - List children = genResult(node.getChild(), context); + public List visitExplainAnalyze( + final ExplainAnalyzeNode node, final PlanContext context) { + final List children = genResult(node.getChild(), context); node.setChild(children.get(0)); return Collections.singletonList(node); } @Override - public List visitOutput(OutputNode node, PlanContext context) { - List childrenNodes = node.getChild().accept(this, context); - OrderingScheme childOrdering = nodeOrderingMap.get(childrenNodes.get(0).getPlanNodeId()); + public List visitOutput(final OutputNode node, final PlanContext context) { + final List childrenNodes = node.getChild().accept(this, context); + final OrderingScheme childOrdering = nodeOrderingMap.get(childrenNodes.get(0).getPlanNodeId()); if (childOrdering != null) { nodeOrderingMap.put(node.getPlanNodeId(), childOrdering); } @@ -457,18 +457,19 @@ public List visitJoin(JoinNode node, PlanContext context) { } @Override - public List visitDeviceTableScan(DeviceTableScanNode node, PlanContext context) { - Map tableScanNodeMap = new HashMap<>(); + public List visitDeviceTableScan( + 
final DeviceTableScanNode node, final PlanContext context) { + final Map tableScanNodeMap = new HashMap<>(); - for (DeviceEntry deviceEntry : node.getDeviceEntries()) { - List regionReplicaSets = + for (final DeviceEntry deviceEntry : node.getDeviceEntries()) { + final List regionReplicaSets = analysis.getDataRegionReplicaSetWithTimeFilter( node.getQualifiedObjectName().getDatabaseName(), deviceEntry.getDeviceID(), node.getTimeFilter()); - for (TRegionReplicaSet regionReplicaSet : regionReplicaSets) { - DeviceTableScanNode deviceTableScanNode = + for (final TRegionReplicaSet regionReplicaSet : regionReplicaSets) { + final DeviceTableScanNode deviceTableScanNode = tableScanNodeMap.computeIfAbsent( regionReplicaSet, k -> { @@ -498,12 +499,13 @@ public List visitDeviceTableScan(DeviceTableScanNode node, PlanContext return Collections.singletonList(node); } - List resultTableScanNodeList = new ArrayList<>(); + final List resultTableScanNodeList = new ArrayList<>(); TRegionReplicaSet mostUsedDataRegion = null; int maxDeviceEntrySizeOfTableScan = 0; - for (Map.Entry entry : tableScanNodeMap.entrySet()) { - TRegionReplicaSet regionReplicaSet = entry.getKey(); - DeviceTableScanNode subDeviceTableScanNode = entry.getValue(); + for (final Map.Entry entry : + tableScanNodeMap.entrySet()) { + final TRegionReplicaSet regionReplicaSet = entry.getKey(); + final DeviceTableScanNode subDeviceTableScanNode = entry.getValue(); subDeviceTableScanNode.setPlanNodeId(queryId.genPlanNodeId()); subDeviceTableScanNode.setRegionReplicaSet(regionReplicaSet); resultTableScanNodeList.add(subDeviceTableScanNode); @@ -741,18 +743,18 @@ private static OrderingScheme constructOrderingSchema(List symbols) { } private PlanNode mergeChildrenViaCollectOrMergeSort( - OrderingScheme childOrdering, List childrenNodes) { + final OrderingScheme childOrdering, final List childrenNodes) { checkArgument(!childrenNodes.isEmpty(), "childrenNodes should not be empty"); if (childrenNodes.size() == 1) { return childrenNodes.get(0); } - PlanNode firstChild = childrenNodes.get(0); + final PlanNode firstChild = childrenNodes.get(0); // children has sort property, use MergeSort to merge children if (childOrdering != null) { - MergeSortNode mergeSortNode = + final MergeSortNode mergeSortNode = new MergeSortNode(queryId.genPlanNodeId(), childOrdering, firstChild.getOutputSymbols()); childrenNodes.forEach(mergeSortNode::addChild); nodeOrderingMap.put(mergeSortNode.getPlanNodeId(), childOrdering); @@ -760,7 +762,7 @@ private PlanNode mergeChildrenViaCollectOrMergeSort( } // children has no sort property, use CollectNode to merge children - CollectNode collectNode = + final CollectNode collectNode = new CollectNode(queryId.genPlanNodeId(), firstChild.getOutputSymbols()); childrenNodes.forEach(collectNode::addChild); return collectNode; @@ -941,12 +943,11 @@ public List visitTableDeviceQueryCount( private List visitAbstractTableDeviceQuery( final AbstractTableDeviceQueryNode node, final PlanContext context) { - final String database = PathUtils.qualifyDatabaseName(node.getDatabase()); final Set schemaRegionSet = new HashSet<>(); analysis .getSchemaPartitionInfo() .getSchemaPartitionMap() - .get(database) + .get(node.getDatabase()) .forEach( (deviceGroupId, schemaRegionReplicaSet) -> schemaRegionSet.add(schemaRegionReplicaSet)); @@ -970,7 +971,7 @@ private List visitAbstractTableDeviceQuery( @Override public List visitTableDeviceFetch( final TableDeviceFetchNode node, final PlanContext context) { - final String database = 
PathUtils.qualifyDatabaseName(node.getDatabase()); + final String database = node.getDatabase(); final Set schemaRegionSet = new HashSet<>(); final SchemaPartition schemaPartition = analysis.getSchemaPartitionInfo(); final Map databaseMap = diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/CreateOrUpdateTableDeviceNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/CreateOrUpdateTableDeviceNode.java index efc5c8f92784..316e483a34d1 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/CreateOrUpdateTableDeviceNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/CreateOrUpdateTableDeviceNode.java @@ -43,8 +43,6 @@ import java.util.Map; import java.util.Objects; -import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_ROOT; -import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; import static org.apache.iotdb.db.storageengine.dataregion.memtable.DeviceIDFactory.convertRawDeviceIDs2PartitionKeys; public class CreateOrUpdateTableDeviceNode extends WritePlanNode implements ISchemaRegionPlan { @@ -252,7 +250,6 @@ public static CreateOrUpdateTableDeviceNode deserialize(final ByteBuffer buffer) @Override public List splitByPartition(final IAnalysis analysis) { - final String dbNameForInvoke = PATH_ROOT + PATH_SEPARATOR + database; final Map> splitMap = new HashMap<>(); final List partitionKeyList = getPartitionKeyList(); for (int i = 0; i < partitionKeyList.size(); i++) { @@ -260,7 +257,7 @@ public List splitByPartition(final IAnalysis analysis) { final TRegionReplicaSet regionReplicaSet = analysis .getSchemaPartitionInfo() - .getSchemaRegionReplicaSet(dbNameForInvoke, partitionKeyList.get(i)); + .getSchemaRegionReplicaSet(database, partitionKeyList.get(i)); splitMap.computeIfAbsent(regionReplicaSet, k -> new ArrayList<>()).add(i); } final List result = new ArrayList<>(splitMap.size()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/TableDeviceAttributeUpdateNode.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/TableDeviceAttributeUpdateNode.java index 3932db6a3ff0..4ecc483b5185 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/TableDeviceAttributeUpdateNode.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/node/schema/TableDeviceAttributeUpdateNode.java @@ -48,9 +48,6 @@ import java.util.Objects; import java.util.stream.Collectors; -import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_ROOT; -import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; - public class TableDeviceAttributeUpdateNode extends WritePlanNode implements ISchemaRegionPlan { protected String database; @@ -322,11 +319,7 @@ public TRegionReplicaSet getRegionReplicaSet() { @Override public List splitByPartition(final IAnalysis analysis) { return new HashSet<>( - analysis - .getSchemaPartitionInfo() - .getSchemaPartitionMap() - .get(PATH_ROOT + PATH_SEPARATOR + database) - .values()) + analysis.getSchemaPartitionInfo().getSchemaPartitionMap().get(database).values()) .stream() .map( replicaSet -> diff --git 
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/optimizations/PushPredicateIntoTableScan.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/optimizations/PushPredicateIntoTableScan.java index 26dd85498f83..420e682f7237 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/optimizations/PushPredicateIntoTableScan.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/planner/optimizations/PushPredicateIntoTableScan.java @@ -420,15 +420,16 @@ private SplitExpression splitPredicate(DeviceTableScanNode node, Expression pred } private void getDeviceEntriesWithDataPartitions( - DeviceTableScanNode tableScanNode, - List metadataExpressions, + final DeviceTableScanNode tableScanNode, + final List metadataExpressions, String timeColumnName) { - List attributeColumns = new ArrayList<>(); + final List attributeColumns = new ArrayList<>(); int attributeIndex = 0; - for (Map.Entry entry : tableScanNode.getAssignments().entrySet()) { - Symbol columnSymbol = entry.getKey(); - ColumnSchema columnSchema = entry.getValue(); + for (final Map.Entry entry : + tableScanNode.getAssignments().entrySet()) { + final Symbol columnSymbol = entry.getKey(); + final ColumnSchema columnSchema = entry.getValue(); if (ATTRIBUTE.equals(columnSchema.getColumnCategory())) { attributeColumns.add(columnSchema.getName()); tableScanNode.getIdAndAttributeIndexMap().put(columnSymbol, attributeIndex++); @@ -436,7 +437,7 @@ private void getDeviceEntriesWithDataPartitions( } long startTime = System.nanoTime(); - List deviceEntries = + final List deviceEntries = metadata.indexScan( tableScanNode.getQualifiedObjectName(), metadataExpressions.stream() @@ -449,7 +450,7 @@ private void getDeviceEntriesWithDataPartitions( queryContext); tableScanNode.setDeviceEntries(deviceEntries); - long schemaFetchCost = System.nanoTime() - startTime; + final long schemaFetchCost = System.nanoTime() - startTime; QueryPlanCostMetricSet.getInstance() .recordPlanCost(TABLE_TYPE, SCHEMA_FETCHER, schemaFetchCost); queryContext.setFetchSchemaCost(schemaFetchCost); @@ -461,19 +462,20 @@ private void getDeviceEntriesWithDataPartitions( analysis.setFinishQueryAfterAnalyze(); } } else { - Filter timeFilter = + final Filter timeFilter = tableScanNode .getTimePredicate() .map(value -> value.accept(new ConvertPredicateToTimeFilterVisitor(), null)) .orElse(null); tableScanNode.setTimeFilter(timeFilter); - String treeModelDatabase = - "root." 
+ tableScanNode.getQualifiedObjectName().getDatabaseName(); startTime = System.nanoTime(); - DataPartition dataPartition = - fetchDataPartitionByDevices(treeModelDatabase, deviceEntries, timeFilter); + final DataPartition dataPartition = + fetchDataPartitionByDevices( + tableScanNode.getQualifiedObjectName().getDatabaseName(), + deviceEntries, + timeFilter); if (dataPartition.getDataPartitionMap().size() > 1) { throw new IllegalStateException( @@ -490,7 +492,7 @@ private void getDeviceEntriesWithDataPartitions( analysis.upsertDataPartition(dataPartition); } - long fetchPartitionCost = System.nanoTime() - startTime; + final long fetchPartitionCost = System.nanoTime() - startTime; QueryPlanCostMetricSet.getInstance() .recordPlanCost(TABLE_TYPE, PARTITION_FETCHER, fetchPartitionCost); queryContext.setFetchPartitionCost(fetchPartitionCost); @@ -752,8 +754,10 @@ public PlanNode visitRelationalInsertTablet( } private DataPartition fetchDataPartitionByDevices( - String database, List deviceEntries, Filter globalTimeFilter) { - Pair, Pair> res = + final String database, + final List deviceEntries, + final Filter globalTimeFilter) { + final Pair, Pair> res = getTimePartitionSlotList(globalTimeFilter, queryContext); // there is no satisfied time range @@ -764,7 +768,7 @@ private DataPartition fetchDataPartitionByDevices( CONFIG.getSeriesPartitionSlotNum()); } - List dataPartitionQueryParams = + final List dataPartitionQueryParams = deviceEntries.stream() .map( deviceEntry -> diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/Flush.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/Flush.java index 608d4e34ce8e..a005a121c6a7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/Flush.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/ast/Flush.java @@ -24,12 +24,12 @@ public class Flush extends WrappedStatement { - public Flush(Statement innerTreeStatement, MPPQueryContext context) { + public Flush(final Statement innerTreeStatement, final MPPQueryContext context) { super(innerTreeStatement, context); } @Override - public R accept(AstVisitor visitor, C context) { + public R accept(final AstVisitor visitor, final C context) { return visitor.visitFlush(this, context); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/parser/AstBuilder.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/parser/AstBuilder.java index fcf65be1eb3a..3536fbf7ccf0 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/parser/AstBuilder.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/relational/sql/parser/AstBuilder.java @@ -28,7 +28,6 @@ import org.apache.iotdb.commons.schema.table.column.TsTableColumnSchema; import org.apache.iotdb.commons.udf.builtin.relational.TableBuiltinScalarFunction; import org.apache.iotdb.commons.utils.CommonDateTimeUtils; -import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.db.exception.query.QueryProcessException; import org.apache.iotdb.db.exception.sql.SemanticException; import org.apache.iotdb.db.protocol.session.IClientSession; @@ -467,8 +466,8 @@ public Node visitShowIndexStatement(RelationalSqlParser.ShowIndexStatementContex } @Override - public Node visitInsertStatement(RelationalSqlParser.InsertStatementContext 
ctx) { - QualifiedName qualifiedName = getQualifiedName(ctx.tableName); + public Node visitInsertStatement(final RelationalSqlParser.InsertStatementContext ctx) { + final QualifiedName qualifiedName = getQualifiedName(ctx.tableName); String tableName = qualifiedName.getSuffix(); String databaseName = qualifiedName @@ -481,9 +480,10 @@ public Node visitInsertStatement(RelationalSqlParser.InsertStatementContext ctx) tableName = tableName.toLowerCase(); databaseName = databaseName.toLowerCase(); - Query query = (Query) visit(ctx.query()); + final Query query = (Query) visit(ctx.query()); if (ctx.columnAliases() != null) { - List identifiers = visit(ctx.columnAliases().identifier(), Identifier.class); + final List identifiers = + visit(ctx.columnAliases().identifier(), Identifier.class); if (query.getQueryBody() instanceof Values) { return visitInsertValues( databaseName, tableName, identifiers, ((Values) query.getQueryBody())); @@ -492,7 +492,7 @@ public Node visitInsertStatement(RelationalSqlParser.InsertStatementContext ctx) } } else { if (query.getQueryBody() instanceof Values) { - TsTable table = DataNodeTableCache.getInstance().getTable(databaseName, tableName); + final TsTable table = DataNodeTableCache.getInstance().getTable(databaseName, tableName); if (table == null) { throw new SemanticException(new NoTableException(tableName)); } @@ -503,9 +503,10 @@ public Node visitInsertStatement(RelationalSqlParser.InsertStatementContext ctx) } } - private Node visitInsertValues(String databaseName, TsTable table, Values queryBody) { - List rows = queryBody.getRows(); - List rowStatements = + private Node visitInsertValues( + final String databaseName, final TsTable table, final Values queryBody) { + final List rows = queryBody.getRows(); + final List rowStatements = rows.stream() .map( r -> { @@ -528,8 +529,12 @@ private Node visitInsertValues(String databaseName, TsTable table, Values queryB } private Node visitInsertValues( - String databaseName, String tableName, List identifiers, Values queryBody) { - List columnNames = identifiers.stream().map(Identifier::getValue).collect(toList()); + final String databaseName, + final String tableName, + final List identifiers, + final Values queryBody) { + final List columnNames = + identifiers.stream().map(Identifier::getValue).collect(toList()); int timeColumnIndex = -1; for (int i = 0; i < columnNames.size(); i++) { if (TIME_COLUMN_NAME.equalsIgnoreCase(columnNames.get(i))) { @@ -1115,26 +1120,19 @@ public Node visitMigrateRegionStatement(RelationalSqlParser.MigrateRegionStateme } @Override - public Node visitFlushStatement(RelationalSqlParser.FlushStatementContext ctx) { - FlushStatement flushStatement = new FlushStatement(StatementType.FLUSH); - List storageGroups = null; + public Node visitFlushStatement(final RelationalSqlParser.FlushStatementContext ctx) { + final FlushStatement flushStatement = new FlushStatement(StatementType.FLUSH); + List storageGroups = null; if (ctx.booleanValue() != null) { flushStatement.setSeq(Boolean.parseBoolean(ctx.booleanValue().getText())); } flushStatement.setOnCluster( ctx.localOrClusterMode() == null || ctx.localOrClusterMode().LOCAL() == null); if (ctx.identifier() != null) { - storageGroups = new ArrayList<>(); - List identifiers = getIdentifiers(ctx.identifier()); - for (Identifier identifier : identifiers) { - try { - storageGroups.add(new PartialPath(PathUtils.qualifyDatabaseName(identifier.getValue()))); - } catch (IllegalPathException e) { - throw new RuntimeException(e); - } - } + storageGroups = + 
getIdentifiers(ctx.identifier()).stream().map(Identifier::getValue).collect(toList()); } - flushStatement.setStorageGroups(storageGroups); + flushStatement.setDatabases(storageGroups); return new Flush(flushStatement, null); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/metadata/ShowDatabaseStatement.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/metadata/ShowDatabaseStatement.java index 7c30cd29836b..8d04a48d8c3f 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/metadata/ShowDatabaseStatement.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/metadata/ShowDatabaseStatement.java @@ -105,12 +105,6 @@ public void buildTSBlock( builder.getColumnBuilder(8).writeInt(storageGroupInfo.getDataRegionNum()); builder.getColumnBuilder(9).writeInt(storageGroupInfo.getMinDataRegionNum()); builder.getColumnBuilder(10).writeInt(storageGroupInfo.getMaxDataRegionNum()); - builder - .getColumnBuilder(11) - .writeBinary( - new Binary( - storageGroupInfo.isIsTableModel() ? "TABLE" : "TREE", - TSFileConfig.STRING_CHARSET)); } builder.declarePosition(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/sys/FlushStatement.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/sys/FlushStatement.java index b16aaa2ed6b2..24d5458a1f00 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/sys/FlushStatement.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/statement/sys/FlushStatement.java @@ -19,6 +19,7 @@ package org.apache.iotdb.db.queryengine.plan.statement.sys; +import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.db.queryengine.plan.analyze.QueryType; import org.apache.iotdb.db.queryengine.plan.statement.IConfigStatement; @@ -26,36 +27,37 @@ import org.apache.iotdb.db.queryengine.plan.statement.StatementType; import org.apache.iotdb.db.queryengine.plan.statement.StatementVisitor; +import java.util.ArrayList; import java.util.Collections; import java.util.List; public class FlushStatement extends Statement implements IConfigStatement { /** list of database */ - private List storageGroups; + private List databases; // being null indicates flushing both seq and unseq data private Boolean isSeq; private boolean onCluster; - public FlushStatement(StatementType flushType) { + public FlushStatement(final StatementType flushType) { this.statementType = flushType; } - public List getStorageGroups() { - return storageGroups; + public List getDatabases() { + return databases; } - public void setStorageGroups(List storageGroups) { - this.storageGroups = storageGroups; + public void setDatabases(final List databases) { + this.databases = databases; } public Boolean isSeq() { return isSeq; } - public void setSeq(Boolean seq) { + public void setSeq(final Boolean seq) { isSeq = seq; } @@ -63,7 +65,7 @@ public boolean isOnCluster() { return onCluster; } - public void setOnCluster(boolean onCluster) { + public void setOnCluster(final boolean onCluster) { this.onCluster = onCluster; } @@ -74,14 +76,23 @@ public QueryType getQueryType() { @Override public List getPaths() { - if (storageGroups == null) { + if (databases == null) { return Collections.emptyList(); } - return storageGroups; + + final List paths = new 
ArrayList<>(databases.size()); + try { + for (final String database : databases) { + paths.add(new PartialPath(database)); + } + } catch (final IllegalPathException e) { + // ignore + } + return paths; } @Override - public R accept(StatementVisitor visitor, C context) { + public R accept(final StatementVisitor visitor, final C context) { return visitor.visitFlush(this, context); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/SchemaEngine.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/SchemaEngine.java index de336b0a9df3..4608e52c29a2 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/SchemaEngine.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/SchemaEngine.java @@ -24,9 +24,7 @@ import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil; import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.consensus.SchemaRegionId; -import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.schema.table.TsTable; import org.apache.iotdb.commons.utils.FileUtils; import org.apache.iotdb.commons.utils.PathUtils; @@ -50,6 +48,7 @@ import org.apache.iotdb.mpp.rpc.thrift.TDataNodeHeartbeatReq; import org.apache.iotdb.mpp.rpc.thrift.TDataNodeHeartbeatResp; +import org.apache.tsfile.common.constant.TsFileConstant; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -148,15 +147,7 @@ public static Map> getLocalSchemaRegionInfo() { continue; } - final PartialPath database; - try { - database = PartialPath.getDatabasePath(file.getName()); - } catch (IllegalPathException illegalPathException) { - // not a legal sg dir - continue; - } - - final File sgDir = new File(config.getSchemaDir(), database.getFullPath()); + final File sgDir = new File(config.getSchemaDir(), file.getName()); if (!sgDir.exists()) { continue; @@ -177,7 +168,7 @@ public static Map> getLocalSchemaRegionInfo() { } schemaRegionIds.add(schemaRegionId); } - localSchemaPartitionTable.put(database.getFullPath(), schemaRegionIds); + localSchemaPartitionTable.put(file.getName(), schemaRegionIds); } return localSchemaPartitionTable; } @@ -189,26 +180,19 @@ public static Map> getLocalSchemaRegionInfo() { @SuppressWarnings("java:S2142") private void initSchemaRegion() { // recover SchemaRegion concurrently - Map> localSchemaRegionInfo = getLocalSchemaRegionInfo(); + final Map> localSchemaRegionInfo = getLocalSchemaRegionInfo(); final ExecutorService schemaRegionRecoverPools = IoTDBThreadPoolFactory.newFixedThreadPool( Runtime.getRuntime().availableProcessors(), ThreadName.SCHEMA_REGION_RECOVER_TASK.getName()); final List> futures = new ArrayList<>(); localSchemaRegionInfo.forEach( - (k, v) -> { - for (SchemaRegionId schemaRegionId : v) { - PartialPath database; - try { - database = PartialPath.getDatabasePath(k); - } catch (IllegalPathException e) { - logger.warn("Illegal database path: {}", k); - continue; - } - futures.add( - schemaRegionRecoverPools.submit(recoverSchemaRegionTask(database, schemaRegionId))); - } - }); + (k, v) -> + v.forEach( + schemaRegionId -> + futures.add( + schemaRegionRecoverPools.submit( + recoverSchemaRegionTask(k, schemaRegionId))))); for (final Future future : futures) { try { final ISchemaRegion schemaRegion = future.get(); @@ -263,7 +247,7 @@ public void clear() { 
ClusterTemplateManager.getInstance().clear(); } - public ISchemaRegion getSchemaRegion(SchemaRegionId regionId) { + public ISchemaRegion getSchemaRegion(final SchemaRegionId regionId) { return schemaRegionMap.get(regionId); } @@ -276,17 +260,17 @@ public List getAllSchemaRegionIds() { } public synchronized void createSchemaRegion( - PartialPath storageGroup, SchemaRegionId schemaRegionId) throws MetadataException { - ISchemaRegion schemaRegion = schemaRegionMap.get(schemaRegionId); + final String storageGroup, final SchemaRegionId schemaRegionId) throws MetadataException { + final ISchemaRegion schemaRegion = schemaRegionMap.get(schemaRegionId); if (schemaRegion != null) { - if (schemaRegion.getDatabaseFullPath().equals(storageGroup.getFullPath())) { + if (schemaRegion.getDatabaseFullPath().equals(storageGroup)) { return; } else { throw new MetadataException( String.format( "SchemaRegion [%s] is duplicated between [%s] and [%s], " + "and the former one has been recovered.", - schemaRegionId, schemaRegion.getDatabaseFullPath(), storageGroup.getFullPath())); + schemaRegionId, schemaRegion.getDatabaseFullPath(), storageGroup)); } } schemaRegionMap.put( @@ -294,7 +278,7 @@ public synchronized void createSchemaRegion( } private Callable recoverSchemaRegionTask( - PartialPath storageGroup, SchemaRegionId schemaRegionId) { + final String storageGroup, final SchemaRegionId schemaRegionId) { // this method is called for concurrent recovery of schema regions return () -> { long timeRecord = System.currentTimeMillis(); @@ -305,24 +289,24 @@ private Callable recoverSchemaRegionTask( timeRecord = System.currentTimeMillis() - timeRecord; logger.info( "Recover [{}] spend: {} ms", - storageGroup.concatNode(schemaRegionId.toString()), + storageGroup + TsFileConstant.PATH_SEPARATOR + schemaRegionId.toString(), timeRecord); return schemaRegion; - } catch (MetadataException e) { + } catch (final MetadataException e) { logger.error( String.format( "SchemaRegion [%d] in StorageGroup [%s] failed to recover.", - schemaRegionId.getId(), storageGroup.getFullPath())); + schemaRegionId.getId(), storageGroup)); throw new RuntimeException(e); } }; } private ISchemaRegion createSchemaRegionWithoutExistenceCheck( - PartialPath database, SchemaRegionId schemaRegionId) throws MetadataException { - ISchemaRegionParams schemaRegionParams = + final String database, final SchemaRegionId schemaRegionId) throws MetadataException { + final ISchemaRegionParams schemaRegionParams = new SchemaRegionParams(database, schemaRegionId, schemaEngineStatistics); - ISchemaRegion schemaRegion = schemaRegionLoader.createSchemaRegion(schemaRegionParams); + final ISchemaRegion schemaRegion = schemaRegionLoader.createSchemaRegion(schemaRegionParams); schemaMetricManager.addSchemaRegionMetric( schemaRegionId.getId(), schemaRegion.getSchemaRegionMetric()); return schemaRegion; diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/ISchemaRegionParams.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/ISchemaRegionParams.java index c83294defcb6..6082c6a69c83 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/ISchemaRegionParams.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/ISchemaRegionParams.java @@ -20,12 +20,11 @@ package org.apache.iotdb.db.schemaengine.schemaregion; import org.apache.iotdb.commons.consensus.SchemaRegionId; -import org.apache.iotdb.commons.path.PartialPath; import 
org.apache.iotdb.db.schemaengine.rescon.ISchemaEngineStatistics; public interface ISchemaRegionParams { - PartialPath getDatabase(); + String getDatabase(); SchemaRegionId getSchemaRegionId(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionParams.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionParams.java index fa5e3db8a73a..1d9f24bd34c9 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionParams.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/SchemaRegionParams.java @@ -20,28 +20,27 @@ package org.apache.iotdb.db.schemaengine.schemaregion; import org.apache.iotdb.commons.consensus.SchemaRegionId; -import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.db.schemaengine.rescon.ISchemaEngineStatistics; public class SchemaRegionParams implements ISchemaRegionParams { - private final PartialPath database; + private final String database; private final SchemaRegionId schemaRegionId; private final ISchemaEngineStatistics schemaEngineStatistics; public SchemaRegionParams( - PartialPath database, - SchemaRegionId schemaRegionId, - ISchemaEngineStatistics schemaEngineStatistics) { + final String database, + final SchemaRegionId schemaRegionId, + final ISchemaEngineStatistics schemaEngineStatistics) { this.database = database; this.schemaRegionId = schemaRegionId; this.schemaEngineStatistics = schemaEngineStatistics; } @Override - public PartialPath getDatabase() { + public String getDatabase() { return database; } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/attribute/update/GeneralRegionAttributeSecurityService.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/attribute/update/GeneralRegionAttributeSecurityService.java index cf1a03958892..3bbb53052a27 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/attribute/update/GeneralRegionAttributeSecurityService.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/attribute/update/GeneralRegionAttributeSecurityService.java @@ -31,6 +31,7 @@ import org.apache.iotdb.commons.exception.StartupException; import org.apache.iotdb.commons.service.IService; import org.apache.iotdb.commons.service.ServiceType; +import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.confignode.rpc.thrift.TShowClusterResp; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; @@ -93,9 +94,10 @@ public class GeneralRegionAttributeSecurityService implements IService { private volatile boolean allowSubmitListen = false; public void startBroadcast(final ISchemaRegion schemaRegion) { - if (schemaRegion instanceof SchemaRegionMemoryImpl) { + if (schemaRegion instanceof SchemaRegionMemoryImpl + && PathUtils.isTableModelDatabase(schemaRegion.getDatabaseFullPath())) { regionId2DatabaseMap.put( - schemaRegion.getSchemaRegionId(), schemaRegion.getDatabaseFullPath().substring(5)); + schemaRegion.getSchemaRegionId(), schemaRegion.getDatabaseFullPath()); regionLeaders.add(schemaRegion); } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionMemoryImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionMemoryImpl.java index 2b484d64df90..13624cafb335 
100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionMemoryImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionMemoryImpl.java @@ -192,6 +192,9 @@ public class SchemaRegionMemoryImpl implements ISchemaRegion { private final String storageGroupDirPath; private final String schemaRegionDirPath; + + // For table model db: without "root." + // For tree model db: with "root." private final String storageGroupFullPath; private final SchemaRegionId schemaRegionId; @@ -213,7 +216,7 @@ public class SchemaRegionMemoryImpl implements ISchemaRegion { public SchemaRegionMemoryImpl(final ISchemaRegionParams schemaRegionParams) throws MetadataException { - storageGroupFullPath = schemaRegionParams.getDatabase().getFullPath(); + storageGroupFullPath = schemaRegionParams.getDatabase(); this.schemaRegionId = schemaRegionParams.getSchemaRegionId(); storageGroupDirPath = config.getSchemaDir() + File.separator + storageGroupFullPath; @@ -265,7 +268,7 @@ public synchronized void init() throws MetadataException { tagManager = new TagManager(schemaRegionDirPath, regionStatistics); mtree = new MTreeBelowSGMemoryImpl( - PartialPath.getDatabasePath(storageGroupFullPath), + PartialPath.getQualifiedDatabasePartialPath(storageGroupFullPath), tagManager::readTags, tagManager::readAttributes, regionStatistics, @@ -564,7 +567,7 @@ public void loadSnapshot(final File latestSnapshotRootDir) { snapshotStartTime = System.currentTimeMillis(); deviceAttributeCacheUpdater = - new DeviceAttributeCacheUpdater(regionStatistics, storageGroupFullPath.substring(5)); + new DeviceAttributeCacheUpdater(regionStatistics, storageGroupFullPath); deviceAttributeCacheUpdater.loadFromSnapshot(latestSnapshotRootDir); logger.info( "Device attribute remote updater snapshot loading of schemaRegion {} costs {}ms.", @@ -872,7 +875,7 @@ public void checkSchemaQuota(final String tableName, final List device schemaQuotaManager.check( (long) DataNodeTableCache.getInstance() - .getTable(storageGroupFullPath.substring(5), tableName) + .getTable(storageGroupFullPath, tableName) .getMeasurementNum() * notExistNum, notExistNum); @@ -1394,7 +1397,7 @@ public long countPathsUsingTemplate(final int templateId, final PathPatternTree public void createOrUpdateTableDevice(final CreateOrUpdateTableDeviceNode node) throws MetadataException { for (int i = 0; i < node.getDeviceIdList().size(); i++) { - final String databaseName = storageGroupFullPath.substring(5); + final String databaseName = storageGroupFullPath; final String tableName = node.getTableName(); final String[] deviceId = Arrays.stream(node.getDeviceIdList().get(i)) diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionPBTreeImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionPBTreeImpl.java index 5e09c9e4a1a1..85038d219083 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionPBTreeImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/impl/SchemaRegionPBTreeImpl.java @@ -182,7 +182,7 @@ public class SchemaRegionPBTreeImpl implements ISchemaRegion { // region Interfaces and Implementation of initialization、snapshot、recover and clear public SchemaRegionPBTreeImpl(ISchemaRegionParams schemaRegionParams) throws MetadataException { - storageGroupFullPath = 
schemaRegionParams.getDatabase().getFullPath(); + storageGroupFullPath = schemaRegionParams.getDatabase(); this.schemaRegionId = schemaRegionParams.getSchemaRegionId(); storageGroupDirPath = config.getSchemaDir() + File.separator + storageGroupFullPath; @@ -190,9 +190,7 @@ public SchemaRegionPBTreeImpl(ISchemaRegionParams schemaRegionParams) throws Met this.regionStatistics = new CachedSchemaRegionStatistics( schemaRegionId.getId(), schemaRegionParams.getSchemaEngineStatistics()); - this.metric = - new SchemaRegionCachedMetric( - regionStatistics, schemaRegionParams.getDatabase().getFullPath()); + this.metric = new SchemaRegionCachedMetric(regionStatistics, schemaRegionParams.getDatabase()); init(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/mtree/impl/mem/MTreeBelowSGMemoryImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/mtree/impl/mem/MTreeBelowSGMemoryImpl.java index d9a09bed98d9..3611ebd29113 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/mtree/impl/mem/MTreeBelowSGMemoryImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/schemaregion/mtree/impl/mem/MTreeBelowSGMemoryImpl.java @@ -200,7 +200,7 @@ public static MTreeBelowSGMemoryImpl loadFromSnapshot( final Function, Map> attributeGetter) throws IOException, IllegalPathException { return new MTreeBelowSGMemoryImpl( - PartialPath.getDatabasePath(storageGroupFullPath), + PartialPath.getQualifiedDatabasePartialPath(storageGroupFullPath), MemMTreeStore.loadFromSnapshot( snapshotDir, measurementProcess, diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/table/DataNodeTableCache.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/table/DataNodeTableCache.java index 3d49790fc4f6..61d2ecd70fad 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/table/DataNodeTableCache.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/schemaengine/table/DataNodeTableCache.java @@ -19,7 +19,6 @@ package org.apache.iotdb.db.schemaengine.table; -import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.schema.table.TsTable; import org.apache.iotdb.commons.schema.table.TsTableInternalRPCUtil; import org.apache.iotdb.commons.utils.PathUtils; @@ -47,9 +46,6 @@ import java.util.function.Function; import java.util.stream.Collectors; -import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_ROOT; -import static org.apache.iotdb.commons.conf.IoTDBConstant.PATH_SEPARATOR; - /** It contains all tables' latest column schema */ public class DataNodeTableCache implements ITableCache { @@ -305,9 +301,7 @@ private Map> getTablesInConfigNode( .fetchTables( tableInput.entrySet().stream() .collect( - Collectors.toMap( - entry -> PathUtils.qualifyDatabaseName(entry.getKey()), - entry -> entry.getValue().keySet()))); + Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().keySet()))); if (TSStatusCode.SUCCESS_STATUS.getStatusCode() == resp.getStatus().getCode()) { result = TsTableInternalRPCUtil.deserializeTsTableFetchResult(resp.getTableInfoMap()); } @@ -408,41 +402,4 @@ public String tryGetInternColumnName( return null; } } - - /** Check whether the given path overlap with some table existence. 
*/ - public Pair checkTableCreateAndPreCreateOnGivenPath(final PartialPath path) { - readWriteLock.writeLock().lock(); - try { - final String pathString = path.getFullPath(); - Pair result = checkTableExistenceOnGivenPath(pathString, databaseTableMap); - if (result == null) { - result = checkTableExistenceOnGivenPath(pathString, preUpdateTableMap); - } - return result; - } finally { - readWriteLock.writeLock().unlock(); - } - } - - private Pair checkTableExistenceOnGivenPath( - final String path, final Map> tableMap) { - final int dbStartIndex = PATH_ROOT.length() + 1; - for (final Map.Entry> dbEntry : tableMap.entrySet()) { - final String database = dbEntry.getKey(); - if (!(path.startsWith(database, dbStartIndex) - && path.length() > dbStartIndex + database.length() - && path.charAt(dbStartIndex + database.length()) == PATH_SEPARATOR)) { - continue; - } - final int tableStartIndex = dbStartIndex + database.length() + 1; - for (final String tableName : dbEntry.getValue().keySet()) { - if (path.startsWith(tableName, tableStartIndex) - && path.length() > tableStartIndex + tableName.length() - && path.charAt(tableStartIndex + tableName.length()) == PATH_SEPARATOR) { - return new Pair<>(database, tableName); - } - } - } - return null; - } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/tools/schema/SRStatementGenerator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/tools/schema/SRStatementGenerator.java index 471cf1853fd6..934395655756 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/tools/schema/SRStatementGenerator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/tools/schema/SRStatementGenerator.java @@ -102,7 +102,8 @@ public class SRStatementGenerator implements Iterator, Iterable deviceIdList = Arrays.asList( @@ -133,7 +133,7 @@ public void testDeviceQuery() throws Exception { if (!testParams.getTestModeName().equals("MemoryMode")) { return; } - final ISchemaRegion schemaRegion = getSchemaRegion("root.db", 0); + final ISchemaRegion schemaRegion = getSchemaRegion("db", 0); final String tableName = "t"; final Map attributeMap = new HashMap<>(); @@ -204,7 +204,7 @@ public void testDeviceIdWithNull() throws Exception { if (!testParams.getTestModeName().equals("MemoryMode")) { return; } - final ISchemaRegion schemaRegion = getSchemaRegion("root.db", 0); + final ISchemaRegion schemaRegion = getSchemaRegion("db", 0); final String tableName = "t"; final Map attributeMap = new HashMap<>(); @@ -280,7 +280,7 @@ public void testDeviceWithDifferentIdLength() throws Exception { if (!testParams.getTestModeName().equals("MemoryMode")) { return; } - final ISchemaRegion schemaRegion = getSchemaRegion("root.db", 0); + final ISchemaRegion schemaRegion = getSchemaRegion("db", 0); final String tableName = "t"; final Map attributeMap = new HashMap<>(); @@ -325,7 +325,7 @@ public void testMultiTableDevice() throws Exception { if (!testParams.getTestModeName().equals("MemoryMode")) { return; } - final ISchemaRegion schemaRegion = getSchemaRegion("root.db", 0); + final ISchemaRegion schemaRegion = getSchemaRegion("db", 0); final String tableName1 = "t1"; final Map attributeMap = new HashMap<>(); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaRegionTestUtil.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaRegionTestUtil.java index 5a700d74d773..1c5c40207d5d 100644 --- 
a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaRegionTestUtil.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaRegionTestUtil.java @@ -57,7 +57,6 @@ import static org.apache.iotdb.commons.conf.IoTDBConstant.ONE_LEVEL_PATH_WILDCARD; import static org.apache.iotdb.commons.schema.SchemaConstant.ALL_MATCH_SCOPE; -import static org.apache.iotdb.commons.schema.SchemaConstant.ROOT; public class SchemaRegionTestUtil { @@ -455,7 +454,7 @@ public static List getTableDevice( final List idDeterminedFilterList) { final List patternList = DeviceFilterUtil.convertToDevicePattern( - schemaRegion.getDatabaseFullPath().substring(ROOT.length() + 1), + schemaRegion.getDatabaseFullPath(), table, idColumnNum, Collections.singletonList(idDeterminedFilterList)); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaStatisticsTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaStatisticsTest.java index b0432c15e9ee..61d641f757d9 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaStatisticsTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/metadata/schemaRegion/SchemaStatisticsTest.java @@ -405,7 +405,7 @@ public void testTableDeviceStatistics() throws Exception { if (!testParams.getTestModeName().equals("MemoryMode")) { return; } - final ISchemaRegion schemaRegion = getSchemaRegion("root.db", 0); + final ISchemaRegion schemaRegion = getSchemaRegion("db", 0); final String tableName1 = "t1"; final Map attributeMap = new HashMap<>(); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/analyze/FakePartitionFetcherImpl.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/analyze/FakePartitionFetcherImpl.java index b7e4ac4567f4..338a05b45759 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/analyze/FakePartitionFetcherImpl.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/analyze/FakePartitionFetcherImpl.java @@ -311,9 +311,4 @@ public SchemaPartition getOrCreateSchemaPartition( public SchemaPartition getSchemaPartition(String database, List deviceIDList) { return null; } - - @Override - public SchemaPartition getSchemaPartition(String database) { - return null; - } } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/PartitionCacheTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/PartitionCacheTest.java index 725860eb70d2..c198c3971b45 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/PartitionCacheTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/analyze/cache/PartitionCacheTest.java @@ -32,7 +32,6 @@ import org.apache.iotdb.db.auth.AuthorityChecker; import org.apache.iotdb.db.conf.IoTDBConfig; import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.metadata.DatabaseModelException; import org.apache.iotdb.db.queryengine.plan.analyze.cache.partition.PartitionCache; import org.apache.tsfile.file.metadata.IDeviceID; @@ -49,8 +48,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.Function; -import java.util.stream.Collectors; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -151,8 
+148,7 @@ private static String getDeviceName(String storageGroupName, int deviceNumber) { @Before public void setUp() throws Exception { partitionCache = new PartitionCache(); - partitionCache.updateDatabaseCache( - storageGroups.stream().collect(Collectors.toMap(Function.identity(), k -> false))); + partitionCache.updateDatabaseCache(storageGroups); partitionCache.updateSchemaPartitionCache(schemaPartitionTable); partitionCache.updateDataPartitionCache(dataPartitionTable); partitionCache.updateGroupIdToReplicaSetMap(100, consensusGroupIdToRegionReplicaSet); @@ -164,7 +160,7 @@ public void tearDown() throws Exception { } @Test - public void testStorageGroupCache() throws DatabaseModelException { + public void testStorageGroupCache() { Map> storageGroupToDeviceMap; Map deviceToStorageGroupMap; // test devices in one database diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/Util.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/Util.java index 9f4930eb54d7..8fcc6ae7972c 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/Util.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/Util.java @@ -431,11 +431,6 @@ public SchemaPartition getOrCreateSchemaPartition( public SchemaPartition getSchemaPartition(String database, List deviceIDList) { return null; } - - @Override - public SchemaPartition getSchemaPartition(String database) { - return null; - } }; } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/Util2.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/Util2.java index 312f257309fa..42e8dc1f2c25 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/Util2.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/Util2.java @@ -324,11 +324,6 @@ public SchemaPartition getOrCreateSchemaPartition( public SchemaPartition getSchemaPartition(String database, List deviceIDList) { return null; } - - @Override - public SchemaPartition getSchemaPartition(String database) { - return null; - } }; } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AnalyzerTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AnalyzerTest.java index 5e5f49e0b5d5..fc20d094add4 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AnalyzerTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/AnalyzerTest.java @@ -29,7 +29,6 @@ import org.apache.iotdb.commons.partition.DataPartition; import org.apache.iotdb.commons.partition.DataPartitionQueryParam; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; -import org.apache.iotdb.commons.utils.PathUtils; import org.apache.iotdb.db.protocol.session.IClientSession; import org.apache.iotdb.db.queryengine.common.MPPQueryContext; import org.apache.iotdb.db.queryengine.common.QueryId; @@ -974,7 +973,7 @@ public void limitEliminationTest() { assertTrue( getChildrenNode(distributedQueryPlan.getFragments().get(0).getPlanNodeTree(), 4) instanceof CollectNode); - CollectNode collectNode = + final CollectNode collectNode = (CollectNode) 
getChildrenNode(distributedQueryPlan.getFragments().get(0).getPlanNodeTree(), 4); assertTrue(collectNode.getChildren().get(1) instanceof DeviceTableScanNode); @@ -1030,7 +1029,7 @@ public void limitEliminationTest() { new TableDistributedPlanner( analysis, symbolAllocator, logicalQueryPlan, TEST_MATADATA, null); distributedQueryPlan = distributionPlanner.plan(); - List fragments = distributedQueryPlan.getFragments(); + final List fragments = distributedQueryPlan.getFragments(); identitySinkNode = (IdentitySinkNode) fragments.get(0).getPlanNodeTree(); assertTrue(getChildrenNode(identitySinkNode, 3) instanceof LimitNode); assertTrue(getChildrenNode(identitySinkNode, 4) instanceof DeviceTableScanNode); @@ -1104,27 +1103,27 @@ public void validateDeviceSchema( @Override public DataPartition getOrCreateDataPartition( - List dataPartitionQueryParams, String userName) { - int seriesSlotNum = StatementTestUtils.TEST_SERIES_SLOT_NUM; - String partitionExecutorName = StatementTestUtils.TEST_PARTITION_EXECUTOR; - SeriesPartitionExecutor seriesPartitionExecutor = + final List dataPartitionQueryParams, final String userName) { + final int seriesSlotNum = StatementTestUtils.TEST_SERIES_SLOT_NUM; + final String partitionExecutorName = StatementTestUtils.TEST_PARTITION_EXECUTOR; + final SeriesPartitionExecutor seriesPartitionExecutor = SeriesPartitionExecutor.getSeriesPartitionExecutor( partitionExecutorName, seriesSlotNum); - Map>>> + final Map< + String, Map>>> dataPartitionMap = new HashMap<>(); - for (DataPartitionQueryParam dataPartitionQueryParam : dataPartitionQueryParams) { - String databaseName = dataPartitionQueryParam.getDatabaseName(); - assertEquals("root." + sessionInfo.getDatabaseName().get(), databaseName); - databaseName = PathUtils.qualifyDatabaseName(databaseName); + for (final DataPartitionQueryParam dataPartitionQueryParam : dataPartitionQueryParams) { + final String databaseName = dataPartitionQueryParam.getDatabaseName(); + assertEquals(sessionInfo.getDatabaseName().get(), databaseName); - String tableName = dataPartitionQueryParam.getDeviceID().getTableName(); + final String tableName = dataPartitionQueryParam.getDeviceID().getTableName(); assertEquals(StatementTestUtils.tableName(), tableName); - TSeriesPartitionSlot partitionSlot = + final TSeriesPartitionSlot partitionSlot = seriesPartitionExecutor.getSeriesPartitionSlot(dataPartitionQueryParam.getDeviceID()); - for (TTimePartitionSlot tTimePartitionSlot : + for (final TTimePartitionSlot tTimePartitionSlot : dataPartitionQueryParam.getTimePartitionSlotList()) { dataPartitionMap .computeIfAbsent(databaseName, d -> new HashMap<>()) @@ -1162,7 +1161,7 @@ public void analyzeInsertTablet() { analysis .getDataPartition() .getDataPartitionMap() - .get(PathUtils.qualifyDatabaseName(sessionInfo.getDatabaseName().orElse(null))); + .get(sessionInfo.getDatabaseName().orElse(null)); assertEquals(3, partitionSlotMapMap.size()); SymbolAllocator symbolAllocator = new SymbolAllocator(); @@ -1195,9 +1194,9 @@ public void analyzeInsertTablet() { @Test public void analyzeInsertRow() { - Metadata mockMetadata = mockMetadataForInsertion(); + final Metadata mockMetadata = mockMetadataForInsertion(); - InsertRowStatement insertStatement = StatementTestUtils.genInsertRowStatement(true); + final InsertRowStatement insertStatement = StatementTestUtils.genInsertRowStatement(true); context = new MPPQueryContext("", queryId, sessionInfo, null, null); analysis = analyzeStatement( @@ -1207,26 +1206,28 @@ public void analyzeInsertRow() { new SqlParser(), 
sessionInfo); assertEquals(1, analysis.getDataPartition().getDataPartitionMap().size()); - Map>> + assertEquals(1, analysis.getDataPartition().getDataPartitionMap().size()); + final Map>> partitionSlotMapMap = analysis .getDataPartition() .getDataPartitionMap() - .get(PathUtils.qualifyDatabaseName(sessionInfo.getDatabaseName().orElse(null))); + .get(sessionInfo.getDatabaseName().orElse(null)); assertEquals(1, partitionSlotMapMap.size()); - SymbolAllocator symbolAllocator = new SymbolAllocator(); + final SymbolAllocator symbolAllocator = new SymbolAllocator(); logicalQueryPlan = new TableLogicalPlanner( context, mockMetadata, sessionInfo, symbolAllocator, WarningCollector.NOOP) .plan(analysis); - RelationalInsertRowNode insertNode = (RelationalInsertRowNode) logicalQueryPlan.getRootNode(); + final RelationalInsertRowNode insertNode = + (RelationalInsertRowNode) logicalQueryPlan.getRootNode(); assertEquals(insertNode.getTableName(), StatementTestUtils.tableName()); - Object[] columns = StatementTestUtils.genValues(0); + final Object[] columns = StatementTestUtils.genValues(0); assertEquals( Factory.DEFAULT_FACTORY.create( - new String[] {StatementTestUtils.tableName(), ((Binary) columns[0]).toString()}), + new String[] {StatementTestUtils.tableName(), columns[0].toString()}), insertNode.getDeviceID()); assertArrayEquals(columns, insertNode.getValues()); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/MockTableModelDataPartition.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/MockTableModelDataPartition.java index db6e1addeb0a..c536d21f17df 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/MockTableModelDataPartition.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/MockTableModelDataPartition.java @@ -48,7 +48,7 @@ public class MockTableModelDataPartition { IoTDBDescriptor.getInstance().getConfig().getSeriesPartitionExecutorClass(), IoTDBDescriptor.getInstance().getConfig().getSeriesPartitionSlotNum()); - private static final String DB_NAME = "root.testdb"; + private static final String DB_NAME = "testdb"; static final String DEVICE_1 = "table1.beijing.A1.ZZ"; static final String DEVICE_2 = "table1.beijing.A2.XX"; @@ -139,25 +139,26 @@ public static DataPartition constructDataPartition() { } public static SchemaPartition constructSchemaPartition() { - SchemaPartition schemaPartition = + final SchemaPartition schemaPartition = new SchemaPartition( IoTDBDescriptor.getInstance().getConfig().getSeriesPartitionExecutorClass(), IoTDBDescriptor.getInstance().getConfig().getSeriesPartitionSlotNum()); - Map> schemaPartitionMap = new HashMap<>(); + final Map> schemaPartitionMap = + new HashMap<>(); - TRegionReplicaSet schemaRegion1 = + final TRegionReplicaSet schemaRegion1 = new TRegionReplicaSet( new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 11), Arrays.asList( genDataNodeLocation(11, "192.0.1.1"), genDataNodeLocation(12, "192.0.1.2"))); - TRegionReplicaSet schemaRegion2 = + final TRegionReplicaSet schemaRegion2 = new TRegionReplicaSet( new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 21), Arrays.asList( genDataNodeLocation(21, "192.0.2.1"), genDataNodeLocation(22, "192.0.2.2"))); - Map schemaRegionMap = new HashMap<>(); + final Map schemaRegionMap = new HashMap<>(); schemaRegionMap.put(EXECUTOR.getSeriesPartitionSlot(DEVICE_1), schemaRegion1); 
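// The mock above now keys its partition maps by the unqualified table-model database name
// ("testdb" rather than "root.testdb"), which is why the AnalyzerTest assertions earlier in this
// patch compare sessionInfo.getDatabaseName() with the partition key directly instead of going
// through PathUtils.qualifyDatabaseName. A minimal stand-alone sketch of that contract; a plain
// HashMap stands in for the mocked partition map and the names below are illustrative only.
import java.util.HashMap;
import java.util.Map;

class UnqualifiedDbKeySketch {
  public static void main(final String[] args) {
    final Map<String, String> partitionMap = new HashMap<>();
    // The mock stores the table-model database under its unqualified name.
    partitionMap.put("testdb", "dataRegionGroup-1");

    final String sessionDb = "testdb"; // what sessionInfo.getDatabaseName() would return
    System.out.println(partitionMap.containsKey(sessionDb)); // true
    // The tree-style qualified form no longer matches any key.
    System.out.println(partitionMap.containsKey("root." + sessionDb)); // false
  }
}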
schemaRegionMap.put(EXECUTOR.getSeriesPartitionSlot(DEVICE_2), schemaRegion2); schemaRegionMap.put(EXECUTOR.getSeriesPartitionSlot(DEVICE_3), schemaRegion2); @@ -168,7 +169,7 @@ public static SchemaPartition constructSchemaPartition() { } private static TRegionReplicaSet genDataRegionGroup( - int regionGroupId, int dataNodeId1, int dataNodeId2) { + final int regionGroupId, final int dataNodeId1, final int dataNodeId2) { return new TRegionReplicaSet( new TConsensusGroupId(TConsensusGroupType.DataRegion, regionGroupId), Arrays.asList( @@ -176,7 +177,7 @@ private static TRegionReplicaSet genDataRegionGroup( genDataNodeLocation(dataNodeId2, String.format("192.0.%s.2", regionGroupId)))); } - public static TDataNodeLocation genDataNodeLocation(int dataNodeId, String ip) { + public static TDataNodeLocation genDataNodeLocation(final int dataNodeId, final String ip) { return new TDataNodeLocation() .setDataNodeId(dataNodeId) .setClientRpcEndPoint(new TEndPoint(ip, 9000)) diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/TSBSMetadata.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/TSBSMetadata.java index 2747a9e752a0..a34129a73f88 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/TSBSMetadata.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/TSBSMetadata.java @@ -451,11 +451,6 @@ public SchemaPartition getOrCreateSchemaPartition( public SchemaPartition getSchemaPartition(String database, List deviceIDList) { return SCHEMA_PARTITION; } - - @Override - public SchemaPartition getSchemaPartition(String database) { - return SCHEMA_PARTITION; - } }; } } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/TestMatadata.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/TestMatadata.java index 6a6257fb2264..99d97626c9b2 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/TestMatadata.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/relational/analyzer/TestMatadata.java @@ -116,19 +116,20 @@ public class TestMatadata implements Metadata { public static final String TABLE2 = "table2"; @Override - public boolean tableExists(QualifiedObjectName name) { + public boolean tableExists(final QualifiedObjectName name) { return name.getDatabaseName().equalsIgnoreCase(DB1) && name.getObjectName().equalsIgnoreCase(TABLE1); } @Override - public Optional getTableSchema(SessionInfo session, QualifiedObjectName name) { + public Optional getTableSchema( + final SessionInfo session, final QualifiedObjectName name) { if (name.getDatabaseName().equals(INFORMATION_SCHEMA)) { - TsTable table = InformationSchemaTable.getTableFromStringValue(name.getObjectName()); + final TsTable table = InformationSchemaTable.getTableFromStringValue(name.getObjectName()); if (table == null) { return Optional.empty(); } - List columnSchemaList = + final List columnSchemaList = table.getColumnList().stream() .map( o -> @@ -141,7 +142,7 @@ public Optional getTableSchema(SessionInfo session, QualifiedObject return Optional.of(new TableSchema(table.getTableName(), columnSchemaList)); } - List columnSchemas = + final List columnSchemas = Arrays.asList( ColumnSchema.builder(TIME_CM).setColumnCategory(TsTableColumnCategory.TIME).build(), 
ColumnSchema.builder(TAG1_CM).setColumnCategory(TsTableColumnCategory.ID).build(), @@ -167,7 +168,8 @@ public Optional getTableSchema(SessionInfo session, QualifiedObject } @Override - public Type getOperatorReturnType(OperatorType operatorType, List argumentTypes) + public Type getOperatorReturnType( + final OperatorType operatorType, final List argumentTypes) throws OperatorNotFoundException { switch (operatorType) { @@ -208,24 +210,25 @@ public Type getOperatorReturnType(OperatorType operatorType, List argumentTypes) { + public Type getFunctionReturnType( + final String functionName, final List argumentTypes) { return getFunctionType(functionName, argumentTypes); } @Override public boolean isAggregationFunction( - SessionInfo session, String functionName, AccessControl accessControl) { + final SessionInfo session, final String functionName, final AccessControl accessControl) { return BuiltinAggregationFunction.getNativeFunctionNames() .contains(functionName.toLowerCase(Locale.ENGLISH)); } @Override - public Type getType(TypeSignature signature) throws TypeNotFoundException { + public Type getType(final TypeSignature signature) throws TypeNotFoundException { return typeManager.getType(signature); } @Override - public boolean canCoerce(Type from, Type to) { + public boolean canCoerce(final Type from, final Type to) { return true; } @@ -236,10 +239,10 @@ public IPartitionFetcher getPartitionFetcher() { @Override public List indexScan( - QualifiedObjectName tableName, - List expressionList, - List attributeColumns, - MPPQueryContext context) { + final QualifiedObjectName tableName, + final List expressionList, + final List attributeColumns, + final MPPQueryContext context) { if (expressionList.size() == 2) { if (compareEqualsMatch(expressionList.get(0), "tag1", "beijing") @@ -290,12 +293,13 @@ && compareEqualsMatch(expressionList.get(0), "tag2", "B3")) { new DeviceEntry(new StringArrayDeviceID(DEVICE_2.split("\\.")), DEVICE_2_ATTRIBUTES)); } - private boolean compareEqualsMatch(Expression expression, String idOrAttr, String value) { + private boolean compareEqualsMatch( + final Expression expression, final String idOrAttr, final String value) { if (expression instanceof ComparisonExpression && ((ComparisonExpression) expression).getOperator() == ComparisonExpression.Operator.EQUAL) { - Expression leftExpression = ((ComparisonExpression) expression).getLeft(); - Expression rightExpression = ((ComparisonExpression) expression).getRight(); + final Expression leftExpression = ((ComparisonExpression) expression).getLeft(); + final Expression rightExpression = ((ComparisonExpression) expression).getRight(); if (leftExpression instanceof SymbolReference && rightExpression instanceof StringLiteral) { return ((SymbolReference) leftExpression).getName().equalsIgnoreCase(idOrAttr) && ((StringLiteral) rightExpression).getValue().equalsIgnoreCase(value); @@ -309,12 +313,13 @@ private boolean compareEqualsMatch(Expression expression, String idOrAttr, Strin return false; } - private boolean compareNotEqualsMatch(Expression expression, String idOrAttr, String value) { + private boolean compareNotEqualsMatch( + final Expression expression, final String idOrAttr, final String value) { if (expression instanceof ComparisonExpression && ((ComparisonExpression) expression).getOperator() == ComparisonExpression.Operator.NOT_EQUAL) { - Expression leftExpression = ((ComparisonExpression) expression).getLeft(); - Expression rightExpression = ((ComparisonExpression) expression).getRight(); + final Expression 
leftExpression = ((ComparisonExpression) expression).getLeft(); + final Expression rightExpression = ((ComparisonExpression) expression).getRight(); if (leftExpression instanceof SymbolReference && rightExpression instanceof StringLiteral) { return ((SymbolReference) leftExpression).getName().equalsIgnoreCase(idOrAttr) && ((StringLiteral) rightExpression).getValue().equalsIgnoreCase(value); @@ -330,45 +335,46 @@ private boolean compareNotEqualsMatch(Expression expression, String idOrAttr, St @Override public Optional validateTableHeaderSchema( - String database, - TableSchema tableSchema, - MPPQueryContext context, - boolean allowCreateTable, - boolean isStrictIdColumn) { + final String database, + final TableSchema tableSchema, + final MPPQueryContext context, + final boolean allowCreateTable, + final boolean isStrictIdColumn) { throw new UnsupportedOperationException(); } @Override public void validateDeviceSchema( - ITableDeviceSchemaValidation schemaValidation, MPPQueryContext context) { + final ITableDeviceSchemaValidation schemaValidation, final MPPQueryContext context) { throw new UnsupportedOperationException(); } @Override public SchemaPartition getOrCreateSchemaPartition( - String database, List deviceIDList, String userName) { + final String database, final List deviceIDList, final String userName) { return null; } @Override - public SchemaPartition getSchemaPartition(String database, List deviceIDList) { + public SchemaPartition getSchemaPartition( + final String database, final List deviceIDList) { return null; } @Override - public SchemaPartition getSchemaPartition(String database) { + public SchemaPartition getSchemaPartition(final String database) { return null; } @Override public DataPartition getDataPartition( - String database, List sgNameToQueryParamsMap) { + final String database, final List sgNameToQueryParamsMap) { return DATA_PARTITION; } @Override public DataPartition getDataPartitionWithUnclosedTimeRange( - String database, List sgNameToQueryParamsMap) { + final String database, final List sgNameToQueryParamsMap) { return DATA_PARTITION; } @@ -441,11 +447,6 @@ public SchemaPartition getOrCreateSchemaPartition( public SchemaPartition getSchemaPartition(String database, List deviceIDList) { return SCHEMA_PARTITION; } - - @Override - public SchemaPartition getSchemaPartition(String database) { - return SCHEMA_PARTITION; - } }; } } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/service/DataNodeInternalRPCServiceImplTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/service/DataNodeInternalRPCServiceImplTest.java index b80057b43c1f..0ecca76aa845 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/service/DataNodeInternalRPCServiceImplTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/service/DataNodeInternalRPCServiceImplTest.java @@ -80,8 +80,7 @@ public static void setUpBeforeClass() throws IOException, MetadataException { conf.setDataNodeId(dataNodeId); SchemaEngine.getInstance().init(); - SchemaEngine.getInstance() - .createSchemaRegion(new PartialPath("root.ln"), new SchemaRegionId(0)); + SchemaEngine.getInstance().createSchemaRegion("root.ln", new SchemaRegionId(0)); DataRegionConsensusImpl.getInstance().start(); SchemaRegionConsensusImpl.getInstance().start(); DataNodeRegionManager.getInstance().init(); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/tools/MLogParserTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/tools/MLogParserTest.java index 
7d6149810eb8..244e1d459f9e 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/tools/MLogParserTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/tools/MLogParserTest.java @@ -91,8 +91,7 @@ private void prepareData() throws Exception { SchemaEngine schemaEngine = SchemaEngine.getInstance(); for (int i = 0; i < storageGroups.length; i++) { SchemaEngine.getInstance() - .createSchemaRegion( - new PartialPath(storageGroups[i]), new SchemaRegionId(schemaRegionIds[i])); + .createSchemaRegion(storageGroups[i], new SchemaRegionId(schemaRegionIds[i])); } for (int i = 0; i < 2; i++) { @@ -140,7 +139,7 @@ private void prepareData() throws Exception { try { SchemaEngine.getInstance() - .createSchemaRegion(new PartialPath("root.sg"), new SchemaRegionId(schemaRegionIds[2])); + .createSchemaRegion("root.sg", new SchemaRegionId(schemaRegionIds[2])); } catch (MetadataException e) { e.printStackTrace(); } diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/utils/SchemaRegionSnapshotParserTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/utils/SchemaRegionSnapshotParserTest.java index 1a3454192b14..92129f9a225a 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/utils/SchemaRegionSnapshotParserTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/utils/SchemaRegionSnapshotParserTest.java @@ -129,14 +129,15 @@ protected void cleanEnv() throws IOException { FileUtils.deleteDirectory(new File(IoTDBDescriptor.getInstance().getConfig().getSchemaDir())); } - public SchemaRegionSnapshotParserTest(SchemaRegionSnapshotParserTestParams params) { + public SchemaRegionSnapshotParserTest(final SchemaRegionSnapshotParserTestParams params) { this.testParams = params; } - public ISchemaRegion getSchemaRegion(String database, int schemaRegionId) throws Exception { - SchemaRegionId regionId = new SchemaRegionId(schemaRegionId); + public ISchemaRegion getSchemaRegion(final String database, final int schemaRegionId) + throws Exception { + final SchemaRegionId regionId = new SchemaRegionId(schemaRegionId); if (SchemaEngine.getInstance().getSchemaRegion(regionId) == null) { - SchemaEngine.getInstance().createSchemaRegion(new PartialPath(database), regionId); + SchemaEngine.getInstance().createSchemaRegion(database, regionId); } return SchemaEngine.getInstance().getSchemaRegion(regionId); } @@ -488,42 +489,42 @@ public void testComplicatedSnapshotParser() throws Exception { // ---------------------------------------------------------------------- // Schema Tree // ---------------------------------------------------------------------- - // This test will construct a complicated mtree. This tree will have - // aligned timeseries, tags and attributes, normal timeseries device template. + // This test will construct a complicated mTree. This tree will have + // aligned timeSeries, tags and attributes, normal timeSeries device template. 
// // // - // status(BOOLEAN, RLE) alias(stat) - // / - // t2------temperature(INT64, TS_2DIFF,LZ4) - // / - // sg1------s1------t1(activate template: t1) - // / - // root ->| - // \ - // sg2-------t1(aligned)------status(INT64, TS_2DIFF, LZMA2){attr1:atr1} - // \ - // t2-------level{tags:"tag1"="t1", attributes: "attri1"="attr1"} + // status(BOOLEAN, RLE) alias(stat) + // / + // t2------temperature(INT64, TS_2DIFF,LZ4) + // / + // sg1------s1------t1(activate template: t1) + // / + // root -> db ->| // \ - // t1(aligned)-------temperature(INT32, TS_2DIFF, LZ4){attributes:"attr1"="a1"} - // \ - // level(INT32m RLE){tags:"tag1"="t1"} alias(lev) + // sg2-------t1(aligned)------status(INT64, TS_2DIFF, LZMA2){attr1:atr1} + // \ + // t2-------level{tags:"tag1"="t1", attributes: "attri1"="attr1"} + // \ + // t1(aligned)-------temperature(INT32, TS_2DIFF, LZ4){attributes:"attr1"="a1"} + // \ + // level(INT32m RLE){tags:"tag1"="t1"} alias(lev) // // - ISchemaRegion schemaRegion = getSchemaRegion("root", 0); - PartialPath databasePath = new PartialPath("root"); - Template template = new Template(); + final ISchemaRegion schemaRegion = getSchemaRegion("root.db", 0); + final PartialPath databasePath = new PartialPath("root.db"); + final Template template = new Template(); template.setId(1); template.addMeasurement("date", TSDataType.INT64, TSEncoding.RLE, CompressionType.UNCOMPRESSED); - HashMap planMap = new HashMap<>(); + final HashMap planMap = new HashMap<>(); planMap.put( - "root.sg1.s1.t1", + "root.db.sg1.s1.t1", SchemaRegionWritePlanFactory.getActivateTemplateInClusterPlan( - new PartialPath("root.sg1.s1.t1"), 3, 1)); + new PartialPath("root.db.sg1.s1.t1"), 3, 1)); planMap.put( - "root.sg1.s1.t2.temperature", + "root.db.sg1.s1.t2.temperature", SchemaRegionWritePlanFactory.getCreateTimeSeriesPlan( - new MeasurementPath("root.sg1.s1.t2.temperature"), + new MeasurementPath("root.db.sg1.s1.t2.temperature"), TSDataType.INT64, TSEncoding.TS_2DIFF, CompressionType.LZ4, @@ -532,9 +533,9 @@ public void testComplicatedSnapshotParser() throws Exception { null, null)); planMap.put( - "root.sg1.s1.t2.status", + "root.db.sg1.s1.t2.status", SchemaRegionWritePlanFactory.getCreateTimeSeriesPlan( - new MeasurementPath("root.sg1.s1.t2.status"), + new MeasurementPath("root.db.sg1.s1.t2.status"), TSDataType.BOOLEAN, TSEncoding.RLE, CompressionType.SNAPPY, @@ -543,9 +544,9 @@ public void testComplicatedSnapshotParser() throws Exception { null, "statusA")); planMap.put( - "root.sg2.t1", + "root.db.sg2.t1", SchemaRegionWritePlanFactory.getCreateAlignedTimeSeriesPlan( - new PartialPath("root.sg2.t1"), + new PartialPath("root.db.sg2.t1"), new ArrayList() { { add("status"); @@ -587,9 +588,9 @@ public void testComplicatedSnapshotParser() throws Exception { } })); planMap.put( - "root.sg2.t2.level", + "root.db.sg2.t2.level", SchemaRegionWritePlanFactory.getCreateTimeSeriesPlan( - new MeasurementPath("root.sg2.t2.level"), + new MeasurementPath("root.db.sg2.t2.level"), TSDataType.INT64, TSEncoding.RLE, CompressionType.UNCOMPRESSED, @@ -606,9 +607,9 @@ public void testComplicatedSnapshotParser() throws Exception { }, null)); planMap.put( - "root.sg2.t2.t1", + "root.db.sg2.t2.t1", SchemaRegionWritePlanFactory.getCreateAlignedTimeSeriesPlan( - new PartialPath("root.sg2.t2.t1"), + new PartialPath("root.db.sg2.t2.t1"), new ArrayList() { { add("temperature"); @@ -661,7 +662,7 @@ public void testComplicatedSnapshotParser() throws Exception { add(new HashMap<>()); } })); - for (ISchemaRegionPlan plan : planMap.values()) { + for 
(final ISchemaRegionPlan plan : planMap.values()) { if (plan instanceof ICreateTimeSeriesPlan) { schemaRegion.createTimeSeries((ICreateTimeSeriesPlan) plan, -1); } else if (plan instanceof ICreateAlignedTimeSeriesPlan) { @@ -671,11 +672,11 @@ public void testComplicatedSnapshotParser() throws Exception { } } - File snapshotDir = new File(config.getSchemaDir() + File.separator + "snapshot"); + final File snapshotDir = new File(config.getSchemaDir() + File.separator + "snapshot"); snapshotDir.mkdir(); schemaRegion.createSnapshot(snapshotDir); - SRStatementGenerator statements = + final SRStatementGenerator statements = SchemaRegionSnapshotParser.translate2Statements( Paths.get( config.getSchemaDir() @@ -692,10 +693,10 @@ public void testComplicatedSnapshotParser() throws Exception { databasePath); assert statements != null; int count = 0; - Comparator comparator = + final Comparator comparator = new Comparator() { @Override - public int compare(String o1, String o2) { + public int compare(final String o1, final String o2) { if (o1 == null && o2 == null) { return 0; } else if (o1 == null) { @@ -707,11 +708,11 @@ public int compare(String o1, String o2) { } } }; - for (Statement stmt : statements) { + for (final Statement stmt : statements) { if (stmt instanceof CreateAlignedTimeSeriesStatement) { - CreateAlignedTimeSeriesStatement createAlignedTimeSeriesStatement = + final CreateAlignedTimeSeriesStatement createAlignedTimeSeriesStatement = (CreateAlignedTimeSeriesStatement) stmt; - ICreateAlignedTimeSeriesPlan plan = + final ICreateAlignedTimeSeriesPlan plan = (ICreateAlignedTimeSeriesPlan) planMap.get(createAlignedTimeSeriesStatement.getDevicePath().toString()); Assert.assertNotNull(plan); diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartition.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartition.java index 1420b71b7f73..347a8363a928 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartition.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/DataPartition.java @@ -139,9 +139,9 @@ public List> getTimePartitionRange( } public List getDataRegionReplicaSetWithTimeFilter( - IDeviceID deviceId, Filter timeFilter) { - String storageGroup = getDatabaseNameByDevice(deviceId); - TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceId); + final IDeviceID deviceId, final Filter timeFilter) { + final String storageGroup = getDatabaseNameByDevice(deviceId); + final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceId); if (!dataPartitionMap.containsKey(storageGroup) || !dataPartitionMap.get(storageGroup).containsKey(seriesPartitionSlot)) { return Collections.singletonList(NOT_ASSIGNED); @@ -163,10 +163,8 @@ public List getDataRegionReplicaSetWithTimeFilter( *
The device id shall be [table, seg1, ....] */ public List getDataRegionReplicaSetWithTimeFilter( - String database, IDeviceID deviceId, Filter timeFilter) { - // TODO perfect this interface, @Potato - database = PathUtils.qualifyDatabaseName(database); - TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceId); + final String database, final IDeviceID deviceId, final Filter timeFilter) { + final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceId); if (!dataPartitionMap.containsKey(database) || !dataPartitionMap.get(database).containsKey(seriesPartitionSlot)) { return Collections.singletonList(NOT_ASSIGNED); @@ -181,20 +179,21 @@ public List getDataRegionReplicaSetWithTimeFilter( } public List getDataRegionReplicaSet( - IDeviceID deviceID, TTimePartitionSlot tTimePartitionSlot) { - String storageGroup = getDatabaseNameByDevice(deviceID); - Map>> dbMap = + final IDeviceID deviceID, final TTimePartitionSlot tTimePartitionSlot) { + final String storageGroup = getDatabaseNameByDevice(deviceID); + final Map>> dbMap = dataPartitionMap.get(storageGroup); if (dbMap == null) { return Collections.singletonList(NOT_ASSIGNED); } - TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); - Map> seriesSlotMap = dbMap.get(seriesPartitionSlot); + final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + final Map> seriesSlotMap = + dbMap.get(seriesPartitionSlot); if (seriesSlotMap == null) { return Collections.singletonList(NOT_ASSIGNED); } - List regionReplicaSets = seriesSlotMap.get(tTimePartitionSlot); + final List regionReplicaSets = seriesSlotMap.get(tTimePartitionSlot); if (regionReplicaSets == null) { return Collections.singletonList(NOT_ASSIGNED); @@ -204,23 +203,25 @@ public List getDataRegionReplicaSet( } public List getDataRegionReplicaSetForWriting( - IDeviceID deviceID, List timePartitionSlotList, String databaseName) { + final IDeviceID deviceID, + final List timePartitionSlotList, + String databaseName) { if (databaseName == null) { databaseName = getDatabaseNameByDevice(deviceID); } // A list of data region replica sets will store data in a same time partition. // We will insert data to the last set in the list. 
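// The DataPartition lookups above are a three-level map walk: database -> series partition slot
// -> time partition slot -> replica sets, falling back to NOT_ASSIGNED when a level is missing.
// With PathUtils.qualifyDatabaseName removed, the database key is used exactly as the caller
// supplies it. A simplified, self-contained sketch of that shape; plain Java types stand in for
// the Thrift slot and replica-set classes, and all names are illustrative.
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class ReplicaLookupSketch {
  static final String NOT_ASSIGNED = "NOT_ASSIGNED";

  static List<String> lookup(
      final Map<String, Map<Integer, Map<Long, List<String>>>> dataPartitionMap,
      final String database, // passed through verbatim after this patch
      final int seriesSlot,
      final long timeSlot) {
    final Map<Integer, Map<Long, List<String>>> dbMap = dataPartitionMap.get(database);
    if (dbMap == null || dbMap.get(seriesSlot) == null) {
      return Collections.singletonList(NOT_ASSIGNED);
    }
    final List<String> regions = dbMap.get(seriesSlot).get(timeSlot);
    return regions == null ? Collections.singletonList(NOT_ASSIGNED) : regions;
  }

  public static void main(final String[] args) {
    final Map<String, Map<Integer, Map<Long, List<String>>>> dataPartitionMap = new HashMap<>();
    dataPartitionMap
        .computeIfAbsent("testdb", db -> new HashMap<>())
        .computeIfAbsent(7, slot -> new HashMap<>())
        .put(1700000000000L, Collections.singletonList("dataRegionGroup-42"));

    System.out.println(lookup(dataPartitionMap, "testdb", 7, 1700000000000L)); // [dataRegionGroup-42]
    // A tree-style qualified name would miss the table-model entry.
    System.out.println(lookup(dataPartitionMap, "root.testdb", 7, 1700000000000L)); // [NOT_ASSIGNED]
  }
}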
// TODO return the latest dataRegionReplicaSet for each time partition - TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); // IMPORTANT TODO: (xingtanzjr) need to handle the situation for write operation that there are // more than 1 Regions for one timeSlot - List dataRegionReplicaSets = new ArrayList<>(); - Map>> - dataBasePartitionMap = dataPartitionMap.get(PathUtils.qualifyDatabaseName(databaseName)); - Map> slotReplicaSetMap = + final List dataRegionReplicaSets = new ArrayList<>(); + final Map>> + dataBasePartitionMap = dataPartitionMap.get(databaseName); + final Map> slotReplicaSetMap = dataBasePartitionMap.get(seriesPartitionSlot); - for (TTimePartitionSlot timePartitionSlot : timePartitionSlotList) { - List targetRegionList = slotReplicaSetMap.get(timePartitionSlot); + for (final TTimePartitionSlot timePartitionSlot : timePartitionSlotList) { + final List targetRegionList = slotReplicaSetMap.get(timePartitionSlot); if (targetRegionList == null || targetRegionList.isEmpty()) { throw new RuntimeException( String.format( @@ -234,17 +235,15 @@ public List getDataRegionReplicaSetForWriting( } public TRegionReplicaSet getDataRegionReplicaSetForWriting( - IDeviceID deviceID, TTimePartitionSlot timePartitionSlot, String databaseName) { + final IDeviceID deviceID, final TTimePartitionSlot timePartitionSlot, String databaseName) { // A list of data region replica sets will store data in a same time partition. // We will insert data to the last set in the list. // TODO return the latest dataRegionReplicaSet for each time partition - TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); - if (databaseName != null) { - databaseName = PathUtils.qualifyDatabaseName(databaseName); - } else { + final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + if (databaseName == null) { databaseName = getDatabaseNameByDevice(deviceID); } - Map>> + final Map>> databasePartitionMap = dataPartitionMap.get(databaseName); if (databasePartitionMap == null) { throw new RuntimeException( @@ -252,7 +251,7 @@ public TRegionReplicaSet getDataRegionReplicaSetForWriting( + databaseName + " not exists and failed to create automatically because enable_auto_create_schema is FALSE."); } - List regions = + final List regions = databasePartitionMap.get(seriesPartitionSlot).get(timePartitionSlot); // IMPORTANT TODO: (xingtanzjr) need to handle the situation for write operation that there // are more than 1 Regions for one timeSlot diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/SchemaPartition.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/SchemaPartition.java index 609456b8ae05..3fc99857396b 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/SchemaPartition.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/partition/SchemaPartition.java @@ -81,12 +81,12 @@ public TRegionReplicaSet getSchemaRegionReplicaSet(String database, IDeviceID de } // [root, db, ....] - public TRegionReplicaSet getSchemaRegionReplicaSet(IDeviceID deviceID) { + public TRegionReplicaSet getSchemaRegionReplicaSet(final IDeviceID deviceID) { // A list of data region replica sets will store data in a same time partition. // We will insert data to the last set in the list. 
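// getSchemaRegionReplicaSet above resolves the owning database with getStorageGroupByDevice
// (continued just below), which tests each map key with PathUtils.isStartWith(deviceID,
// storageGroup), i.e. IDeviceID#matchDatabaseName. A simplified string-based sketch of that
// resolution step; it assumes tree-style keys such as "root.sg2", and the helper below is a
// stand-in for the real matching code, not a copy of it.
import java.util.Arrays;
import java.util.List;

class DeviceToDatabaseSketch {
  // Stand-in for IDeviceID#matchDatabaseName: prefix match on a '.' boundary.
  static boolean matchesDatabase(final String devicePath, final String database) {
    return devicePath.equals(database) || devicePath.startsWith(database + ".");
  }

  public static void main(final String[] args) {
    final List<String> databases = Arrays.asList("root.sg1", "root.sg2");
    final String devicePath = "root.sg2.t1";
    final String owner =
        databases.stream()
            .filter(db -> matchesDatabase(devicePath, db))
            .findFirst()
            // The real method throws "Path does not exist" when nothing matches.
            .orElseThrow(IllegalStateException::new);
    System.out.println(owner); // root.sg2
  }
}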
// TODO return the latest dataRegionReplicaSet for each time partition - String storageGroup = getStorageGroupByDevice(deviceID); - TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); + final String storageGroup = getStorageGroupByDevice(deviceID); + final TSeriesPartitionSlot seriesPartitionSlot = calculateDeviceGroupId(deviceID); if (schemaPartitionMap.get(storageGroup) == null) { throw new RuntimeException( new IoTDBException("Path does not exist. ", TSStatusCode.PATH_NOT_EXIST.getStatusCode())); @@ -94,8 +94,8 @@ public TRegionReplicaSet getSchemaRegionReplicaSet(IDeviceID deviceID) { return schemaPartitionMap.get(storageGroup).get(seriesPartitionSlot); } - private String getStorageGroupByDevice(IDeviceID deviceID) { - for (String storageGroup : schemaPartitionMap.keySet()) { + private String getStorageGroupByDevice(final IDeviceID deviceID) { + for (final String storageGroup : schemaPartitionMap.keySet()) { if (PathUtils.isStartWith(deviceID, storageGroup)) { return storageGroup; } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/path/PartialPath.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/path/PartialPath.java index 0a62938ff903..1b7163ca6004 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/path/PartialPath.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/path/PartialPath.java @@ -66,7 +66,7 @@ public class PartialPath extends Path implements Comparable, Cloneable { public PartialPath() {} - public PartialPath(IDeviceID device) throws IllegalPathException { + public PartialPath(final IDeviceID device) throws IllegalPathException { // the first segment is the table name, which may contain multiple levels String[] tableNameSegments = PathUtils.splitPathToDetachedNodes(device.getTableName()); nodes = new String[device.segmentNum() - 1 + tableNameSegments.length]; @@ -120,10 +120,15 @@ protected PartialPath(String device, String measurement) throws IllegalPathExcep /** * @param partialNodes nodes of a time series path */ - public PartialPath(String[] partialNodes) { + public PartialPath(final String[] partialNodes) { nodes = partialNodes; } + public static PartialPath getQualifiedDatabasePartialPath(final String database) + throws IllegalPathException { + return PartialPath.getDatabasePath(PathUtils.qualifyDatabaseName(database)); + } + /** * Get the database {@link PartialPath}. The tree model grammar shall call the {@link * #PartialPath(String)} first to trim the "`"s if it has only one "." after {@link @@ -149,7 +154,7 @@ public static PartialPath getDatabasePath(final String path) throws IllegalPathE * @param path path * @param needSplit whether to split path to nodes, needSplit can only be false. 
*/ - public PartialPath(String path, boolean needSplit) { + public PartialPath(final String path, final boolean needSplit) { Validate.isTrue(!needSplit); fullPath = path; if ("".equals(path)) { diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/column/ColumnHeaderConstant.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/column/ColumnHeaderConstant.java index bf7977ff28b6..40b98cb4a8ea 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/column/ColumnHeaderConstant.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/schema/column/ColumnHeaderConstant.java @@ -64,7 +64,6 @@ private ColumnHeaderConstant() { public static final String DATA_REGION_GROUP_NUM = "DataRegionGroupNum"; public static final String MIN_DATA_REGION_GROUP_NUM = "MinDataRegionGroupNum"; public static final String MAX_DATA_REGION_GROUP_NUM = "MaxDataRegionGroupNum"; - public static final String MODEL = "Model"; public static final String CHILD_PATHS = "ChildPaths"; public static final String NODE_TYPES = "NodeTypes"; public static final String CHILD_NODES = "ChildNodes"; @@ -295,8 +294,7 @@ private ColumnHeaderConstant() { new ColumnHeader(MAX_SCHEMA_REGION_GROUP_NUM, TSDataType.INT32), new ColumnHeader(DATA_REGION_GROUP_NUM, TSDataType.INT32), new ColumnHeader(MIN_DATA_REGION_GROUP_NUM, TSDataType.INT32), - new ColumnHeader(MAX_DATA_REGION_GROUP_NUM, TSDataType.INT32), - new ColumnHeader(MODEL, TSDataType.TEXT)); + new ColumnHeader(MAX_DATA_REGION_GROUP_NUM, TSDataType.INT32)); public static final List showChildPathsColumnHeaders = ImmutableList.of( @@ -566,7 +564,8 @@ private ColumnHeaderConstant() { new ColumnHeader(SCHEMA_REPLICATION_FACTOR, TSDataType.INT32), new ColumnHeader(DATA_REPLICATION_FACTOR, TSDataType.INT32), new ColumnHeader(TIME_PARTITION_INTERVAL, TSDataType.INT64), - new ColumnHeader(MODEL, TSDataType.TEXT)); + new ColumnHeader(SCHEMA_REGION_GROUP_NUM, TSDataType.INT32), + new ColumnHeader(DATA_REGION_GROUP_NUM, TSDataType.INT32)); public static final List describeTableColumnHeaders = ImmutableList.of( diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/PathUtils.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/PathUtils.java index 005003d9b250..2b3a7c3227c7 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/PathUtils.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/PathUtils.java @@ -178,11 +178,11 @@ public static String checkAndReturnSingleMeasurement(String measurement) } /** Return true if the str is a real number. 
Examples: 1.0; +1.0; -1.0; 0011; 011e3; +23e-3 */ - public static boolean isRealNumber(String str) { + public static boolean isRealNumber(final String str) { return PathVisitor.isRealNumber(str); } - public static boolean isStartWith(IDeviceID deviceID, String storageGroup) { + public static boolean isStartWith(final IDeviceID deviceID, final String storageGroup) { return deviceID.segmentNum() > 0 && deviceID.matchDatabaseName(storageGroup); } @@ -220,4 +220,8 @@ public static String unQualifyDatabaseName(String databaseName) { } return databaseName; } + + public static boolean isTableModelDatabase(final String databaseName) { + return !databaseName.startsWith("root."); + } } diff --git a/iotdb-protocol/thrift-confignode/src/main/thrift/confignode.thrift b/iotdb-protocol/thrift-confignode/src/main/thrift/confignode.thrift index a157cb724a1c..5f090003119d 100644 --- a/iotdb-protocol/thrift-confignode/src/main/thrift/confignode.thrift +++ b/iotdb-protocol/thrift-confignode/src/main/thrift/confignode.thrift @@ -217,7 +217,6 @@ struct TDatabaseSchema { // Schema struct TSchemaPartitionReq { 1: required binary pathPatternTree - 2: optional bool isTableModel } struct TSchemaPartitionTableResp { @@ -605,12 +604,12 @@ struct TDatabaseInfo { 10: required i32 minDataRegionNum 11: required i32 maxDataRegionNum 12: optional i64 timePartitionOrigin - 13: optional bool isTableModel } struct TGetDatabaseReq { 1: required list databasePathPattern 2: required binary scopePatternTree + 3: optional bool isTableModel } struct TShowDatabaseResp { @@ -623,6 +622,7 @@ struct TShowDatabaseResp { struct TShowRegionReq { 1: optional common.TConsensusGroupType consensusGroupType; 2: optional list databases + 3: optional bool isTableModel } struct TRegionInfo {
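// Taken together, the PathUtils and PartialPath additions in this patch spell out the naming rule
// the rest of the change set relies on: table-model databases carry no "root." prefix, tree-model
// databases keep it, and tree-style machinery qualifies a table-model name only where it needs a
// PartialPath. A short usage sketch; it assumes that qualifyDatabaseName(), which
// getQualifiedDatabasePartialPath() delegates to, prepends "root." when the prefix is absent, and
// "payments" is an illustrative database name.
import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.commons.utils.PathUtils;

class DatabaseNamingSketch {
  public static void main(final String[] args) throws Exception {
    System.out.println(PathUtils.isTableModelDatabase("payments")); // true: no "root." prefix
    System.out.println(PathUtils.isTableModelDatabase("root.sg1")); // false: tree model

    // Where MTree/schema-region code needs a tree path, the new helper qualifies the name first.
    final PartialPath dbPath = PartialPath.getQualifiedDatabasePartialPath("payments");
    System.out.println(dbPath.getFullPath()); // expected: root.payments
  }
}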