Skip to content

Commit

Permalink
[BugFix] limit mv with window function (#29325)
Browse files Browse the repository at this point in the history
* limit mv with window function

Signed-off-by: ABingHuang <[email protected]>
(cherry picked from commit c43bdfe)

# Conflicts:
#	fe/fe-core/src/main/java/com/starrocks/sql/analyzer/MaterializedViewAnalyzer.java
  • Loading branch information
ABingHuang authored and mergify[bot] committed Aug 24, 2023
1 parent 17342f9 commit a4ead63
Show file tree
Hide file tree
Showing 2 changed files with 75 additions and 0 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.starrocks.analysis.AnalyticExpr;
import com.starrocks.analysis.Expr;
import com.starrocks.analysis.FunctionCallExpr;
import com.starrocks.analysis.IntLiteral;
Expand Down Expand Up @@ -272,6 +273,7 @@ public Void visitCreateMaterializedViewStatement(CreateMaterializedViewStatement
checkPartitionExpPatterns(statement);
// check partition column must be base table's partition column
checkPartitionColumnWithBaseTable(statement, aliasTableMap);
checkWindowFunctions(statement, columnExprMap);
}
// check and analyze distribution
checkDistribution(statement, aliasTableMap);
Expand Down Expand Up @@ -771,6 +773,39 @@ private void checkPartitionColumnWithBaseHMSTable(SlotRef slotRef, HiveMetaStore
}
}

<<<<<<< HEAD
=======
// Validates the mv's partition column against a Hive Metastore base table:
// delegates to the generic check with the table's partition columns and its
// partitioned/unpartitioned flag.
private void checkPartitionColumnWithBaseHMSTable(SlotRef slotRef, HiveMetaStoreTable table) {
checkPartitionColumnWithBaseTable(slotRef, table.getPartitionColumns(), table.isUnPartitioned());
}

// Validates the mv's partition column against a JDBC base table: same generic
// check as the other external-table variants, fed with the JDBC table's
// partition columns and its partitioned/unpartitioned flag.
private void checkPartitionColumnWithBaseJDBCTable(SlotRef slotRef, JDBCTable table) {
checkPartitionColumnWithBaseTable(slotRef, table.getPartitionColumns(), table.isUnPartitioned());
}

// A partitioned mv is refreshed partition-by-partition, so a window function in the
// mv definition is only safe when its own PARTITION BY expressions include the mv's
// partition expression; otherwise a partition refresh could see an incomplete window.
// Throws SemanticException when an analytic expression violates that requirement.
private void checkWindowFunctions(
        CreateMaterializedViewStatement statement,
        Map<Column, Expr> columnExprMap) {
    Expr partitionRefExpr = statement.getPartitionRefTableExpr();
    SlotRef partitionSlot = getSlotRef(partitionRefExpr);
    // Analyze the partition expression up front so its type info is populated
    // before it is compared against the window's partition expressions.
    PartitionExprAnalyzer.analyzePartitionExpr(partitionRefExpr, partitionSlot);
    for (Expr outputExpr : columnExprMap.values()) {
        if (!(outputExpr instanceof AnalyticExpr)) {
            continue;
        }
        AnalyticExpr analyticExpr = (AnalyticExpr) outputExpr;
        // NOTE(review): relies on Expr.equals() for the contains() comparison —
        // the window must repeat the mv's partition expression verbatim.
        boolean coversMvPartition = analyticExpr.getPartitionExprs() != null
                && analyticExpr.getPartitionExprs().contains(partitionRefExpr);
        if (!coversMvPartition) {
            throw new SemanticException("window function %s ’s partition expressions" +
                    " should contain the partition column %s of materialized view",
                    analyticExpr.getFnCall().getFnName().getFunction(),
                    statement.getPartitionColumn().getName());
        }
    }
}

>>>>>>> c43bdfe123 ([BugFix] limit mv with window function (#29325))
private void checkPartitionColumnWithBaseIcebergTable(SlotRef slotRef, IcebergTable table) {
org.apache.iceberg.Table icebergTable = table.getNativeTable();
PartitionSpec partitionSpec = icebergTable.spec();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -125,4 +125,44 @@ public void testCreateMvWithNotExistResourceGroup() {
Assert.assertThrows("resource_group not_exist_rg does not exist.",
DdlException.class, () -> starRocksAssert.useDatabase("test").withMaterializedView(sql));
}

@Test
public void testCreateMvWithWindowFunction() throws Exception {
    // Case 1: the window is partitioned by the same date_trunc expression as the
    // mv's partition clause -> creation succeeds.
    String partitionByExprSql = "create materialized view window_mv_1\n" +
            "partition by date_trunc('month', k1)\n" +
            "distributed by hash(k2)\n" +
            "refresh manual\n" +
            "as\n" +
            "select \n" +
            "\tk2, k1, row_number() over (partition by date_trunc('month', k1) order by k2)\n" +
            "from tbl1 \n";
    starRocksAssert.useDatabase("test").withMaterializedView(partitionByExprSql);

    // Case 2: the window is partitioned directly by the mv's partition column -> succeeds.
    String partitionByColumnSql = "create materialized view window_mv_2\n" +
            "partition by k1\n" +
            "distributed by hash(k2)\n" +
            "refresh manual\n" +
            "as\n" +
            "select \n" +
            "\tk2, k1, row_number() over (partition by k1 order by k2)\n" +
            "from tbl1 \n";
    starRocksAssert.useDatabase("test").withMaterializedView(partitionByColumnSql);

    // Case 3: the window has no PARTITION BY at all, so it cannot cover the mv's
    // partition column -> analysis must fail with the dedicated error message.
    String missingPartitionSql = "create materialized view window_mv_3\n" +
            "partition by k1\n" +
            "distributed by hash(k2)\n" +
            "refresh manual\n" +
            "as\n" +
            "select \n" +
            "\tk2, k1, row_number() over (order by k2)\n" +
            "from tbl1 \n";
    analyzeFail(missingPartitionSql, "Detail message: window function row_number ’s partition expressions" +
            " should contain the partition column k1 of materialized view");
}
}

0 comments on commit a4ead63

Please sign in to comment.