Calcite Custom Optimizer Rules

2023-11-01 12:52

This article walks through writing custom optimizer rules in Calcite, with complete code examples. I hope it serves as a useful reference for developers facing the same problem.

1) Summary
1. Create CSVProjectRule, extending RelRule<CSVProjectRule.Config>.
a) Define the match pattern in the CSVProjectRule.Config interface:

Config DEFAULT = EMPTY
        .withOperandSupplier(b0 -> b0.operand(LogicalProject.class).anyInputs())
        .as(Config.class);

b) In the CSVProjectRule class, once the rule has matched, perform the conversion in onMatch:

@Override
public void onMatch(RelOptRuleCall call) {
    final LogicalProject project = call.rel(0);
    final RelNode converted = convert(project);
    if (converted != null) {
        call.transformTo(converted);
    }
}

public RelNode convert(RelNode rel) {
    final LogicalProject project = (LogicalProject) rel;
    final RelTraitSet traitSet = project.getTraitSet();
    return new CSVProject(project.getCluster(), traitSet,
            project.getInput(), project.getProjects(),
            project.getRowType());
}

2. Create CSVProject, the RelNode that the conversion produces (see the wiring sketch after this list).
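
Before the full listings, here is a minimal wiring sketch of how such a rule is driven. It assumes the same imports as the PlannerTest at the end of this article, plus the SqlToRelNode helper shown later; the full version is in PlannerTest.

// Build a HepProgram containing the custom rule, wrap it in a HepPlanner,
// obtain the logical plan, and ask the planner for the best expression.
HepProgram program = HepProgram.builder()
        .addRuleInstance(CSVProjectRule.Config.DEFAULT.toRule())
        .build();
HepPlanner hepPlanner = new HepPlanner(program);
RelNode logicalPlan = SqlToRelNode.getSqlNode("select Id from data", hepPlanner);
hepPlanner.setRoot(logicalPlan);
RelNode optimized = hepPlanner.findBestExp(); // the LogicalProject is now a CSVProject
System.out.println(RelOptUtil.toString(optimized));
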
2) Code Examples

CSVProjectRule

package cn.com.ptpress.cdm.optimization.RelBuilder.optimizer;

import cn.com.ptpress.cdm.optimization.RelBuilder.csvRelNode.CSVProject;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.plan.RelRule;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.logical.LogicalProject;

public class CSVProjectRule extends RelRule<CSVProjectRule.Config> {

    @Override
    public void onMatch(RelOptRuleCall call) {
        final LogicalProject project = call.rel(0);
        final RelNode converted = convert(project);
        if (converted != null) {
            // Register the converted node as equivalent to the matched one
            call.transformTo(converted);
        }
    }

    /** Rule configuration: match any LogicalProject, with any inputs. */
    public interface Config extends RelRule.Config {
        Config DEFAULT = EMPTY
                .withOperandSupplier(b0 -> b0.operand(LogicalProject.class).anyInputs())
                .as(Config.class);

        @Override
        default CSVProjectRule toRule() {
            return new CSVProjectRule(this);
        }
    }

    private CSVProjectRule(Config config) {
        super(config);
    }

    public RelNode convert(RelNode rel) {
        final LogicalProject project = (LogicalProject) rel;
        final RelTraitSet traitSet = project.getTraitSet();
        return new CSVProject(project.getCluster(), traitSet,
                project.getInput(), project.getProjects(),
                project.getRowType());
    }
}

CSVProjectRuleWithCost

package cn.com.ptpress.cdm.optimization.RelBuilder.optimizer;

import cn.com.ptpress.cdm.optimization.RelBuilder.csvRelNode.CSVProjectWithCost;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.plan.RelRule;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.logical.LogicalProject;

public class CSVProjectRuleWithCost extends RelRule<CSVProjectRuleWithCost.Config> {

    @Override
    public void onMatch(RelOptRuleCall call) {
        final LogicalProject project = call.rel(0);
        final RelNode converted = convert(project);
        if (converted != null) {
            call.transformTo(converted);
        }
    }

    /** Rule configuration: match any LogicalProject, with any inputs. */
    public interface Config extends RelRule.Config {
        Config DEFAULT = EMPTY
                .withOperandSupplier(b0 -> b0.operand(LogicalProject.class).anyInputs())
                .as(Config.class);

        @Override
        default CSVProjectRuleWithCost toRule() {
            return new CSVProjectRuleWithCost(this);
        }
    }

    private CSVProjectRuleWithCost(Config config) {
        super(config);
    }

    public RelNode convert(RelNode rel) {
        final LogicalProject project = (LogicalProject) rel;
        final RelTraitSet traitSet = project.getTraitSet();
        // Same conversion as CSVProjectRule, but targets CSVProjectWithCost,
        // which reports an infinite cost to the planner.
        return new CSVProjectWithCost(project.getCluster(), traitSet,
                project.getInput(), project.getProjects(),
                project.getRowType());
    }
}

CSVProject

package cn.com.ptpress.cdm.optimization.RelBuilder.csvRelNode;

import com.google.common.collect.ImmutableList;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptCost;
import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.Project;
import org.apache.calcite.rel.metadata.RelMetadataQuery;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rex.RexNode;

import java.util.List;

public class CSVProject extends Project {

    public CSVProject(RelOptCluster cluster, RelTraitSet traits, RelNode input,
            List<? extends RexNode> projects, RelDataType rowType) {
        super(cluster, traits, ImmutableList.of(), input, projects, rowType);
    }

    @Override
    public Project copy(RelTraitSet traitSet, RelNode input,
            List<RexNode> projects, RelDataType rowType) {
        return new CSVProject(getCluster(), traitSet, input, projects, rowType);
    }

    @Override
    public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
        // Report zero cost, so a cost-based planner will always keep this node.
        return planner.getCostFactory().makeZeroCost();
    }
}

CSVProjectWithCost

package cn.com.ptpress.cdm.optimization.RelBuilder.csvRelNode;

import com.google.common.collect.ImmutableList;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptCost;
import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.Project;
import org.apache.calcite.rel.metadata.RelMetadataQuery;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rex.RexNode;

import java.util.List;

public class CSVProjectWithCost extends Project {

    public CSVProjectWithCost(RelOptCluster cluster, RelTraitSet traits, RelNode input,
            List<? extends RexNode> projects, RelDataType rowType) {
        super(cluster, traits, ImmutableList.of(), input, projects, rowType);
    }

    @Override
    public Project copy(RelTraitSet traitSet, RelNode input,
            List<RexNode> projects, RelDataType rowType) {
        return new CSVProjectWithCost(getCluster(), traitSet, input, projects, rowType);
    }

    @Override
    public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
        // Report infinite cost, so a cost-based planner will never keep this node.
        return planner.getCostFactory().makeInfiniteCost();
    }
}
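
The two physical nodes are identical except for the cost they report through computeSelfCost. A quick sanity check of that difference, as a sketch (it assumes a planner variable carrying Calcite's default cost factory, e.g. the HepPlanner from the tests below):

RelOptCost zero = planner.getCostFactory().makeZeroCost();
RelOptCost infinite = planner.getCostFactory().makeInfiniteCost();
// A cost-based planner such as VolcanoPlanner would keep CSVProject and
// discard CSVProjectWithCost; HepPlanner instead applies whichever rule
// fires first, regardless of the costs the nodes report.
assert zero.isLt(infinite);
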

SqlToRelNode

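The original SqlToRelNode listing is not available in this article (the source carried a copy of CatalogReaderUtil in its place), so what follows is a minimal reconstruction based on how PlannerTest calls SqlToRelNode.getSqlNode(sql, planner) and on Calcite's standard parse/validate/convert flow. The exact parser, validator, and converter configuration here is an assumption, not the article's original code.

package cn.com.ptpress.cdm.optimization.RelBuilder.Utils;

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.prepare.CalciteCatalogReader;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.calcite.sql.validate.SqlValidator;
import org.apache.calcite.sql.validate.SqlValidatorUtil;
import org.apache.calcite.sql2rel.SqlToRelConverter;
import org.apache.calcite.sql2rel.StandardConvertletTable;

public class SqlToRelNode {

    public static RelNode getSqlNode(String sql, RelOptPlanner planner)
            throws SqlParseException {
        // Case-insensitive parsing is assumed here so that "data" and "Id"
        // resolve against the CSV schema regardless of identifier casing.
        SqlParser.Config parserConfig = SqlParser.config().withCaseSensitive(false);

        // 1. Parse the SQL text into an AST.
        SqlNode parsed = SqlParser.create(sql, parserConfig).parseStmt();

        // 2. Validate the AST against the CSV schema.
        CalciteCatalogReader catalogReader =
                CatalogReaderUtil.createCatalogReader(parserConfig);
        SqlValidator validator = SqlValidatorUtil.newValidator(
                SqlStdOperatorTable.instance(), catalogReader,
                new JavaTypeFactoryImpl(), SqlValidator.Config.DEFAULT);
        SqlNode validated = validator.validate(parsed);

        // 3. Convert the validated AST into a logical RelNode tree, using a
        //    cluster built around the caller's planner so the tests can later
        //    retrieve it via relNode.getCluster().getPlanner().
        RelOptCluster cluster = RelOptCluster.create(
                planner, new RexBuilder(new JavaTypeFactoryImpl()));
        SqlToRelConverter converter = new SqlToRelConverter(
                null, validator, catalogReader, cluster,
                StandardConvertletTable.INSTANCE, SqlToRelConverter.config());
        return converter.convertQuery(validated, false, true).rel;
    }
}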

CatalogReaderUtil

package cn.com.ptpress.cdm.optimization.RelBuilder.Utils;

import cn.com.ptpress.cdm.ds.csv.CsvSchema;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.config.CalciteConnectionProperty;
import org.apache.calcite.jdbc.CalciteSchema;
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.prepare.CalciteCatalogReader;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.calcite.tools.Frameworks;

import java.util.Properties;

public class CatalogReaderUtil {

    public static CalciteCatalogReader createCatalogReader(SqlParser.Config parserConfig) {
        SchemaPlus rootSchema = Frameworks.createRootSchema(true);
        rootSchema.add("csv", new CsvSchema("data.csv"));
        return createCatalogReader(parserConfig, rootSchema);
    }

    public static CalciteCatalogReader createCatalogReader(SqlParser.Config parserConfig,
            SchemaPlus rootSchema) {
        Properties prop = new Properties();
        prop.setProperty(CalciteConnectionProperty.CASE_SENSITIVE.camelName(),
                String.valueOf(parserConfig.caseSensitive()));
        CalciteConnectionConfigImpl calciteConnectionConfig =
                new CalciteConnectionConfigImpl(prop);
        return new CalciteCatalogReader(
                CalciteSchema.from(rootSchema),
                CalciteSchema.from(rootSchema).path("csv"),
                new JavaTypeFactoryImpl(),
                calciteConnectionConfig);
    }
}
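
CatalogReaderUtil registers a CsvSchema from cn.com.ptpress.cdm.ds.csv, which this article never lists. To make the examples self-contained, here is a hypothetical minimal stand-in that exposes a single table named data whose row type matches the data.csv header shown at the end; the real class presumably also reads the rows from the file.

package cn.com.ptpress.cdm.ds.csv;

import com.google.common.collect.ImmutableMap;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.schema.Table;
import org.apache.calcite.schema.impl.AbstractSchema;
import org.apache.calcite.schema.impl.AbstractTable;
import org.apache.calcite.sql.type.SqlTypeName;

import java.util.Map;

/** Hypothetical stand-in for the CsvSchema the article assumes. */
public class CsvSchema extends AbstractSchema {

    private final String filePath;

    public CsvSchema(String filePath) {
        this.filePath = filePath;
    }

    @Override
    protected Map<String, Table> getTableMap() {
        // Expose a single table named "data" whose row type matches the
        // header of data.csv: Id:VARCHAR, Name:VARCHAR, Score:INTEGER.
        return ImmutableMap.of("data", new AbstractTable() {
            @Override
            public RelDataType getRowType(RelDataTypeFactory typeFactory) {
                return typeFactory.builder()
                        .add("Id", SqlTypeName.VARCHAR)
                        .add("Name", SqlTypeName.VARCHAR)
                        .add("Score", SqlTypeName.INTEGER)
                        .build();
            }
        });
    }
}
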

PlannerTest

import cn.com.ptpress.cdm.optimization.RelBuilder.Utils.SqlToRelNode;
import cn.com.ptpress.cdm.optimization.RelBuilder.optimizer.CSVProjectRule;
import cn.com.ptpress.cdm.optimization.RelBuilder.optimizer.CSVProjectRuleWithCost;
import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.plan.RelOptUtil;
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgram;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.rules.FilterJoinRule;
import org.apache.calcite.sql.parser.SqlParseException;
import org.junit.jupiter.api.Test;

class PlannerTest {

    @Test
    public void testCustomRule() throws SqlParseException {
        final String sql = "select Id from data ";
        HepProgramBuilder programBuilder = HepProgram.builder();
        // Test: swap the order of CSVProjectRule and CSVProjectRuleWithCost
        HepPlanner hepPlanner =
                new HepPlanner(
                        programBuilder
                                .addRuleInstance(CSVProjectRule.Config.DEFAULT.toRule())
                                .addRuleInstance(CSVProjectRuleWithCost.Config.DEFAULT.toRule())
                                .build());
//        HepPlanner hepPlanner =
//                new HepPlanner(
//                        programBuilder
//                                .addRuleInstance(CSVProjectRuleWithCost.Config.DEFAULT.toRule())
//                                .addRuleInstance(CSVProjectRule.Config.DEFAULT.toRule())
//                                .build());
        RelNode relNode = SqlToRelNode.getSqlNode(sql, hepPlanner);
        System.out.println(RelOptUtil.toString(relNode));

        RelOptPlanner planner = relNode.getCluster().getPlanner();
        planner.setRoot(relNode);
        RelNode bestExp = planner.findBestExp();
        System.out.println(RelOptUtil.toString(bestExp));

        RelOptPlanner relOptPlanner = relNode.getCluster().getPlanner();
        relOptPlanner.addRule(CSVProjectRule.Config.DEFAULT.toRule());
        relOptPlanner.addRule(CSVProjectRuleWithCost.Config.DEFAULT.toRule());
        relOptPlanner.setRoot(relNode);
        RelNode exp = relOptPlanner.findBestExp();
        System.out.println(RelOptUtil.toString(exp));
    }

    /**
     * Operator tree before optimization:
     * LogicalProject(ID=[$0])
     *   LogicalFilter(condition=[>(CAST($0):INTEGER NOT NULL, 1)])
     *     LogicalJoin(condition=[=($0, $3)], joinType=[inner])
     *       LogicalTableScan(table=[[csv, data]])
     *       LogicalTableScan(table=[[csv, data]])
     *
     * Result after optimization:
     * LogicalProject(ID=[$0])
     *   LogicalJoin(condition=[=($0, $3)], joinType=[inner])
     *     LogicalFilter(condition=[>(CAST($0):INTEGER NOT NULL, 1)])
     *       LogicalTableScan(table=[[csv, data]])
     *     LogicalTableScan(table=[[csv, data]])
     */
    @Test
    public void testHepPlanner() throws SqlParseException {
        final String sql = "select a.Id from data as a join data b on a.Id = b.Id where a.Id>1";
        HepProgramBuilder programBuilder = HepProgram.builder();
        HepPlanner hepPlanner =
                new HepPlanner(
                        programBuilder
                                .addRuleInstance(FilterJoinRule.FilterIntoJoinRule.Config.DEFAULT.toRule())
                                .build());
        RelNode relNode = SqlToRelNode.getSqlNode(sql, hepPlanner);
        // Operator tree before optimization
        System.out.println(RelOptUtil.toString(relNode));
        RelOptPlanner planner = relNode.getCluster().getPlanner();
        planner.setRoot(relNode);
        RelNode bestExp = planner.findBestExp();
        // Result after optimization
        System.out.println(RelOptUtil.toString(bestExp));
    }

    /**
     * Operator tree before DAG conversion:
     * LogicalProject(Id=[$0], Name=[$1], Score=[$2])
     *   LogicalFilter(condition=[=(CAST($0):INTEGER NOT NULL, 1)])
     *     LogicalTableScan(table=[[csv, data]])
     *
     * After conversion to a DAG:
     * Breadth-first from root:  {
     *     rel#8:HepRelVertex(rel#7:LogicalProject.(input=HepRelVertex#6,inputs=0..2)) = rel#7:LogicalProject.(input=HepRelVertex#6,inputs=0..2), rowcount=15.0, cumulative cost=130.0
     *     rel#6:HepRelVertex(rel#5:LogicalFilter.(input=HepRelVertex#4,condition==(CAST($0):INTEGER NOT NULL, 1))) = rel#5:LogicalFilter.(input=HepRelVertex#4,condition==(CAST($0):INTEGER NOT NULL, 1)), rowcount=15.0, cumulative cost=115.0
     *     rel#4:HepRelVertex(rel#1:LogicalTableScan.(table=[csv, data])) = rel#1:LogicalTableScan.(table=[csv, data]), rowcount=100.0, cumulative cost=100.0
     * }
     */
    @Test
    public void testGraph() throws SqlParseException {
        final String sql = "select * from data where Id=1";
        HepProgramBuilder programBuilder = HepProgram.builder();
        HepPlanner hepPlanner = new HepPlanner(programBuilder.build());
        RelNode relNode = SqlToRelNode.getSqlNode(sql, hepPlanner);
        // Operator tree before DAG conversion
        System.out.println("Operator tree before DAG conversion");
        System.out.println(RelOptUtil.toString(relNode));
        // Convert to a DAG; to see the graph, set the log4j.properties level to trace
        System.out.println("After conversion to a DAG");
        hepPlanner.setRoot(relNode);
    }
}
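
Swapping the two addRuleInstance calls in testCustomRule changes which conversion survives: HepPlanner fires rules greedily in program order without comparing costs, so once the first rule has rewritten the LogicalProject, the second rule's operand no longer matches anything. The zero and infinite costs reported by CSVProject and CSVProjectWithCost only influence a cost-based planner.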

data.csv

Id:VARCHAR,Name:VARCHAR,Score:INTEGER
1,小明,90
2,小红,98
3,小亮,95

That concludes this introduction to custom optimizer rules in Calcite; I hope it proves helpful.


