[MySQL 5.6] A simple optimizer_trace example
The previous post described how to enable and configure optimizer_trace in MySQL 5.6 (see that post). Here we walk through how the optimizer_trace output is produced, using a relatively simple example.
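As a quick refresher on collecting a trace (a minimal sketch using the standard 5.6 switches, not a verbatim copy of the earlier post):

set optimizer_trace='enabled=on';
set optimizer_trace_max_mem_size=1048576;   -- enlarge the buffer so the trace is not truncated
select * from sbtest1 where k >= 160000 and k < 320000 order by pad limit 3;
select trace from information_schema.optimizer_trace\G
set optimizer_trace='enabled=off';

The table and query used throughout are shown below.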
mysql> show create table sbtest1\G
*************************** 1. row ***************************
       Table: sbtest1
Create Table: CREATE TABLE `sbtest1` (
  `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
  `k` int(10) unsigned NOT NULL DEFAULT '0',
  `c` char(120) NOT NULL DEFAULT '',
  `pad` char(60) NOT NULL DEFAULT '',
  `d` int(11) DEFAULT NULL,
  PRIMARY KEY (`id`),
  KEY `k` (`k`,`d`)
) ENGINE=InnoDB AUTO_INCREMENT=1000001 DEFAULT CHARSET=gbk MAX_ROWS=1000000
1 row in set (0.00 sec)
mysql> explain select * from sbtest1 where k >= 160000 and k < 320000 order by pad limit 3;
+----+-------------+---------+-------+---------------+------+---------+------+------+---------------------------------------+
| id | select_type | table   | type  | possible_keys | key  | key_len | ref  | rows | Extra                                 |
+----+-------------+---------+-------+---------------+------+---------+------+------+---------------------------------------+
|  1 | SIMPLE      | sbtest1 | range | k             | k    | 4       | NULL | 3660 | Using index condition; Using filesort |
+----+-------------+---------+-------+---------------+------+---------+------+------+---------------------------------------+
1 row in set (0.00 sec)
  SELECT_LEX_UNIT *unit= &lex->unit;
  unit->set_limit(unit->global_parameters);
  /*
    'options' of mysql_select will be set in JOIN, as far as JOIN for
    every PS/SP execution new, we will not need reset this flag if
    setup_tables_done_option changed for next rexecution
  */
  res= mysql_select(thd,
                    select_lex->table_list.first,
                    select_lex->with_wild, select_lex->item_list,
                    select_lex->where,
                    &select_lex->order_list,
                    &select_lex->group_list,
                    select_lex->having,
                    select_lex->options | thd->variables.option_bits |
                    setup_tables_done_option,
                    result, unit, select_lex);
##############################
The LIMIT is initialized first; mysql_select() then receives the table list, a flag telling whether the select list contains "*", item_list (the list of select expressions), the WHERE condition, the ORDER BY item list, the GROUP BY item list, and the HAVING clause.
117   Opt_trace_context * const trace= &thd->opt_trace;
118   Opt_trace_object trace_wrapper(trace);
119   Opt_trace_object trace_prepare(trace, "join_preparation");
120   trace_prepare.add_select_number(select_lex->select_number);
121   Opt_trace_array trace_steps(trace, "steps");
The main steps performed by JOIN::prepare (from its source comment) are:

- Initialization and linking JOIN structure to st_select_lex.
- fix_fields() for all items (after fix_fields(), we know everything about item).
- Moving HAVING to WHERE if possible.
- Initialization procedure if there is one.
"join_preparation": {
  "select#": 1,
  "steps": [
    {
      "expanded_query": "/* select#1 */ select `sbtest1`.`id` AS `id`,`sbtest1`.`k` AS `k`,`sbtest1`.`c` AS `c`,`sbtest1`.`pad` AS `pad`,`sbtest1`.`d` AS `d` from `sbtest1` where ((`sbtest1`.`k` >= 160000) and (`sbtest1`.`k` < 320000)) order by `sbtest1`.`pad` limit 3"
    }
  ]
}
#################################################
Quoted code in JOIN::prepare:

214   {
215     Opt_trace_object trace_wrapper(trace);
216     opt_trace_print_expanded_query(thd, select_lex, &trace_wrapper);
217   }

Here the * has been expanded into the concrete columns. "select#": 1 marks the first SELECT in the statement; every SELECT gets its own number, and if there are several SELECTs, each one gets its own expanded query.
From the printed output it is clear that the next stage optimizes the WHERE condition; the function called is optimize_cond(). Before that function is invoked, however, some other work has to be done first:
a1. Handling subqueries in the WHERE condition
We know that 5.6 already optimizes subqueries of the form select ... where id in (select ...) by converting them into semi-join operations; this happens in flatten_subqueries(). The step is effectively recursive, because a subquery may itself contain subqueries.
6897     (*subq)->sj_convert_priority=
6898       (((*subq)->unit->uncacheable & UNCACHEABLE_DEPENDENT) ? MAX_TABLES : 0) +
6899       child_join->tables;
"join_optimization": { "select#": 1, "steps": [ { "transformation": { "select#": 2, "from": "IN (SELECT)", "to": "semijoin", "chosen": true, "evaluating_constant_semijoin_conditions": [ ] } }, { "transformations_to_nested_joins": { "transformations": [ "semijoin" ], "expanded_query": "/* select#1 */ select `sbtest1`.`id` AS `id`,`sbtest1`.`k` AS `k`,`sbtest1`.`c` AS `c`,`sbtest1`.`pad` AS `pad`,`sbtest1`.`d` AS `d` from `sbtest1` semi join (`sbtest1`) where (1 and (`sbtest1`.`id` = 100) and (`sbtest1`.`id` = `sbtest1`.`id`))" } },
a2) Optimizing derived tables
if (select_lex->handle_derived(thd->lex, &mysql_derived_optimize))
mysql> show create table t1\G
*************************** 1. row ***************************
       Table: t1
Create Table: CREATE TABLE `t1` (
  `a` int(11) NOT NULL AUTO_INCREMENT,
  `b` int(11) DEFAULT NULL,
  `c` int(11) DEFAULT NULL,
  PRIMARY KEY (`a`)
) ENGINE=InnoDB AUTO_INCREMENT=8179 DEFAULT CHARSET=latin1
1 row in set (0.00 sec)
{ "transformations_to_nested_joins": { "transformations": [ "outer_join_to_inner_join", "JOIN_condition_to_WHERE", "parenthesis_removal" ], "expanded_query": "/* select#1 */ select `t1`.`a` AS `a`,`t1`.`b` AS `b`,`t1`.`c` AS `c`,`t2`.`a` AS `a`,`t2`.`b` AS `b`,`t2`.`c` AS `c` from `t1` join `t2` where ((`t2`.`b` < 1000) and (`t1`.`a` = `t2`.`a`))" } },
One conversion may trigger another, as in this chained example from the MySQL source comments:

SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a
                 LEFT JOIN t3 ON t3.b=t2.b
  WHERE t3 IS NOT NULL =>
SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a, t3
  WHERE t3 IS NOT NULL AND t3.b=t2.b =>
SELECT * FROM t1, t2, t3
  WHERE t3 IS NOT NULL AND t3.b=t2.b AND t2.a=t1.a
234   conds= optimize_cond(thd, conds, &cond_equal,
235                        join_list, true, &select_lex->cond_value);
"join_optimization": {
  "select#": 1,
  "steps": [
    {
      "condition_processing": {
        "condition": "WHERE",
        "original_condition": "((`sbtest1`.`k` >= 160000) and (`sbtest1`.`k` < 320000))",
        "steps": [
          {
            "transformation": "equality_propagation",
            "resulting_condition": "((`sbtest1`.`k` >= 160000) and (`sbtest1`.`k` < 320000))"
          },
          {
            "transformation": "constant_propagation",
            "resulting_condition": "((`sbtest1`.`k` >= 160000) and (`sbtest1`.`k` < 320000))"
          },
          {
            "transformation": "trivial_condition_removal",
            "resulting_condition": "((`sbtest1`.`k` >= 160000) and (`sbtest1`.`k` < 320000))"
          }
        ]
      }
    },
###########################################
The WHERE-condition optimization consists of several parts.

Equality propagation (equality_propagation): for example, a condition such as

  ... where a = 12 and b = c and a = b;

is converted into

  (multiple equal(12, `t1`.`a`, `t1`.`b`, `t1`.`c`))

and

  ... where b > a and a = 2

is converted into

  ((`t1`.`b` > 2) and multiple equal(2, `t1`.`a`))

Constant propagation (constant_propagation): according to the comments, this converts field = field into field = const where possible. I have not managed to construct an example; most constant propagation is already done in the first step. TODO: read the function propagate_cond_constants. One sample (from suite/opt_trace/r/general_no_prot_all.result):

  ... where 1 = 0 or a > 500

is converted in this step into

  (`t1`.`a` > 500)
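To reproduce the condition_processing output yourself, the fragments above can be turned into complete statements (t1 here is a hypothetical three-column table; the exact trace wording may vary slightly between versions):

create table t1 (a int, b int, c int);
select * from t1 where a = 12 and b = c and a = b;   -- multiple equal(12, a, b, c)
select * from t1 where b > a and a = 2;              -- (b > 2) and multiple equal(2, a)
select * from t1 where 1 = 0 or a > 500;             -- the constant disjunct is dropped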
245   having= optimize_cond(thd, having, &cond_equal, join_list, false,
246                         &select_lex->having_value);
{
  "table_dependencies": [
    {
      "table": "`sbtest1`",
      "row_may_be_null": false,
      "map_bit": 0,
      "depends_on_map_bits": [
      ]
    }
  ]
},
#################
This lists the tables that the join depends on. There is only the one table sbtest1 here, so its map_bit is 0; map_bit can be thought of as the number of the joined table, counted from 0. The function that prints this information is make_join_statistics -> trace_table_dependencies.
{
  "ref_optimizer_key_uses": [
  ]
},
################
When ref access over a secondary index is possible, i.e. there are equality expressions on indexed columns, the usable keys are printed here. For example:

create table t4 (a int, b int, c int, d int, key (a), key(b,c));
// insert some rows…
select * from t4 where b = 577 and c = 25 and d > 1000;

For that query the ref_optimizer_key_uses section prints:

{
  "table": "`t4`",
  "field": "b",
  "equals": "577",
  "null_rejecting": false
},
{
  "table": "`t4`",
  "field": "c",
  "equals": "25",
  "null_rejecting": false
}

Another example:

select * from t4 where a = 1000 and b > 577 and c = 1000;

prints:

{
  "table": "`t4`",
  "field": "a",
  "equals": "1000",
  "null_rejecting": false
}

Clearly this SQL, when using a ref-type index access, can only use key a. If the WHERE condition is changed to a = 1000 and b = 577 and c > 1000, the output becomes:

{
  "table": "`t4`",
  "field": "a",
  "equals": "1000",
  "null_rejecting": false
},
{
  "table": "`t4`",
  "field": "b",
  "equals": "577",
  "null_rejecting": false
}

Our example SQL has no equality condition on an indexed column, so the array is empty here. The following functions are called:

make_join_statistics
  -> update_ref_and_keys
    -> print_keyuse_array

update_ref_and_keys() is the main function of this step: based on the conditions it finds the keys that can be used for index lookups and stores them in the keyuse array; it also sets each table's possible keys according to the conditions.
{
  "rows_estimation": [
    {
      "table": "`sbtest1`",
      "range_analysis": {
        "table_scan": {
          "rows": 986190,
          "cost": 211174
        },
        "potential_range_indices": [
          {
            "index": "PRIMARY",
            "usable": false,
            "cause": "not_applicable"
          },
          {
            "index": "k",
            "usable": true,
            "key_parts": [
              "k",
              "d",
              "id"
            ]
          }
        ],
        "setup_range_conditions": [
        ],
        "group_index_range": {
          "chosen": false,
          "cause": "not_group_by_or_distinct"
        },
        "analyzing_range_alternatives": {
          "range_scan_alternatives": [
            {
              "index": "k",
              "ranges": [
                "160000 <= k < 320000"
              ],
              "index_dives_for_eq_ranges": true,
              "rowid_ordered": false,
              "using_mrr": false,
              "index_only": false,
              "rows": 3660,
              "cost": 4393,
              "chosen": true
            }
          ],
          "analyzing_roworder_intersect": {
            "usable": false,
            "cause": "too_few_roworder_scans"
          }
        },
        "chosen_range_access_summary": {
          "range_access_plan": {
            "type": "range_scan",
            "index": "k",
            "rows": 3660,
            "ranges": [
              "160000 <= k < 320000"
            ]
          },
          "rows_for_plan": 3660,
          "cost_for_plan": 4393,
          "chosen": true
        }
      }
    }
  ]
},
###################################################
rows_estimation: start estimating the number of records that need to be scanned.

This is computed for every table involved in the JOIN. Our example has only sbtest1, so only sbtest1 and its indexes are considered.

a. Compute the table's worst seeks:

3623     s->worst_seeks= min((double) s->found_records / 10,
3624                         (double) s->read_time * 3);

Here s->found_records is the number of records in the table, and s->read_time at the InnoDB layer is the number of clustered-index pages.

b. Check whether any index can be used for a GROUP BY or DISTINCT query:

  add_group_and_distinct_keys(join, s);

If the query has a GROUP BY, find all indexes that contain every GROUP BY column and add them to join_tab->const_keys and join_tab->keys. If the query has a DISTINCT, likewise find the indexes that cover all selected columns and add them to join_tab->const_keys and join_tab->keys. This step may produce trace output, e.g. for the following query (b is an indexed column):

select * from t1 where a < 1000 group by b;

"table": "`t1`",
"const_keys_added": {
  "keys": [
    "b"
  ],
  "cause": "group_by"
},
"range_analysis": {

c. Next comes the range analysis phase, the main part of this example, since our condition is a simple range query. Range analysis is only performed when the following conditions hold:

  Perform range analysis if there are keys it could use (1).
  Don't do range analysis if on the inner side of an outer join (2).
  Do range analysis if on the inner side of a semi-join (3).

3639     TABLE_LIST *const tl= s->table->pos_in_table_list;
3640     if (!s->const_keys.is_clear_all() &&                      // (1)
3641         (!tl->embedding ||                                    // (2)
3642          (tl->embedding && tl->embedding->sj_on_expr)))       // (3)

Once these conditions are met, the following call estimates the number of rows to scan:

3652       records= get_quick_record_count(thd, select, s->table,
3653                                       &s->const_keys, join->row_limit);

make_join_statistics
  -> get_quick_record_count
    -> SQL_SELECT::test_quick_select

c1) table_scan

First the cost of a full table scan is computed. Here the record count is records = 986190 (an estimate):

  scan_time = records * ROW_EVALUATE_COST + 1 = 986190 * 0.2 + 1 = 197239
  head->file->scan_time() = 13934
  read_time = head->file->scan_time() + scan_time + 1.1 = 13934 + 197239 + 1.1 = 211174.1

read_time is the cost of the full table scan. When FORCE INDEX is used, scan_time = read_time = DBL_MAX. When join->row_limit is smaller than the estimated number of rows in the table, read_time is recomputed as (double) records + scan_time + 1. Because of the ORDER BY, join->row_limit is set to a very large value here, i.e. the LIMIT 3 has no effect. The following is then emitted:

"range_analysis": {
  "table_scan": {
    "rows": 986190,
    "cost": 211174
  },

Next, all index information is printed (the potential_range_indices above). In this example there are only two indexes: the clustered index and key(k,d). Since there is no filter condition on the clustered index, its usable flag is false. Information about the usable indexes (those for which keys_to_use.is_set(idx) is TRUE) is copied into the key_parts array; the keys used may be affected by use_index_extensions (on by default), which is why key_parts of the secondary index also contains the clustered-index column.

Our example has no covering index, so let's build a simple table:

CREATE TABLE `t4` (
  `a` int(11) DEFAULT NULL,
  `b` int(11) DEFAULT NULL,
  `c` int(11) DEFAULT NULL,
  `d` int(11) DEFAULT NULL,
  KEY `a` (`a`),
  KEY `b` (`b`,`c`)
) ENGINE=InnoDB

and run the query:

select b,c from t4 where b = 15;

This is a very simple query; obviously the covering index KEY `b` (`b`,`c`) can be used, so the optimizer trace prints:

"best_covering_index_scan": {
  "index": "b",
  "cost": 13269,
  "chosen": true
},

First the usable covering index with the shortest key length is located:

  int key_for_use= find_shortest_key(head, &head->covering_keys);

because a shorter key means, in theory, less I/O. Then the cost of the covering index scan is computed:

  double key_read_time=
    param.table->file->index_only_read_time(key_for_use,
                                            rows2double(records)) +
    records * ROW_EVALUATE_COST;

If key_read_time is smaller than read_time, i.e. cheaper than the full table scan, the covering index is chosen and read_time is set to key_read_time. index_only_read_time works as follows: the number of keys per half-full page is

  uint keys_per_block= (stats.block_size/2/
                        (table_share->key_info[keynr].key_length + ref_length) +
                        1);
  read_time=((double) (records+keys_per_block-1) /
             (double) keys_per_block);

c4) setup_range_conditions

This may output impossible_range / range_scan_possible. TODO: what exactly happens here.

Next, group_index_range. The relevant code is presumably:

2808     group_trp= get_best_group_min_max(&param, tree, best_read_time);

Our example contains no GROUP BY, so chosen is false, with cause not_group_by_or_distinct. A simple example:

select * from t4 where a > 100000 group by b;

The relevant output is:

"group_index_range": {
  "potential_group_range_indices": [
    {
      "index": "b",
      "usable": false,
      "cause": "not_covering"
    },
    {
      "index": "a",
      "usable": false,
      "cause": "not_covering"
    }
  ]
},
If a qualifying index exists, a best_group_range_summary may also be output (TODO).

Next, the costs of the candidate range accesses are analyzed. Our example query is simple and has a range on only one index (k), so there is only one alternative:

"range_scan_alternatives": [
  {
    "index": "k",
    "ranges": [
      "160000 <= k < 320000"
    ],
    "index_dives_for_eq_ranges": true,
    "rowid_ordered": false,
    "using_mrr": false,
    "index_only": false,
    "rows": 3660,
    "cost": 4393,
    "chosen": true
  }
],

Now consider:

select * from t4 where a > 100 and b > 100 and b < 500;

Here there are two ranges, so both index(a) and index(b,c) may be usable; the optimizer_trace output is:

"range_scan_alternatives": [
  {
    "index": "b",
    "ranges": [
      "100 < b < 500"
    ],
    "index_dives_for_eq_ranges": true,
    "rowid_ordered": false,
    "using_mrr": false,
    "index_only": false,
    "rows": 2606,
    "cost": 3128.2,
    "chosen": true
  },
  {
    "index": "a",
    "ranges": [
      "100 < a"
    ],
    "index_dives_for_eq_ranges": true,
    "rowid_ordered": false,
    "using_mrr": false,
    "index_only": false,
    "rows": 32850,
    "cost": 39421,
    "chosen": false,
    "cause": "cost"
  }
],

The corresponding code:

2847   /* Get best 'range' plan and prepare data for making other plans */
2848   if ((range_trp= get_key_scans_params(&param, tree, FALSE, TRUE,
2849                                        best_read_time)))
2850   {
2851     best_trp= range_trp;
2852     best_read_time= best_trp->read_cost;
2853   }

get_key_scans_params() computes the cost of each index usable for the range query:

5546     found_records= check_quick_select(param, idx, read_index_only, *key,
5547                                       update_tbl_stats, &mrr_flags,
5548                                       &buf_size, &cost);

c7) analyzing_roworder_intersect

Optimize ROR ("Get best non-covering ROR-intersection plan"). The corresponding code:

2869   if ((rori_trp= get_best_ror_intersect(&param, tree, best_read_time)))
2870   {
2871     best_trp= rori_trp;
2872     best_read_time= best_trp->read_cost;
2873   }

In our example chosen is false because of "too_few_roworder_scans".

sql/opt_range.cc:2878~2910 may also emit analyzing_index_merge in the optimizer trace.

c9) chosen_range_access_summary

After all of the steps above, the best TABLE_READ_PLAN is printed.
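To give the roworder-intersect analysis something to consider (my own construction on the hypothetical t4 table above, not part of the original example; whether the intersection is actually chosen depends on the statistics), put equality conditions on two different indexes so that the optimizer sees two rowid-ordered scans:

select * from t4 where a = 100 and b = 200 and c = 300;

With only one candidate ROR scan, as in the sbtest1 example, the trace simply reports too_few_roworder_scans.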
3737   if (!join->plan_is_const())
3738     optimize_keyuse(join, keyuse_array);
3739
3740   join->allow_outer_refs= true;
3741
3742   if (optimize_semijoin_nests_for_materialization(join))
3743     DBUG_RETURN(true);
{
  "considered_execution_plans": [
    {
      "plan_prefix": [
      ],
      "table": "`sbtest1`",
      "best_access_path": {
        "considered_access_paths": [
          {
            "access_type": "range",
            "rows": 3660,
            "cost": 5125,
            "chosen": true
          }
        ]
      },
      "cost_for_plan": 5125,
      "rows_for_plan": 3660,
      "chosen": true
    }
  ]
},
###########################
Now we enter the selection of the join order for multiple tables:

3745   if (Optimize_table_order(thd, join, NULL).choose_table_order())
3746     DBUG_RETURN(true);

Optimize_table_order::choose_table_order() decides the join order, searching for the best query plan with a greedy algorithm; in 5.5 and earlier the corresponding function is choose_plan(). The tables are first sorted by record count, smaller tables first (unless STRAIGHT_JOIN is used). For a straight join, optimize_straight_join(join_tables) is called to build the plan; otherwise greedy_search(join_tables) performs a depth-first traversal to find the best execution plan. Our example has only the single table sbtest1, so there is nothing to choose; let's construct a two-table join to see more:

select * from t1,t2 where t1.a < 5000 and t2.b = 4000;

The corresponding trace is:

{
  "considered_execution_plans": [
    {
      "plan_prefix": [
      ],
      "table": "`t2`",
      "best_access_path": {
        "considered_access_paths": [
          {
            "access_type": "ref",
            "index": "b",
            "rows": 1,
            "cost": 1.2,
            "chosen": true
          },
          {
            "access_type": "range",
            "cause": "heuristic_index_cheaper",
            "chosen": false
          }
        ]
      },
      "cost_for_plan": 1.2,
      "rows_for_plan": 1,
      "rest_of_plan": [
        {
          "plan_prefix": [
            "`t2`"
          ],
          "table": "`t1`",
          "best_access_path": {
            "considered_access_paths": [
              {
                "access_type": "range",
                "rows": 2963,
                "cost": 1189.3,
                "chosen": true
              }
            ]
          },
          "cost_for_plan": 1190.5,
          "rows_for_plan": 2963,
          "chosen": true
        }
      ]
    },
    {
      "plan_prefix": [
      ],
      "table": "`t1`",
      "best_access_path": {
        "considered_access_paths": [
          {
            "access_type": "range",
            "rows": 2963,
            "cost": 1189.3,
            "chosen": true
          }
        ]
      },
      "cost_for_plan": 1189.3,
      "rows_for_plan": 2963,
      "pruned_by_heuristic": true
    }
  ]
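As a side note (these are standard 5.6 system variables, not something the original example changes), the heuristic behind pruned_by_heuristic above can be relaxed per session if you want to watch greedy_search enumerate more plan prefixes:

set optimizer_prune_level = 0;    -- disable heuristic pruning
set optimizer_search_depth = 62;  -- exhaustive search depth (0 lets the server choose)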
3752   if (join->decide_subquery_strategy())
3753     DBUG_RETURN(true);
3754
3755   join->refine_best_rowcount();
3767   if (thd->lex->is_single_level_stmt())
3768     thd->status_var.last_query_cost= join->best_read;
3769
3770   /* Generate an execution plan from the found optimal join order. */
3771   if (join->get_best_combination())
3772     DBUG_RETURN(true);
{
  "attaching_conditions_to_tables": {
    "original_condition": "((`sbtest1`.`k` >= 160000) and (`sbtest1`.`k` < 320000))",
    "attached_conditions_computation": [
      {
        "table": "`sbtest1`",
        "rechecking_index_usage": {
          "recheck_reason": "low_limit",
          "limit": 3,
          "row_estimate": 3660
        }
      }
    ],
    "attached_conditions_summary": [
      {
        "table": "`sbtest1`",
        "attached": "((`sbtest1`.`k` >= 160000) and (`sbtest1`.`k` < 320000))"
      }
    ]
  }
},
###########################################
The function called is:

505   if (make_join_select(this, conds))

According to its comments, make_join_select() does several things:

Step #1: Extract constant condition
  - Extract and check the constant part of the WHERE
  - Extract constant parts of ON expressions from outer joins and attach them appropriately.
Trace keyword: condition_on_constant_tables

Heuristic: Switch from 'ref' to 'range' access if 'range' access can utilize more keyparts than 'ref' access (there are conditions for doing the switch).
Trace keywords: attaching_conditions_to_tables / attached_conditions_computation
{
  "clause_processing": {
    "clause": "ORDER BY",
    "original_clause": "`sbtest1`.`pad`",
    "items": [
      {
        "item": "`sbtest1`.`pad`"
      }
    ],
    "resulting_clause_is_simple": true,
    "resulting_clause": "`sbtest1`.`pad`"
  }
},
#############################
d. Try to optimize DISTINCT / GROUP BY / ORDER BY, etc.
sql/sql_optimizer.cc: 514~695
517   order= ORDER_with_src(remove_const(this, order, conds, 1, &simple_order, "ORDER BY"),
                            order.src);
From the trace output, nothing was changed here.
GROUP BY produces similar output; for example, changing the example's ORDER BY into a GROUP BY:
select * from sbtest.sbtest1 where k >= 160000 and k < 320000 group by pad limit 3;
{
  "clause_processing": {
    "clause": "GROUP BY",
    "original_clause": "`sbtest`.`sbtest1`.`pad`",
    "items": [
      {
        "item": "`sbtest`.`sbtest1`.`pad`"
      }
    ],
    "resulting_clause_is_simple": true,
    "resulting_clause": "`sbtest`.`sbtest1`.`pad`"
  }
},
{
  "refine_plan": [
    {
      "table": "`sbtest1`",
      "pushed_index_condition": "((`sbtest1`.`k` >= 160000) and (`sbtest1`.`k` < 320000))",
      "table_condition_attached": null,
      "access_type": "range"
    }
  ]
}
########################################
The function called is:

725   if (make_join_readinfo(this, select_opts_for_readinfo, no_jbuf_after))
726     DBUG_RETURN(1);

The refine_plan section is printed inside make_join_readinfo(). This can also be regarded as the final query plan output.
{
  "join_execution": {
    "select#": 1,
    "steps": [
      {
        "filesort_information": [
          {
            "direction": "asc",
            "table": "`sbtest1`",
            "field": "pad"
          }
        ],
        "filesort_priority_queue_optimization": {
          "limit": 3,
          "rows_estimate": 13386691,
          "row_size": 495,
          "memory_available": 32768,
          "chosen": true
        },
        "filesort_execution": [
        ],
        "filesort_summary": {
          "rows": 4,
          "examined_rows": 3662,
          "number_of_tmp_files": 0,
          "sort_buffer_size": 2012,
          "sort_mode": "<sort_key, additional_fields>"
        }
      }
    ]
  }
}
######################################
This prints the information related to the filesort execution. filesort_information gives the basic sort parameters. For a query of the form

SELECT ... FROM t ORDER BY a1,...,an LIMIT max_rows;

the function check_if_pq_applicable() checks whether a priority queue can be used to maintain the result. This requires that:
- it is cheaper than a merge sort;
- there is enough memory to hold max_rows rows.

memory_available is the size of sort_buffer_size.
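One way to watch check_if_pq_applicable() change its decision (an illustration, assuming the session sort_buffer_size is what the trace reports as memory_available) is to shrink or enlarge the sort buffer, re-run the query, and compare the filesort_priority_queue_optimization section:

set session sort_buffer_size = 32768;   -- 32768 is the minimum allowed value in 5.6
select * from sbtest1 where k >= 160000 and k < 320000 order by pad limit 3;
select trace from information_schema.optimizer_trace\G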
Original article. When reposting, please credit: reposted from Simple Life.