diff --git a/mysql-test/main/opt_trace_load_stats.result b/mysql-test/main/opt_trace_load_stats.result index 82ca5b1e01a4d..af8e3914fb8cb 100644 --- a/mysql-test/main/opt_trace_load_stats.result +++ b/mysql-test/main/opt_trace_load_stats.result @@ -367,62 +367,62 @@ set @opt_context=json_remove(@saved_opt_context_1, '$.current_database'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "current_database" element not present at offset 1613. +Warning 4253 Failed to parse saved optimizer context: "current_database" element not present at offset 1669. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].name'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "name" element not present at offset 1620. +Warning 4253 Failed to parse saved optimizer context: "name" element not present at offset 1676. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].ddl'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "ddl" element not present at offset 1400. +Warning 4253 Failed to parse saved optimizer context: "ddl" element not present at offset 1456. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].num_of_records'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "num_of_records" element not present at offset 1616. +Warning 4253 Failed to parse saved optimizer context: "num_of_records" element not present at offset 1672. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].indexes[0].index_name'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "index_name" element not present at offset 406. +Warning 4253 Failed to parse saved optimizer context: "index_name" element not present at offset 433. 
set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].indexes[0].rec_per_key'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "rec_per_key" element not present at offset 410. +Warning 4253 Failed to parse saved optimizer context: "rec_per_key" element not present at offset 437. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_ranges[0].index_name'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "index_name" element not present at offset 883. +Warning 4253 Failed to parse saved optimizer context: "index_name" element not present at offset 910. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_ranges[0].ranges'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "ranges" element not present at offset 875. +Warning 4253 Failed to parse saved optimizer context: "ranges" element not present at offset 902. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_ranges[0].num_rows'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "num_rows" element not present at offset 893. +Warning 4253 Failed to parse saved optimizer context: "num_rows" element not present at offset 920. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_ranges[0].cost'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "cost" element not present at offset 677. +Warning 4253 Failed to parse saved optimizer context: "cost" element not present at offset 704. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_ranges[0].max_index_blocks'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "max_index_blocks" element not present at offset 886. 
+Warning 4253 Failed to parse saved optimizer context: "max_index_blocks" element not present at offset 913. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_ranges[0].max_row_blocks'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "max_row_blocks" element not present at offset 888. +Warning 4253 Failed to parse saved optimizer context: "max_row_blocks" element not present at offset 915. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].indexes[0]'); select * from t1 where a > 10; a b @@ -461,51 +461,51 @@ set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_inde select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "key_number" element not present at offset 775. +Warning 4253 Failed to parse saved optimizer context: "key_number" element not present at offset 802. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].num_records'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "num_records" element not present at offset 774. +Warning 4253 Failed to parse saved optimizer context: "num_records" element not present at offset 801. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].eq_ref'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "eq_ref" element not present at offset 779. +Warning 4253 Failed to parse saved optimizer context: "eq_ref" element not present at offset 806. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].index_cost_io'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "index_cost_io" element not present at offset 772. 
+Warning 4253 Failed to parse saved optimizer context: "index_cost_io" element not present at offset 799. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].index_cost_cpu'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "index_cost_cpu" element not present at offset 761. +Warning 4253 Failed to parse saved optimizer context: "index_cost_cpu" element not present at offset 788. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].row_cost_io'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "row_cost_io" element not present at offset 774. +Warning 4253 Failed to parse saved optimizer context: "row_cost_io" element not present at offset 801. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].row_cost_cpu'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "row_cost_cpu" element not present at offset 763. +Warning 4253 Failed to parse saved optimizer context: "row_cost_cpu" element not present at offset 790. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].max_index_blocks'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "max_index_blocks" element not present at offset 769. +Warning 4253 Failed to parse saved optimizer context: "max_index_blocks" element not present at offset 796. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].max_row_blocks'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "max_row_blocks" element not present at offset 771. +Warning 4253 Failed to parse saved optimizer context: "max_row_blocks" element not present at offset 798. 
set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].copy_cost'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "copy_cost" element not present at offset 776. +Warning 4253 Failed to parse saved optimizer context: "copy_cost" element not present at offset 803. drop table t1; drop database db1; diff --git a/mysql-test/main/opt_trace_load_stats_innodb.result b/mysql-test/main/opt_trace_load_stats_innodb.result index ccee44fc2a175..3fd1b0594e446 100644 --- a/mysql-test/main/opt_trace_load_stats_innodb.result +++ b/mysql-test/main/opt_trace_load_stats_innodb.result @@ -1301,5 +1301,102 @@ JSON_EQUALS(@saved_explain_output, @explain_output) 1 set optimizer_replay_context=""; drop table t1; +# +# Index-Merge query on a single table having 2 indexes with overlapping keys +# +set optimizer_replay_context=""; +create table t1 ( +a int, +b int, +c int, +index idx_ab(a, b), +index idx_ac(a, c) +) ENGINE=InnoDB; +insert into t1 select seq%2, seq%3, seq%5 from seq_1_to_20; +set optimizer_replay_context=""; +explain format=json select * from t1 where a=1 and b=1 and c=1 +set @trace= (select trace from information_schema.optimizer_trace); +set @saved_opt_context= +(select json_pretty(json_extract( +json_extract(@trace, "$**.optimizer_context"), +'$[0]' + ) +)); +set @saved_opt_context_var_name='saved_opt_context'; +set @explain_output='$explain_output'; +set @explain_output= (select json_pretty(round_cost(@explain_output))); +select @explain_output; +@explain_output +{ + "query_block": + { + "select_id": 1, + "cost": 0.0038076, + "nested_loop": + [ + { + "table": + { + "table_name": "t1", + "access_type": "index_merge", + "possible_keys": + [ + "idx_ab", + "idx_ac" + ], + "key_length": "10,10", + "index_merge": + { + "intersect": + [ + { + "range": + { + "key": "idx_ac", + "used_key_parts": + [ + "a", + "c" + ] + } + }, + { + "range": + { + "key": "idx_ab", + "used_key_parts": + [ + 
"a", + "b" + ] + } + } + ] + }, + "loops": 1, + "rows": 1, + "cost": 0.0038076, + "filtered": 100, + "attached_condition": "t1.a = 1 and t1.b = 1 and t1.c = 1", + "using_index": true + } + } + ] + } +} +set @saved_explain_output=@explain_output; +set optimizer_replay_context=""; +delete from t1; +analyze table t1; +Table Op Msg_type Msg_text +db1.t1 analyze status OK +set optimizer_replay_context=@saved_opt_context_var_name; +set @explain_output='$explain_output'; +set @explain_output= (select json_pretty(round_cost(@explain_output))); +select JSON_EQUALS(@saved_explain_output, @explain_output); +JSON_EQUALS(@saved_explain_output, @explain_output) +1 +set optimizer_replay_context=""; +drop table t1; drop function round_cost; drop database db1; diff --git a/mysql-test/main/opt_trace_load_stats_innodb.test b/mysql-test/main/opt_trace_load_stats_innodb.test index 7f24025333acb..cc532ac39a672 100644 --- a/mysql-test/main/opt_trace_load_stats_innodb.test +++ b/mysql-test/main/opt_trace_load_stats_innodb.test @@ -433,6 +433,30 @@ let $table_update_query=delete from t1; drop table t1; +--echo # +--echo # Index-Merge query on a single table having 2 indexes with overlapping keys +--echo # + +set optimizer_replay_context=""; + +create table t1 ( + a int, + b int, + c int, + index idx_ab(a, b), + index idx_ac(a, c) +) ENGINE=InnoDB; + +insert into t1 select seq%2, seq%3, seq%5 from seq_1_to_20; + +let $explain_query=explain format=json select * from t1 where a=1 and b=1 and c=1; + +let $table_update_query=delete from t1; + +--source include/run_query_twice_and_compare_stats.inc + +drop table t1; + drop function round_cost; drop database db1; diff --git a/mysql-test/main/opt_trace_store_stats.result b/mysql-test/main/opt_trace_store_stats.result index 0f543e7c340b1..c5d63992d07eb 100644 --- a/mysql-test/main/opt_trace_store_stats.result +++ b/mysql-test/main/opt_trace_store_stats.result @@ -259,4 +259,39 @@ t1_idx_b ["(6) <= (b) <= (10)"] 1 drop view view1; drop table t1; 
drop table t2; +# +# union query with const tables +# testing the INSERT statements +# +set optimizer_record_context=OFF; +create table t1 (a int not null auto_increment, +b int, +primary key (a) +); +insert into t1 select seq, seq%5 from seq_1_to_20; +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +db1.t1 analyze status Engine-independent statistics collected +db1.t1 analyze status OK +set optimizer_record_context=ON; +analyze select * from t1 where t1.a=5 and t1.b=0 union select * from t1 where t1.a=4 and t1.b=4; +id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra +1 PRIMARY t1 const PRIMARY PRIMARY 4 const 1 NULL 100.00 NULL +2 UNION t1 const PRIMARY PRIMARY 4 const 1 NULL 100.00 NULL +NULL UNION RESULT ALL NULL NULL NULL NULL NULL 2.00 NULL NULL +set @trace= (select trace from information_schema.optimizer_trace); +set @const_table_inserts= (select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.const_table_inserts'))); +select @const_table_inserts; +@const_table_inserts +[ + [ + "REPLACE INTO db1.t1(a, b) VALUES (5, 0)", + "REPLACE INTO db1.t1(a, b) VALUES (4, 4)" + ] +] +select * from json_table(@const_table_inserts, '$[*][*]' columns(insert_stmt text path '$')) as jt; +insert_stmt +REPLACE INTO db1.t1(a, b) VALUES (5, 0) +REPLACE INTO db1.t1(a, b) VALUES (4, 4) +drop table t1; drop database db1; diff --git a/mysql-test/main/opt_trace_store_stats.test b/mysql-test/main/opt_trace_store_stats.test index d7e14542f308e..975b4132cb743 100644 --- a/mysql-test/main/opt_trace_store_stats.test +++ b/mysql-test/main/opt_trace_store_stats.test @@ -183,4 +183,29 @@ select * from json_table( drop view view1; drop table t1; drop table t2; + +--echo # +--echo # union query with const tables +--echo # testing the INSERT statements +--echo # +set optimizer_record_context=OFF; + +create table t1 (a int not null auto_increment, + b int, + primary key (a) +); + +insert into t1 select seq, seq%5 from seq_1_to_20; + +analyze 
table t1 persistent for all; + +set optimizer_record_context=ON; +analyze select * from t1 where t1.a=5 and t1.b=0 union select * from t1 where t1.a=4 and t1.b=4; + +set @trace= (select trace from information_schema.optimizer_trace); +set @const_table_inserts= (select JSON_DETAILED(JSON_EXTRACT(@trace, '$**.const_table_inserts'))); +select @const_table_inserts; +select * from json_table(@const_table_inserts, '$[*][*]' columns(insert_stmt text path '$')) as jt; + +drop table t1; drop database db1; diff --git a/sql/filesort.cc b/sql/filesort.cc index 1360f4a3b181b..fe9ca6867465c 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -736,74 +736,10 @@ static char dbug_row_print_buf[4096]; String dbug_format_row(TABLE *table, const uchar *rec, bool print_names) { - Field **pfield; - char row_buff_tmp[512]; - String tmp(row_buff_tmp, sizeof(row_buff_tmp), &my_charset_bin); - String output(dbug_row_print_buf, sizeof(dbug_row_print_buf), &my_charset_bin); - - auto move_back_lambda= [table, rec]() mutable { - table->move_fields(table->field, table->record[0], rec); - }; - auto move_back_guard= make_scope_exit(move_back_lambda, false); - - if (rec != table->record[0]) - { - table->move_fields(table->field, rec, table->record[0]); - move_back_guard.engage(); - } - - SCOPE_VALUE(table->read_set, (table->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE) ? - table->write_set : table->read_set); - + String output(dbug_row_print_buf, sizeof(dbug_row_print_buf), + &my_charset_bin); output.length(0); - output.append(table->alias); - output.append('('); - bool first= true; - if (print_names) - { - for (pfield= table->field; *pfield ; pfield++) - { - if (table->read_set && !bitmap_is_set(table->read_set, (*pfield)->field_index)) - continue; - - if (first) - first= false; - else - output.append(STRING_WITH_LEN(", ")); - - output.append((*pfield)->field_name.str - ? 
(*pfield)->field_name : NULL_clex_str); - } - - output.append(STRING_WITH_LEN(")=(")); - first= true; - } - - for (pfield= table->field; *pfield ; pfield++) - { - Field *field= *pfield; - - if (table->read_set && !bitmap_is_set(table->read_set, (*pfield)->field_index)) - continue; - - if (first) - first= false; - else - output.append(STRING_WITH_LEN(", ")); - - if (field->is_null()) - output.append(&NULL_clex_str); - else - { - if (field->type() == MYSQL_TYPE_BIT) - (void) field->val_int_as_str(&tmp, 1); - else - field->val_str(&tmp); - output.append(tmp.ptr(), tmp.length()); - } - } - output.append(')'); - + format_and_store_row(table, rec, print_names, "=", true, output); return output; } @@ -3087,3 +3023,90 @@ static uint make_packed_sortkey(Sort_param *param, uchar *to) Sort_keys::store_sortkey_length(orig_to, length); return length; } + +/* + @brief + format the row record and store it in the output + for eg: - if print_names, and reqd_table_alias are specified, + with a separator of "=", the formatted row looks like + "tbl(col1,col2,...,coln)=(val1,val2,...,valn)" +*/ +void format_and_store_row(TABLE *table, const uchar *rec, bool print_names, + const char *separator, bool reqd_table_alias, + String &output) +{ + Field **pfield; + char row_buff_tmp[512]; + String tmp(row_buff_tmp, sizeof(row_buff_tmp), &my_charset_bin); + + auto move_back_lambda= [table, rec]() mutable { + table->move_fields(table->field, table->record[0], rec); + }; + auto move_back_guard= make_scope_exit(move_back_lambda, false); + + if (rec != table->record[0]) + { + table->move_fields(table->field, rec, table->record[0]); + move_back_guard.engage(); + } + + SCOPE_VALUE(table->read_set, + (table->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE) + ? 
table->write_set + : table->read_set); + + if (reqd_table_alias) + { + output.append(table->alias); + } + output.append('('); + bool first= true; + if (print_names) + { + for (pfield= table->field; *pfield; pfield++) + { + if (table->read_set && + !bitmap_is_set(table->read_set, (*pfield)->field_index)) + continue; + + if (first) + first= false; + else + output.append(STRING_WITH_LEN(", ")); + + output.append((*pfield)->field_name.str ? (*pfield)->field_name + : NULL_clex_str); + } + + output.append(STRING_WITH_LEN(")")); + output.append(separator, strlen(separator)); + output.append(STRING_WITH_LEN("(")); + first= true; + } + + for (pfield= table->field; *pfield; pfield++) + { + Field *field= *pfield; + + if (table->read_set && + !bitmap_is_set(table->read_set, (*pfield)->field_index)) + continue; + + if (first) + first= false; + else + output.append(STRING_WITH_LEN(", ")); + + if (field->is_null()) + output.append(&NULL_clex_str); + else + { + if (field->type() == MYSQL_TYPE_BIT) + (void) field->val_int_as_str(&tmp, 1); + else + field->val_str(&tmp); + output.append(tmp.ptr(), tmp.length()); + } + } + output.append(')'); +} diff --git a/sql/filesort.h b/sql/filesort.h index f2da3b84e1b1e..6b82c841a23d1 100644 --- a/sql/filesort.h +++ b/sql/filesort.h @@ -244,5 +244,7 @@ void change_double_for_sort(double nr,uchar *to); void store_length(uchar *to, uint length, uint pack_length); void reverse_key(uchar *to, const SORT_FIELD_ATTR *sort_field); - +void format_and_store_row(TABLE *table, const uchar *rec, bool print_names, + const char *separator, bool reqd_table_alias, + String &output); #endif /* FILESORT_INCLUDED */ diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 4721ec3233bb2..7de83b02ccba3 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -448,8 +448,6 @@ static int and_range_trees(RANGE_OPT_PARAM *param, static bool remove_nonrange_trees(PARAM *param, SEL_TREE *tree); static void restore_nonrange_trees(RANGE_OPT_PARAM *param, SEL_TREE *tree, 
SEL_ARG **backup); -static void print_key_value(String *out, const KEY_PART_INFO *key_part, - const uchar* key, uint length); static void print_keyparts_name(String *out, const KEY_PART_INFO *key_part, uint n_keypart, key_part_map keypart_map); @@ -466,6 +464,12 @@ static void print_min_range_operator(String *out, const ha_rkey_function flag); static void print_max_range_operator(String *out, const ha_rkey_function flag); static bool is_field_an_unique_index(Field *field); +static ha_rows hook_records_in_range(MEM_ROOT *mem_root, THD *thd, + TABLE *table, + const KEY_PART_INFO *key_part, uint keynr, + const key_range *min_range, + const key_range *max_range, + page_range *pages); /* SEL_IMERGE is a list of possible ways to do index merge, i.e. it is @@ -7238,8 +7242,11 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info, } min_range.length= max_range.length= (uint) (key_ptr - key_val); min_range.keypart_map= max_range.keypart_map= keypart_map; - records= (info->param->table->file-> - records_in_range(scan->keynr, &min_range, &max_range, &pages)); + + records= hook_records_in_range(info->param->old_root, info->param->thd, + info->param->table, key_part, scan->keynr, + &min_range, &max_range, &pages); + if (cur_covered) { /* uncovered -> covered */ @@ -17672,8 +17679,8 @@ static void trace_ranges(Json_writer_array *range_trace, PARAM *param, @param[in] used_length length of the key tuple */ -static void print_key_value(String *out, const KEY_PART_INFO *key_part, - const uchar* key, uint used_length) +void print_key_value(String *out, const KEY_PART_INFO *key_part, + const uchar *key, uint used_length) { out->append(STRING_WITH_LEN("(")); Field *field= key_part->field; @@ -17729,3 +17736,36 @@ void print_keyparts_name(String *out, const KEY_PART_INFO *key_part, } out->append(STRING_WITH_LEN(")")); } + +/* + @brief + Call records_in_range(). 
If necessary, + - Replace its return value from Optimizer Context, and/or + - Save its return value in the Optimizer Context we're recording. + + @detail + Note that currently "pages" and min/max_range->flag are not hooked. +*/ +static ha_rows hook_records_in_range(MEM_ROOT *mem_root, THD *thd, + TABLE *table, + const KEY_PART_INFO *key_part, uint keynr, + const key_range *min_range, + const key_range *max_range, + page_range *pages) +{ + ha_rows records= + (table->file->records_in_range(keynr, min_range, max_range, pages)); + + if (thd->opt_ctx_replay) + { + thd->opt_ctx_replay->infuse_records_in_range( + table, key_part, keynr, min_range, max_range, &records); + } + + if (Optimizer_context_recorder *recorder= get_opt_context_recorder(thd)) + { + recorder->record_records_in_range(mem_root, table, key_part, keynr, + min_range, max_range, records); + } + return records; +} diff --git a/sql/opt_range.h b/sql/opt_range.h index 39f2af28402d0..3e43c66c1267a 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -2043,6 +2043,9 @@ bool eq_ranges_exceeds_limit(RANGE_SEQ_IF *seq, void *seq_init_param, void print_range(String *out, const KEY_PART_INFO *key_part, KEY_MULTI_RANGE *range, uint n_key_parts); +void print_key_value(String *out, const KEY_PART_INFO *key_part, + const uchar *key, uint used_length); + #ifdef WITH_PARTITION_STORAGE_ENGINE bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond); #endif diff --git a/sql/opt_store_replay_context.cc b/sql/opt_store_replay_context.cc index 25e567d1ea1c9..460aaea21681e 100644 --- a/sql/opt_store_replay_context.cc +++ b/sql/opt_store_replay_context.cc @@ -29,6 +29,8 @@ #include "sql_json_lib.h" #include "opt_histogram_json.h" +using namespace json_reader; + /** @file @@ -125,6 +127,18 @@ class cost_index_read_call_record : public Sql_alloc ALL_READ_COST cost; }; +/* + A record to hold one records_in_range() call: +*/ +class records_in_range_call_record : public Sql_alloc +{ +public: + uint keynr; + char *min_key; + 
char *max_key; + ha_rows records; +}; + /* structure to store all the index range records, and the cost for reading indexes, pertaining to a table @@ -140,6 +154,8 @@ class trace_table_context : public Sql_alloc size_t name_len; List mrr_list; List irc_list; + List rir_list; + List const_tbl_ins_stmt_list; }; static char *strdup_root(MEM_ROOT *root, String *buf); @@ -155,8 +171,11 @@ static int parse_range_context(THD *thd, json_engine_t *je, String *err_buf, static int parse_index_read_cost_context(THD *thd, json_engine_t *je, String *err_buf, trace_irc_context_read *irc_ctx); -static int parse_range_cost_estimate(THD *thd, json_engine_t *je, - String *err_buf, Cost_estimate *cost); +static bool parse_range_cost_estimate(THD *thd, json_engine_t *je, + String *err_buf, Cost_estimate *cost); +static int parse_records_in_range_context(THD *thd, json_engine_t *je, + String *err_buf, + trace_rir_context_read *rir_ctx); struct DDL_Key { @@ -277,6 +296,25 @@ static void dump_index_read_cost_to_trace(THD *thd, } } +static void dump_records_in_range_to_trace(THD *thd, + trace_table_context *context) +{ + if (!context) + return; + + Json_writer_array list_irc_wrapper(thd, "list_records_in_range"); + List_iterator rir_li(context->rir_list); + + while (records_in_range_call_record *rir= rir_li++) + { + Json_writer_object rir_wrapper(thd); + rir_wrapper.add("key_number", rir->keynr); + rir_wrapper.add("min_key", rir->min_key); + rir_wrapper.add("max_key", rir->max_key); + rir_wrapper.add("num_records", rir->records); + } +} + static void dump_index_stats_to_trace(THD *thd, uchar *tbl_name, size_t tbl_name_len) { @@ -288,6 +326,7 @@ static void dump_index_stats_to_trace(THD *thd, uchar *tbl_name, dump_range_stats_to_trace(thd, table_context); dump_index_read_cost_to_trace(thd, table_context); + dump_records_in_range_to_trace(thd, table_context); } /* @@ -451,6 +490,18 @@ bool store_tables_context_in_trace(THD *thd) if (!tbl->is_view()) { + trace_table_context *table_context= 
thd->opt_ctx_recorder->search( + (uchar *) ddl_key->name, ddl_key->name_len); + if (table_context) + { + Json_writer_array inserts_wrapper(thd, "const_table_inserts"); + List_iterator inserts_li(table_context->const_tbl_ins_stmt_list); + while (char *stmt= inserts_li++) + { + inserts_wrapper.add(stmt, strlen(stmt)); + } + inserts_wrapper.end(); + } dump_table_stats_to_trace(thd, tbl, (uchar *) ddl_key->name, ddl_key->name_len, ctx_wrapper); } @@ -617,6 +668,63 @@ const uchar *Optimizer_context_recorder::get_tbl_trace_ctx_key( return reinterpret_cast(entry->name); } +void Optimizer_context_recorder::record_records_in_range( + MEM_ROOT *mem_root, const TABLE *tbl, const KEY_PART_INFO *key_part, + uint keynr, const key_range *min_range, const key_range *max_range, + ha_rows records) +{ + records_in_range_call_record *rec_in_range_ctx= + new (mem_root) records_in_range_call_record; + + if (unlikely(!rec_in_range_ctx)) + return; // OOM + + rec_in_range_ctx->keynr= keynr; + String min_key; + String max_key; + print_key_value(&min_key, key_part, min_range->key, min_range->length); + /* BUG FIX: max_key must be formatted from max_range, not min_range */ + print_key_value(&max_key, key_part, max_range->key, max_range->length); + + if (!(rec_in_range_ctx->min_key= + strdup_root(mem_root, min_key.c_ptr_safe()))) + return; // OOM + + if (!(rec_in_range_ctx->max_key= + strdup_root(mem_root, max_key.c_ptr_safe()))) + return; // OOM + + rec_in_range_ctx->records= records; + + trace_table_context *table_ctx= + get_table_context(mem_root, tbl->pos_in_table_list); + + if (unlikely(!table_ctx)) + return; // OOM + + table_ctx->rir_list.push_back(rec_in_range_ctx, mem_root); +} + +void Optimizer_context_recorder::record_const_table_row(MEM_ROOT *mem_root, + TABLE *tbl) +{ + StringBuffer<512> output; + output.append(STRING_WITH_LEN("REPLACE INTO ")); + store_full_table_name(tbl->pos_in_table_list, &output); + format_and_store_row(tbl, tbl->record[1], true, " VALUES ", false, output); + trace_table_context *table_ctx= + get_table_context(mem_root, 
tbl->pos_in_table_list); + + if (unlikely(!table_ctx)) + return; // OOM + + char *ins_stmt= strdup_root(mem_root, output.c_ptr_safe()); + + if (unlikely(!ins_stmt)) + return; // OOM + + table_ctx->const_tbl_ins_stmt_list.push_back(ins_stmt, mem_root); +} + static char *strdup_root(MEM_ROOT *root, String *buf) { return strdup_root(root, buf->c_ptr_safe()); @@ -713,6 +821,19 @@ class trace_irc_context_read : public Sql_alloc ALL_READ_COST cost; }; +/* + This class is used to store the in-memory representation of + one records_in_range call cost i.e. read from json +*/ +class trace_rir_context_read : public Sql_alloc +{ +public: + uint keynr; + char *min_key; + char *max_key; + ha_rows records; +}; + /* This class is used to store the in-memory representation of a table context i.e. read from json. @@ -733,6 +854,7 @@ class trace_table_context_read : public Sql_alloc List index_list; List ranges_list; List irc_list; + List rir_list; }; /* @@ -787,8 +909,8 @@ class Read_container_value : public Read_value int after_read(int rc) { return rc > 0; } public: - int read_value(json_engine_t *je, const char *value_name, - String *err_buf) override + bool read_value(json_engine_t *je, const char *value_name, + String *err_buf) override { int rc= before_read(je, value_name, err_buf); if (rc <= 0) @@ -810,8 +932,8 @@ class Read_range_cost_estimate : public Read_value : thd(thd_arg), ptr(ptr_arg) { } - int read_value(json_engine_t *je, const char *value_name, - String *err_buf) override + bool read_value(json_engine_t *je, const char *value_name, + String *err_buf) override { return parse_range_cost_estimate(thd, je, err_buf, ptr); } @@ -831,6 +953,7 @@ class Read_list_of_ha_rows : public Read_container_value { while (je->state != JST_ARRAY_END) { + using json_reader::read_ha_rows_and_check_limit; ha_rows temp_value; if (read_ha_rows_and_check_limit(je, "rec_per_key", err_buf, temp_value, ULONGLONG_MAX, "unsigned longlong", @@ -973,7 +1096,7 @@ static int 
parse_context_obj_from_json_array(json_engine_t *je, if (int rc= parse_check_obj_start_in_array(je, err_buf, err_msg)) return rc; - return read_all_elements(je, array, err_buf); + return json_read_object(je, array, err_buf); } /* @@ -1014,6 +1137,10 @@ static int parse_table_context(THD *thd, json_engine_t *je, String *err_buf, Read_list_of_context( thd, &table_ctx->irc_list, parse_index_read_cost_context), true}, + {"list_records_in_range", + Read_list_of_context( + thd, &table_ctx->rir_list, parse_records_in_range_context), + true}, {NULL, Read_double(NULL), true}}; return parse_context_obj_from_json_array(je, err_buf, err_msg, array); @@ -1095,8 +1222,8 @@ static int parse_range_context(THD *thd, json_engine_t *je, String *err_buf, 1 Parse Error -1 EOF */ -static int parse_range_cost_estimate(THD *thd, json_engine_t *je, - String *err_buf, Cost_estimate *cost) +static bool parse_range_cost_estimate(THD *thd, json_engine_t *je, + String *err_buf, Cost_estimate *cost) { if (json_scan_next(je) || je->state != JST_OBJ_START) { @@ -1118,7 +1245,7 @@ static int parse_range_cost_estimate(THD *thd, json_engine_t *je, {"row_cost_cpu", Read_double(&cost->row_cost.cpu), false}, {NULL, Read_double(NULL), true}}; - return read_all_elements(je, array, err_buf); + return json_read_object(je, array, err_buf); } /* @@ -1164,11 +1291,41 @@ static int parse_index_read_cost_context(THD *thd, json_engine_t *je, return parse_context_obj_from_json_array(je, err_buf, err_msg, array); } -Optimizer_context_replay::Optimizer_context_replay(THD *thd) +/* + Parses the cost information for reading records_in_range + JSON structure of the optimizer context. + To be specific, single array element of list_records_in_range + is parsed in this method. + Refer to the file opt_context_schema.inc, and + the description at the start of this file. 
+ + @return + 0 OK + 1 Parse Error + -1 EOF +*/ +static int parse_records_in_range_context(THD *thd, json_engine_t *je, + String *err_buf, + trace_rir_context_read *rir_ctx) { - this->thd= thd; - this->db_name= NULL; - this->parse(); + const char *err_msg= "Expected an object in the records_in_range array"; + + Read_named_member array[]= { + {"key_number", Read_non_neg_integer(&rir_ctx->keynr), + false}, + {"min_key", Read_string(thd, &rir_ctx->min_key), false}, + {"max_key", Read_string(thd, &rir_ctx->max_key), false}, + {"num_records", + Read_non_neg_integer(&rir_ctx->records), false}, + {NULL, Read_double(NULL), true}}; + + return parse_context_obj_from_json_array(je, err_buf, err_msg, array); +} + +Optimizer_context_replay::Optimizer_context_replay(THD *thd_arg) + : thd(thd_arg), db_name(nullptr) +{ + parse(); // TODO: error handling? } /* @@ -1187,16 +1344,13 @@ bool Optimizer_context_replay::infuse_read_cost(const TABLE *tbl, String tbl_name; store_full_table_name(tbl->pos_in_table_list, &tbl_name); - List_iterator table_itr(this->ctx_list); - while (trace_table_context_read *tbl_ctx= table_itr++) + if (trace_table_context_read *tbl_ctx= + find_trace_read_context(tbl_name.c_ptr_safe())) { - if (strcmp(tbl_name.c_ptr_safe(), tbl_ctx->name) == 0) - { - cost->io= tbl_ctx->read_cost_io; - cost->cpu= tbl_ctx->read_cost_cpu; - return false; - } + cost->io= tbl_ctx->read_cost_io; + cost->cpu= tbl_ctx->read_cost_cpu; + return false; } push_warning_printf( @@ -1319,21 +1473,18 @@ bool Optimizer_context_replay::infuse_index_read_cost(const TABLE *tbl, String tbl_name; store_full_table_name(tbl->pos_in_table_list, &tbl_name); - List_iterator table_itr(this->ctx_list); - while (trace_table_context_read *tbl_ctx= table_itr++) + if (trace_table_context_read *tbl_ctx= + find_trace_read_context(tbl_name.c_ptr_safe())) { - if (strcmp(tbl_name.c_ptr_safe(), tbl_ctx->name) == 0) + List_iterator irc_itr(tbl_ctx->irc_list); + while (trace_irc_context_read *irc_ctx= irc_itr++) { - 
List_iterator irc_itr(tbl_ctx->irc_list); - while (trace_irc_context_read *irc_ctx= irc_itr++) + if (irc_ctx->key == keynr && irc_ctx->records == records && + irc_ctx->eq_ref == eq_ref) { - if (irc_ctx->key == keynr && irc_ctx->records == records && - irc_ctx->eq_ref == eq_ref) - { - *cost= irc_ctx->cost; - return false; - } + *cost= irc_ctx->cost; + return false; } } } @@ -1371,7 +1522,7 @@ void Optimizer_context_replay::infuse_table_stats(TABLE *table) saved_ts->table= table; saved_ts->original_rows= table->used_stat_records; - if (saved_tablestats_list.push_back(saved_ts)) + if (saved_table_stats.push_back(saved_ts)) return; ha_rows temp_rows; @@ -1433,23 +1584,70 @@ void Optimizer_context_replay::infuse_table_stats(TABLE *table) } } +bool Optimizer_context_replay::infuse_records_in_range( + const TABLE *tbl, const KEY_PART_INFO *key_part, uint keynr, + const key_range *min_range, const key_range *max_range, ha_rows *records) +{ + if (!has_records() || !is_base_table(tbl->pos_in_table_list)) + return true; + + String min_key; + String max_key; + String tbl_name; + print_key_value(&min_key, key_part, min_range->key, min_range->length); + print_key_value(&max_key, key_part, min_range->key, min_range->length); + store_full_table_name(tbl->pos_in_table_list, &tbl_name); + + if (trace_table_context_read *tbl_ctx= + find_trace_read_context(tbl_name.c_ptr_safe())) + { + List_iterator rir_itr(tbl_ctx->rir_list); + while (trace_rir_context_read *rir_ctx= rir_itr++) + { + if (rir_ctx->keynr == keynr && + strcmp(rir_ctx->min_key, min_key.c_ptr_safe()) == 0 && + strcmp(rir_ctx->max_key, max_key.c_ptr_safe()) == 0) + { + *records= rir_ctx->records; + return false; + } + } + } + + String warn_msg; + warn_msg.append(tbl_name); + warn_msg.append(STRING_WITH_LEN(" with key_number:")); + warn_msg.append(keynr); + warn_msg.append(STRING_WITH_LEN(" with min_key:")); + warn_msg.append(min_key); + warn_msg.append(STRING_WITH_LEN(" with max_key:")); + warn_msg.append(max_key); + 
push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_JSON_OPTIMIZER_REPLAY_CONTEXT_MATCH_FAILED, + ER_THD(thd, ER_JSON_OPTIMIZER_REPLAY_CONTEXT_MATCH_FAILED), + warn_msg.c_ptr_safe(), "list_records_in_range"); + return true; +} + /* @brief restore the saved stats for the tables, and indexes that were - earlier recorded using set_table_stats_from_context() + earlier recorded using infuse_table_stats() */ void Optimizer_context_replay::restore_modified_table_stats() { - List_iterator table_li(saved_tablestats_list); + List_iterator table_li(saved_table_stats); while (Saved_Table_stats *saved_ts= table_li++) { saved_ts->table->used_stat_records= saved_ts->original_rows; List_iterator index_li(saved_ts->saved_indexstats_list); while (Saved_Index_stats *saved_is= index_li++) { - saved_is->key_info->is_statistics_from_stat_tables= + KEY *key= saved_is->key_info; + key->is_statistics_from_stat_tables= saved_is->original_is_statistics_from_stat_tables; - saved_is->key_info->read_stats= saved_is->original_read_stats; + key->read_stats= saved_is->original_read_stats; } } } @@ -1483,10 +1681,10 @@ bool Optimizer_context_replay::parse() LEX_CSTRING varname= {var_name, strlen(var_name)}; Read_named_member array[]= { - {"current_database", Read_string(this->thd, &this->db_name), false}, + {"current_database", Read_string(thd, &db_name), false}, {"list_contexts", - Read_list_of_context( - this->thd, &this->ctx_list, parse_table_context), + Read_list_of_context(thd, &ctx_list, + parse_table_context), false}, {NULL, Read_double(NULL), true}}; @@ -1524,7 +1722,7 @@ bool Optimizer_context_replay::parse() goto err; } - if (read_all_elements(&je, array, &err_buf)) + if (json_read_object(&je, array, &err_buf)) goto err; #ifndef DBUG_OFF @@ -1548,8 +1746,8 @@ void Optimizer_context_replay::dbug_print_read_stats() { DBUG_ENTER("Optimizer_context_replay::print()"); DBUG_PRINT("info", ("----------Printing Stored Context-------------")); - DBUG_PRINT("info", ("current_database : 
%s", this->db_name)); - List_iterator table_itr(this->ctx_list); + DBUG_PRINT("info", ("current_database : %s", db_name)); + List_iterator table_itr(ctx_list); while (trace_table_context_read *tbl_ctx= table_itr++) { @@ -1656,15 +1854,12 @@ bool Optimizer_context_replay::infuse_table_rows(const TABLE *tbl, String tbl_name; store_full_table_name(tbl->pos_in_table_list, &tbl_name); - List_iterator table_itr(this->ctx_list); - while (trace_table_context_read *tbl_ctx= table_itr++) + if (trace_table_context_read *tbl_ctx= + find_trace_read_context(tbl_name.c_ptr_safe())) { - if (strcmp(tbl_name.c_ptr_safe(), tbl_ctx->name) == 0) - { - *rows= tbl_ctx->total_rows; - return false; - } + *rows= tbl_ctx->total_rows; + return false; } push_warning_printf( @@ -1689,19 +1884,16 @@ Optimizer_context_replay::get_index_rec_per_key_list(const TABLE *tbl, String tbl_name; store_full_table_name(tbl->pos_in_table_list, &tbl_name); - List_iterator table_itr(this->ctx_list); - while (trace_table_context_read *tbl_ctx= table_itr++) + if (trace_table_context_read *tbl_ctx= + find_trace_read_context(tbl_name.c_ptr_safe())) { - if (strcmp(tbl_name.c_ptr_safe(), tbl_ctx->name) == 0) + List_iterator index_itr(tbl_ctx->index_list); + while (trace_index_context_read *idx_ctx= index_itr++) { - List_iterator index_itr(tbl_ctx->index_list); - while (trace_index_context_read *idx_ctx= index_itr++) + if (strcmp(idx_name, idx_ctx->idx_name) == 0) { - if (strcmp(idx_name, idx_ctx->idx_name) == 0) - { - return &idx_ctx->list_rec_per_key; - } + return &idx_ctx->list_rec_per_key; } } } @@ -1733,20 +1925,17 @@ void Optimizer_context_replay::store_range_contexts( String tbl_name; store_full_table_name(tbl->pos_in_table_list, &tbl_name); - List_iterator table_itr(this->ctx_list); - while (trace_table_context_read *tbl_ctx= table_itr++) + if (trace_table_context_read *tbl_ctx= + find_trace_read_context(tbl_name.c_ptr_safe())) { - if (strcmp(tbl_name.c_ptr_safe(), tbl_ctx->name) == 0) + List_iterator 
range_ctx_itr( + tbl_ctx->ranges_list); + while (trace_range_context_read *range_ctx= range_ctx_itr++) { - List_iterator range_ctx_itr( - tbl_ctx->ranges_list); - while (trace_range_context_read *range_ctx= range_ctx_itr++) + if (strcmp(idx_name, range_ctx->index_name) == 0) { - if (strcmp(idx_name, range_ctx->index_name) == 0) - { - list->push_back(range_ctx); - } + list->push_back(range_ctx); } } } @@ -1764,3 +1953,16 @@ void Optimizer_context_replay::store_range_contexts( name.c_ptr_safe(), "list of range contexts"); } } + +trace_table_context_read * +Optimizer_context_replay::find_trace_read_context(const char *name) +{ + List_iterator table_itr(ctx_list); + + while (trace_table_context_read *tbl_ctx= table_itr++) + { + if (strcmp(name, tbl_ctx->name) == 0) + return tbl_ctx; + } + return nullptr; +} \ No newline at end of file diff --git a/sql/opt_store_replay_context.h b/sql/opt_store_replay_context.h index b9379c1585204..22987f5ea22cd 100644 --- a/sql/opt_store_replay_context.h +++ b/sql/opt_store_replay_context.h @@ -23,46 +23,26 @@ #include "table.h" #include "json_lib.h" -class SEL_ARG_RANGE_SEQ; - -class Range_list_recorder : public Sql_alloc -{ -public: - void add_range(MEM_ROOT *mem_root, const char *range); -}; +/*************************************************************************** + * Part 1: APIs for recording Optimizer Context. + ***************************************************************************/ +class SEL_ARG_RANGE_SEQ; +class Range_list_recorder; class trace_table_context; -bool store_tables_context_in_trace(THD *thd); - /* - This class is used to buffer the range stats for indexes, - while the range optimizer is in use, - so that entire tables/views contexts can be stored - at a single place in the trace. + Recorder is used to capture the environment during query optimization run. 
+ When the optimization is finished, one can save the captured context + somewhere (currently, we write it into the Optimizer Trace) */ class Optimizer_context_recorder { -private: - /* - Hash of table contexts used for storing - all the ranges of indexes that are used - in the current query, into the trace. - full name of the table/view is used as the key. - */ - HASH tbl_trace_ctx_hash; - trace_table_context *get_table_context(MEM_ROOT *mem_root, - const TABLE_LIST *tbl); - public: Optimizer_context_recorder(); ~Optimizer_context_recorder(); - bool has_records(); - - trace_table_context *search(uchar *tbl_name, size_t tbl_name_len); - Range_list_recorder * start_range_list_record(MEM_ROOT *mem_root, TABLE_LIST *tbl, size_t found_records, const char *index_name, @@ -72,10 +52,35 @@ class Optimizer_context_recorder void record_cost_index_read(MEM_ROOT *mem_root, const TABLE_LIST *tbl, uint key, ha_rows records, bool eq_ref, const ALL_READ_COST *cost); + void record_records_in_range(MEM_ROOT *mem_root, const TABLE *tbl, + const KEY_PART_INFO *key_part, uint keynr, + const key_range *min_range, + const key_range *max_range, ha_rows records); + void record_const_table_row(MEM_ROOT *mem_root, TABLE *tbl); + + bool has_records(); + trace_table_context *search(uchar *tbl_name, size_t tbl_name_len); + +private: + /* + Hash table mapping "dbname.table_name" -> pointer to trace_table_context. + Contains records for all tables for which we have captured data. 
+ */ + HASH tbl_trace_ctx_hash; + + trace_table_context *get_table_context(MEM_ROOT *mem_root, + const TABLE_LIST *tbl); static const uchar *get_tbl_trace_ctx_key(const void *entry_, size_t *length, my_bool flags); }; +/* Interface to record range lists */ +class Range_list_recorder : public Sql_alloc +{ +public: + void add_range(MEM_ROOT *mem_root, const char *range); +}; + /* Optionally create and get the statistics context recorder for this query */ Optimizer_context_recorder *get_opt_context_recorder(THD *thd); @@ -86,25 +91,70 @@ get_range_list_recorder(THD *thd, MEM_ROOT *mem_root, TABLE_LIST *tbl, Cost_estimate *cost, ha_rows max_index_blocks, ha_rows max_row_blocks); +/* Save the collected context in optimizer trace */ +bool store_tables_context_in_trace(THD *thd); + +/*************************************************************************** + * Part 2: APIs for loading previously saved Optimizer Context and replaying + * it: making the optimizer work as if the environment was like it has been + * at the time the context was recorded. + ***************************************************************************/ class trace_table_context_read; class trace_index_context_read; class trace_range_context_read; class trace_irc_context_read; +class trace_rir_context_read; + class Saved_Table_stats; /* This class stores the parsed optimizer context information and then infuses read stats into the optimizer + + Optimizer Context information that we've read from a JSON document. + + The optimizer can use infuse_XXX() methods to get the saved values. */ class Optimizer_context_replay { +public: + Optimizer_context_replay(THD *thd); + + /* Save table's statistics and replace it with data from the context. */ + void infuse_table_stats(TABLE *table); + /* Restore the saved statistics back (to be done at query end) */ + void restore_modified_table_stats(); + + /* + "Infusion" functions. 
+ When the optimizer needs some data, for example to call index_read_cost(), + it will call infuse_index_read_cost() and get the value from the context. + */ + bool infuse_read_cost(const TABLE *tbl, IO_AND_CPU_COST *cost); + bool infuse_range_stats(TABLE *tbl, uint keynr, RANGE_SEQ_IF *seq_if, + SEL_ARG_RANGE_SEQ *seq, Cost_estimate *cost, + ha_rows *rows, ha_rows *max_index_blocks, + ha_rows *max_row_blocks); + bool infuse_index_read_cost(const TABLE *tbl, uint keynr, ha_rows records, + bool eq_ref, ALL_READ_COST *cost); + bool infuse_records_in_range(const TABLE *tbl, const KEY_PART_INFO *key_part, + uint keynr, const key_range *min_range, + const key_range *max_range, ha_rows *records); + private: THD *thd; - List saved_tablestats_list; + /* + Statistics that tables had before we've replaced them with values from + the saved context. To be used to restore the original values. + */ + List saved_table_stats; + + /* Current database recorded in the saved Optimizer Context */ char *db_name; + List ctx_list; - bool has_records(); bool parse(); + bool has_records(); #ifndef DBUG_OFF void dbug_print_read_stats(); #endif @@ -113,18 +163,7 @@ class Optimizer_context_replay void store_range_contexts(const TABLE *tbl, const char *idx_name, List *list); bool infuse_table_rows(const TABLE *tbl, ha_rows *rows); - -public: - Optimizer_context_replay(THD *thd); - bool infuse_read_cost(const TABLE *tbl, IO_AND_CPU_COST *cost); - bool infuse_range_stats(TABLE *tbl, uint keynr, RANGE_SEQ_IF *seq_if, - SEL_ARG_RANGE_SEQ *seq, Cost_estimate *cost, - ha_rows *rows, ha_rows *max_index_blocks, - ha_rows *max_row_blocks); - bool infuse_index_read_cost(const TABLE *tbl, uint keynr, ha_rows records, - bool eq_ref, ALL_READ_COST *cost); - void infuse_table_stats(TABLE *table); - void restore_modified_table_stats(); + trace_table_context_read *find_trace_read_context(const char *name); }; #endif diff --git a/sql/sql_json_lib.cc b/sql/sql_json_lib.cc index 7ccac45c5fcc1..952d7eed0e806 
100644 --- a/sql/sql_json_lib.cc +++ b/sql/sql_json_lib.cc @@ -16,6 +16,9 @@ #include "mysql.h" #include "sql_select.h" +namespace json_reader +{ + /* check if the given read_elem_key can be read from the json_engine. if not, fill the err_buf with an error message @@ -148,23 +151,38 @@ bool read_ha_rows_and_check_limit(json_engine_t *je, const char *read_elem_key, return false; } +}; // namespace json_reader + /* - function to read all the registered members in Read_named_member array - from json, and check if the value was assigned to them or not. - If any of the mandatory fields are not assigned a value, then the - function returns an error. + @brief + Read a JSON object. The members to read are described in *members array. + + @detail + members is an array terminated by a member with (char*)NULL as a name. + Each element describes the element name, type and location where to store + the read value. See class Read_named_member for details. + + In the JSON document, object members can come in any order. Non-mandatory + members may be absent. + + If JSON document has a member which was not requested to read, + then we just skip it and not report any error. 
+ + @seealso + class Read_named_member + @return 0 OK 1 An Error occured */ -int read_all_elements(json_engine_t *je, Read_named_member *arr, - String *err_buf) +int json_read_object(json_engine_t *je, Read_named_member *members, + String *err_buf) { int rc; while (!(rc= json_scan_next(je)) && je->state != JST_OBJ_END) { Json_saved_parser_state save1(je); - for (Read_named_member *memb= arr; memb->name; memb++) + for (Read_named_member *memb= members; memb->name; memb++) { Json_string js_name(memb->name); if (json_key_matches(je, js_name.get())) @@ -179,7 +197,7 @@ int read_all_elements(json_engine_t *je, Read_named_member *arr, } /* Check if all members got values */ - for (Read_named_member *memb= arr; memb->name; memb++) + for (Read_named_member *memb= members; memb->name; memb++) { if (!memb->is_optional && !memb->value_assigned) { diff --git a/sql/sql_json_lib.h b/sql/sql_json_lib.h index f098c0767af6e..2c8a7c1c2446a 100644 --- a/sql/sql_json_lib.h +++ b/sql/sql_json_lib.h @@ -84,17 +84,58 @@ bool json_unescape_to_string(const char *val, int val_len, String *out); */ int json_escape_to_string(const String *str, String *out); -bool read_ha_rows_and_check_limit(json_engine_t *je, const char *read_elem_key, - String *err_buf, ha_rows &value, - ha_rows LIMIT_VAL, - const char *limit_val_type, - bool unescape_required); +namespace json_reader +{ +class Read_value; +}; + +/* + Description of a JSON object member that is to be read by json_read_object(). + Intended usage: + + char *var1; + int var2; + Read_named_member memb[]= { + {"member1", Read_string(thd->mem_root, &var1), false}, + {"member2", Read_double(&var2), false}, + {NULL, Read_double(NULL), true} + }; + json_read_object(je, memb, err); +*/ + +class Read_named_member +{ +public: + const char *name; /* JSON object name */ + /* + Reader object holding the datatype and place for the value. + It's an lvalue reference so we can have both inherited classes + and refer to unnamed objects on the stack. 
+ */ + json_reader::Read_value &&value; + + const bool is_optional; /* Can this member be omitted in JSON? */ + + bool value_assigned= false; /* Filled and checked by json_read_object() */ +}; + +/* Read an object from JSON according to description in *members */ +int json_read_object(json_engine_t *je, Read_named_member *members, + String *err_buf); + +namespace json_reader +{ /* Things to use with Read_named_member */ bool read_string(THD *thd, json_engine_t *je, const char *read_elem_key, String *err_buf, char *&value); bool read_double(json_engine_t *je, const char *read_elem_key, String *err_buf, double &value); +bool read_ha_rows_and_check_limit(json_engine_t *je, const char *read_elem_key, + String *err_buf, ha_rows &value, + ha_rows LIMIT_VAL, + const char *limit_val_type, + bool unescape_required); /* Interface to read a value with value_name from Json, @@ -103,20 +144,20 @@ bool read_double(json_engine_t *je, const char *read_elem_key, String *err_buf, class Read_value { public: - virtual int read_value(json_engine_t *je, const char *value_name, - String *err_buf)= 0; - virtual ~Read_value(){}; + virtual bool read_value(json_engine_t *je, const char *value_name, + String *err_buf)= 0; + virtual ~Read_value() {}; }; class Read_string : public Read_value { char **ptr; - THD *thd; + THD *thd; /* The string will be allocated on thd->mem_root */ public: Read_string(THD *thd_arg, char **ptr_arg) : ptr(ptr_arg), thd(thd_arg) {} - int read_value(json_engine_t *je, const char *value_name, - String *err_buf) override + bool read_value(json_engine_t *je, const char *value_name, + String *err_buf) override { return read_string(thd, je, value_name, err_buf, *ptr); } @@ -128,8 +169,8 @@ class Read_double : public Read_value public: Read_double(double *ptr_arg) : ptr(ptr_arg) {} - int read_value(json_engine_t *je, const char *value_name, - String *err_buf) override + bool read_value(json_engine_t *je, const char *value_name, + String *err_buf) override { return 
read_double(je, value_name, err_buf, *ptr); } @@ -142,8 +183,8 @@ class Read_non_neg_integer : public Read_value public: Read_non_neg_integer(T *ptr_arg) : ptr(ptr_arg) {} - int read_value(json_engine_t *je, const char *value_name, - String *err_buf) override + bool read_value(json_engine_t *je, const char *value_name, + String *err_buf) override { ha_rows temp_val; const char *type= ""; @@ -189,22 +230,6 @@ class Read_non_neg_integer : public Read_value } }; -/* - A place holder to keep track of the field, and its corresponding - Read_value class to be used for fetching the field value from Json. - It tracks whether the value was successfully read from the Json or not. - */ -class Read_named_member -{ -public: - const char *name; - Read_value &&value; - const bool is_optional; - - bool value_assigned= false; -}; - -int read_all_elements(json_engine_t *je, Read_named_member *arr, - String *err_buf); +}; /* namespace json_reader */ #endif diff --git a/sql/sql_select.cc b/sql/sql_select.cc index dc2ad27d393a9..bafcdcbe88c2c 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -25149,6 +25149,11 @@ join_read_const(JOIN_TAB *tab) return -1; } store_record(table,record[1]); + if (Optimizer_context_recorder *recorder= + get_opt_context_recorder(tab->join->thd)) + { + recorder->record_const_table_row(tab->join->thd->mem_root, table); + } } else if (!(table->status & ~STATUS_NULL_ROW)) // Only happens with left join {