diff --git a/include/handler_ername.h b/include/handler_ername.h index 74a07c8707826..4ba0b07a5f498 100644 --- a/include/handler_ername.h +++ b/include/handler_ername.h @@ -78,6 +78,7 @@ { "HA_ERR_ABORTED_BY_USER", HA_ERR_ABORTED_BY_USER, "" }, { "HA_ERR_DISK_FULL", HA_ERR_DISK_FULL, "" }, { "HA_ERR_INCOMPATIBLE_DEFINITION", HA_ERR_INCOMPATIBLE_DEFINITION, "" }, +{ "HA_ERR_FK_DEPTH_EXCEEDED", HA_ERR_FK_DEPTH_EXCEEDED, "" }, { "HA_ERR_COMMIT_ERROR", HA_ERR_COMMIT_ERROR, "" }, { "HA_ERR_PARTITION_LIST", HA_ERR_PARTITION_LIST, ""}, { "HA_ERR_NO_ENCRYPTION", HA_ERR_NO_ENCRYPTION, ""}, diff --git a/include/my_base.h b/include/my_base.h index 8fdd1397ff09d..0a85444de31fb 100644 --- a/include/my_base.h +++ b/include/my_base.h @@ -557,7 +557,8 @@ enum ha_base_keytype { #define HA_ERR_ROLLBACK 200 /* Automatic rollback done */ #define HA_ERR_LOCAL_TMP_SPACE_FULL 201 #define HA_ERR_GLOBAL_TMP_SPACE_FULL 202 -#define HA_ERR_LAST 202 /* Copy of last error nr * */ +#define HA_ERR_CASCADE_SQL 203 /* Error happened in cascade sql action */ +#define HA_ERR_LAST 203 /* Copy of last error nr * */ /* Number of different errors */ #define HA_ERR_ERRORS (HA_ERR_LAST - HA_ERR_FIRST + 1) diff --git a/include/my_handler_errors.h b/include/my_handler_errors.h index c98e8cbafd739..3d4f959ffe934 100644 --- a/include/my_handler_errors.h +++ b/include/my_handler_errors.h @@ -114,7 +114,8 @@ static const char *handler_error_messages[]= "Transaction was aborted", /* HA_ERR_LOCAL_TMP_SPACE_FULL=201 */ "Local temporary space limit reached", - "Global temporary space limit reached" + "Global temporary space limit reached", + "Error in cascade SQL action", }; #endif /* MYSYS_MY_HANDLER_ERRORS_INCLUDED */ diff --git a/mysql-test/main/alter_table_online.result b/mysql-test/main/alter_table_online.result index 45351daea0fea..c7b1df0ed23cb 100644 --- a/mysql-test/main/alter_table_online.result +++ b/mysql-test/main/alter_table_online.result @@ -91,14 +91,12 @@ references t1 (a) on update cascade) engine=InnoDB; insert into t2 values (1),(2),(3); alter table t2 add c int, algorithm=copy, lock=none; -ERROR 0A000: LOCK=NONE is not supported. Reason: ON UPDATE CASCADE. Try LOCK=SHARED -alter table t2 add c int, algorithm=inplace, lock=none; +alter table t2 add d int, algorithm=inplace, lock=none; create or replace table t2 (b int, foreign key (b) references t1 (a) on delete set null) engine=InnoDB; alter table t2 add c int, algorithm=copy, lock=none; -ERROR 0A000: LOCK=NONE is not supported. Reason: ON DELETE SET NULL. Try LOCK=SHARED -alter table t2 add c int, algorithm=inplace, lock=none; +alter table t2 add d int, algorithm=inplace, lock=none; create or replace table t2 (b int, foreign key (b) references t1 (a) on delete no action) engine=InnoDB; @@ -116,8 +114,7 @@ create table t2 (a int references t1 (a), b int references t1 (b) on update cascade) engine=InnoDB; insert into t2 values (1, 1),(2, 2); alter table t2 add c int, algorithm=copy, lock=none; -ERROR 0A000: LOCK=NONE is not supported. Reason: ON UPDATE CASCADE. 
Try LOCK=SHARED -alter table t2 add c int, algorithm=copy; +alter table t2 add e int, algorithm=copy; alter table t2 add d int, algorithm=inplace; drop table t2, t1; # diff --git a/mysql-test/main/alter_table_online.test b/mysql-test/main/alter_table_online.test index 6416a3e79d1ce..7d5f89bccb914 100644 --- a/mysql-test/main/alter_table_online.test +++ b/mysql-test/main/alter_table_online.test @@ -70,6 +70,8 @@ DROP TABLE t; --echo # --echo # MDEV-29068 Cascade foreign key updates do not apply in online alter +--echo # per MDEV-31942 Online alter: support cascade foreign keys +--echo # The limitation is lifted --echo # create table t1 (a int primary key) engine=InnoDB; insert into t1 values (1),(2),(3); @@ -78,17 +80,16 @@ create table t2 (b int, foreign key (b) on update cascade) engine=InnoDB; insert into t2 values (1),(2),(3); ---error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON + alter table t2 add c int, algorithm=copy, lock=none; -alter table t2 add c int, algorithm=inplace, lock=none; +alter table t2 add d int, algorithm=inplace, lock=none; create or replace table t2 (b int, foreign key (b) references t1 (a) on delete set null) engine=InnoDB; ---error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON alter table t2 add c int, algorithm=copy, lock=none; -alter table t2 add c int, algorithm=inplace, lock=none; +alter table t2 add d int, algorithm=inplace, lock=none; create or replace table t2 (b int, foreign key (b) references t1 (a) @@ -111,9 +112,8 @@ create table t2 (a int references t1 (a), b int references t1 (b) on update cascade) engine=InnoDB; insert into t2 values (1, 1),(2, 2); ---error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON alter table t2 add c int, algorithm=copy, lock=none; -alter table t2 add c int, algorithm=copy; +alter table t2 add e int, algorithm=copy; alter table t2 add d int, algorithm=inplace; # Cleanup drop table t2, t1; diff --git a/mysql-test/main/alter_table_online_debug.result b/mysql-test/main/alter_table_online_debug.result index 537556e2e2db9..c4ecb85815ce7 100644 --- a/mysql-test/main/alter_table_online_debug.result +++ b/mysql-test/main/alter_table_online_debug.result @@ -1830,8 +1830,93 @@ alter table t add index (a), algorithm=copy, lock=none; connection default; drop table t; set global default_storage_engine= MyISAM; -disconnect con1; -disconnect con2; # # End of 11.2 tests # +# MDEV-12302: Execute triggers for foreign key updates/deletes +# This unblocks online alter table for cascade FK +create table t1 (a int primary key) engine=InnoDB; +insert into t1 values (1),(2),(3); +create table t2 (b int unique references t1 (a) on delete cascade) engine=InnoDB; +insert into t2 values (1),(2),(3); +set debug_sync= 'alter_table_copy_end SIGNAL copy_end WAIT_FOR proceed'; +alter table t2 add c int default (b+1), algorithm=copy, lock=none; +connection con1; +set debug_sync='now WAIT_FOR copy_end'; +delete from t1 where a = 1; +set debug_sync='now SIGNAL proceed'; +connection default; +select * from t2; +b c +2 3 +3 4 +create or replace table t2 (b int, foreign key (b) +references t1 (a) +on delete set null) engine=InnoDB; +insert into t2 values (2),(3); +set debug_sync= 'alter_table_copy_end SIGNAL copy_end WAIT_FOR proceed'; +alter table t2 add c int default (b+1), algorithm=copy, lock=none; +connection con1; +set debug_sync='now WAIT_FOR copy_end'; +delete from t1 where a = 2; +set debug_sync='now SIGNAL proceed'; +connection default; +select * from t2; +b c +NULL NULL +3 4 +select * from t1; +a +3 +drop table t2, t1; +create table t1 (a int primary key, b int unique) 
engine=InnoDB; +insert into t1 values (1, 1),(2, 2),(3, 3); +create table t2 (a int references t1 (a) on update cascade, +b int references t1 (b) on update cascade) engine=InnoDB; +insert into t2 values (1, 1),(2, 2),(3, 3); +set debug_sync= 'alter_table_copy_end SIGNAL copy_end WAIT_FOR proceed'; +alter table t2 add c int default (b+1), algorithm=copy, lock=none; +connection con1; +set debug_sync='now WAIT_FOR copy_end'; +update t1 set a = 5, b = 6 where a = 2; +set debug_sync='now SIGNAL proceed'; +connection default; +select * from t2; +a b c +1 1 2 +5 6 7 +3 3 4 +select * from t1; +a b +1 1 +3 3 +5 6 +drop table t2, t1; +create table t1 (a int primary key, b int unique) engine=InnoDB; +insert into t1 values (1, 1),(2, 2),(3, 3); +create table t2 (a int references t1 (a) on update cascade, +b int references t1 (b) on update set null) engine=InnoDB; +insert into t2 values (1, 1),(2, 2),(3, 3); +set debug_sync= 'alter_table_copy_end SIGNAL copy_end WAIT_FOR proceed'; +alter table t2 add c int default (b+1), algorithm=copy, lock=none; +connection con1; +set debug_sync='now WAIT_FOR copy_end'; +update t1 set a=5, b = 6 where a = 2; +set debug_sync='now SIGNAL proceed'; +connection default; +select * from t2; +a b c +1 1 2 +5 NULL NULL +3 3 4 +select * from t1; +a b +1 1 +3 3 +5 6 +drop table t2, t1; +# +# End of 11.3 tests +# +disconnect con1; +disconnect con2; diff --git a/mysql-test/main/alter_table_online_debug.test b/mysql-test/main/alter_table_online_debug.test index 1271e0f13e35f..6b0a6ac24d955 100644 --- a/mysql-test/main/alter_table_online_debug.test +++ b/mysql-test/main/alter_table_online_debug.test @@ -2103,8 +2103,95 @@ drop table t; eval set global default_storage_engine= $default_storage_engine; ---disconnect con1 ---disconnect con2 + --echo # --echo # End of 11.2 tests --echo # + + +--echo # MDEV-31942 Online alter: support cascade foreign keys +create table t1 (a int primary key) engine=InnoDB; +insert into t1 values (1),(2),(3); + +create table t2 (b int unique references t1 (a) on delete cascade) engine=InnoDB; +insert into t2 values (1),(2),(3); + + +set debug_sync= 'alter_table_copy_end SIGNAL copy_end WAIT_FOR proceed'; +send alter table t2 add c int default (b+1), algorithm=copy, lock=none; +connection con1; + +set debug_sync='now WAIT_FOR copy_end'; +delete from t1 where a = 1; +set debug_sync='now SIGNAL proceed'; +connection default; +reap; +select * from t2; + +create or replace table t2 (b int, foreign key (b) + references t1 (a) + on delete set null) engine=InnoDB; +insert into t2 values (2),(3); + +set debug_sync= 'alter_table_copy_end SIGNAL copy_end WAIT_FOR proceed'; +send alter table t2 add c int default (b+1), algorithm=copy, lock=none; + +connection con1; +set debug_sync='now WAIT_FOR copy_end'; +delete from t1 where a = 2; +set debug_sync='now SIGNAL proceed'; +connection default; +reap; +select * from t2; +select * from t1; + +drop table t2, t1; + +create table t1 (a int primary key, b int unique) engine=InnoDB; +insert into t1 values (1, 1),(2, 2),(3, 3); +create table t2 (a int references t1 (a) on update cascade, + b int references t1 (b) on update cascade) engine=InnoDB; +insert into t2 values (1, 1),(2, 2),(3, 3); + +set debug_sync= 'alter_table_copy_end SIGNAL copy_end WAIT_FOR proceed'; +send alter table t2 add c int default (b+1), algorithm=copy, lock=none; + +connection con1; +set debug_sync='now WAIT_FOR copy_end'; +update t1 set a = 5, b = 6 where a = 2; +set debug_sync='now SIGNAL proceed'; +connection default; +reap; +select * from t2; 
+select * from t1; + +drop table t2, t1; + +create table t1 (a int primary key, b int unique) engine=InnoDB; +insert into t1 values (1, 1),(2, 2),(3, 3); +create table t2 (a int references t1 (a) on update cascade, + b int references t1 (b) on update set null) engine=InnoDB; +insert into t2 values (1, 1),(2, 2),(3, 3); + +set debug_sync= 'alter_table_copy_end SIGNAL copy_end WAIT_FOR proceed'; +send alter table t2 add c int default (b+1), algorithm=copy, lock=none; + +connection con1; +set debug_sync='now WAIT_FOR copy_end'; +update t1 set a=5, b = 6 where a = 2; +set debug_sync='now SIGNAL proceed'; +connection default; +reap; +select * from t2; +select * from t1; + +# Cleanup +drop table t2, t1; + +--echo # +--echo # End of 11.3 tests +--echo # + + +--disconnect con1 +--disconnect con2 diff --git a/mysql-test/suite/innodb/r/foreign_key.result b/mysql-test/suite/innodb/r/foreign_key.result index d98da3242f856..5b7d6d7c19b0a 100644 --- a/mysql-test/suite/innodb/r/foreign_key.result +++ b/mysql-test/suite/innodb/r/foreign_key.result @@ -707,9 +707,9 @@ connection default; DELETE IGNORE FROM t1 WHERE b = 1; Warnings: Warning 152 InnoDB: Cannot delete/update rows with cascading foreign key constraints that exceed max depth of 15. Please drop extra constraints and try again -Warning 1296 Got error 193 '`test`.`t1`, CONSTRAINT `1` FOREIGN KEY (`a`) REFERENCES `t1` (`b`) ON DELETE CASCADE' from InnoDB +Warning 1030 Got error 193 "Foreign key cascade delete/update exceeds max depth" from storage engine InnoDB Warning 152 InnoDB: Cannot delete/update rows with cascading foreign key constraints that exceed max depth of 15. Please drop extra constraints and try again -Warning 1296 Got error 193 '`test`.`t1`, CONSTRAINT `1` FOREIGN KEY (`a`) REFERENCES `t1` (`b`) ON DELETE CASCADE' from InnoDB +Warning 1030 Got error 193 "Foreign key cascade delete/update exceeds max depth" from storage engine InnoDB SELECT a FROM t1 FORCE INDEX(a); a 0 @@ -872,11 +872,11 @@ INSERT INTO t1 (a,b) VALUES (0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), (0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,1),(1,0); DELETE FROM t1 WHERE b = 1; -ERROR HY000: Got error 193 '`test`.`t1`, CONSTRAINT `1` FOREIGN KEY (`a`) REFERENCES `t1` (`b`) ON DELETE CASCADE' from InnoDB +ERROR HY000: Got error 193 "Foreign key cascade delete/update exceeds max depth" from storage engine InnoDB SHOW WARNINGS; Level Code Message Warning 152 InnoDB: Cannot delete/update rows with cascading foreign key constraints that exceed max depth of 15. 
Please drop extra constraints and try again -Error 1296 Got error 193 '`test`.`t1`, CONSTRAINT `1` FOREIGN KEY (`a`) REFERENCES `t1` (`b`) ON DELETE CASCADE' from InnoDB +Error 1030 Got error 193 "Foreign key cascade delete/update exceeds max depth" from storage engine InnoDB DROP TABLE t1; FOUND 1 /InnoDB: Cannot delete/update rows with cascading foreign key constraints that exceed max depth of 15.*/ in mysqld.1.err # End of 10.2 tests diff --git a/mysql-test/suite/innodb/r/trigger_cascade.result b/mysql-test/suite/innodb/r/trigger_cascade.result new file mode 100644 index 0000000000000..92c47d9a709e6 --- /dev/null +++ b/mysql-test/suite/innodb/r/trigger_cascade.result @@ -0,0 +1,464 @@ +# +# After update +# +create table t1 ( +x int primary key +) engine=innodb; +create table t2 ( +x int primary key, +y int, +foreign key (x) references t1(x) on update cascade +) engine=innodb; +create table t3 ( +old_x int, +new_x int +) engine=innodb; +create trigger tr after update on t2 +for each row +insert into t3 (old_x, new_x) values (old.x, new.x); +insert into t1 (x) values (1); +insert into t2 (x, y) values (1, 20); +update t1 set x = 2; +select * from t3; +old_x new_x +1 2 +update t2 set y = 50; +select * from t3; +old_x new_x +1 2 +2 2 +select * from t1; +x +2 +select * from t2; +x y +2 50 +drop table t3; +drop table t2; +drop table t1; +# +# After update with counter +# +create table t1(x int primary key) engine=innodb; +create table t2(x int primary key, +foreign key(x) references t1(x) on update cascade) engine=innodb; +create trigger tr_t2 after update on t2 +for each row set @counter=@counter+1; +insert into t1 values (1); +insert into t2 values (1); +set @counter=0; +update t1 set x = 2; +select * from t1; +x +2 +select * from t2; +x +2 +select @counter; +@counter +1 +drop table t2; +drop table t1; +# +# Before update +# +create table t1 ( +x int primary key +) engine=innodb; +create table t2 ( +x int primary key, +y int, +foreign key(x) references t1(x) on update cascade +) engine=innodb; +create trigger tr before update on t2 +for each row set new.y = 50; +insert into t1 (x) values (1); +insert into t2 (x, y) values (1,2); +update t1 set x = 2; +select * from t1; +x +2 +select * from t2; +x y +2 50 +drop table t2; +drop table t1; +# +# Update, foreign key on secondary index +# +create table t1(x int auto_increment primary key, y int, unique key(y)) engine=innodb; +create table t2(a int primary key, x int, z varchar(20), +foreign key(x) references t1(y) on update cascade) engine=innodb; +create table t3(id int auto_increment primary key, action_x int, action_z varchar(20), note varchar(50)) engine=innodb; +create trigger tr_t2 before update on t2 +for each row +insert into t3(action_x, action_z, note) values (old.x, old.z, 'update t2 — old'), +(new.x, new.z, 'update t2 — new'); +insert into t1 values (1, 10); +insert into t2 values (1, 10, 'str'); +update t1 set y=20 where x=1; +select * from t1; +x y +1 20 +select * from t2; +a x z +1 20 str +select * from t3; +id action_x action_z note +1 10 str update t2 — old +2 20 str update t2 — new +drop table t3; +drop table t2; +drop table t1; +# +# Update, on update set null cascade +# +create table t1 ( +id int primary key, +value varchar(50) +) engine=innodb; +create table t2 ( +id int primary key, +t1_id int, +t2_value varchar(50), +foreign key (t1_id) references t1(id) on update set null +) engine=innodb; +create trigger tr_t2_before_update before update on t2 +for each row set new.t2_value = 'updated by trigger'; +insert into t1 (id, 
value) values (1, 'parent_row'); +insert into t2 (id, t1_id, t2_value) values (1, 1, 'child_row'); +select * from t1; +id value +1 parent_row +select * from t2; +id t1_id t2_value +1 1 child_row +update t1 set id = 2 where id = 1; +select * from t1; +id value +2 parent_row +select * from t2; +id t1_id t2_value +1 NULL updated by trigger +drop table t2; +drop table t1; +# +# Before update, indexes on virtual columns +# +create table t1(x int auto_increment primary key, y int, unique key(y)) engine=innodb; +create table t2(a int primary key, x int, +t int, +z varchar(20) as (concat("test", t)), +y varchar(20) as (concat("test2", t)), +y_another varchar(20) as (concat("test3", x)), +unique(z), +unique(y), +unique(y_another), +foreign key(x) references t1(y) on update cascade) engine=innodb; +create trigger tr before update on t2 +for each row set new.t = 50; +insert into t1 (x,y) values (1,1); +insert into t2 (a,x,t) values (1,1,1); +select * from t2; +a x t z y y_another +1 1 1 test1 test21 test31 +update t1 set y = 2; +select * from t2 force index (z) where z = 'test50'; +a x t z y y_another +1 2 50 test50 test250 test32 +select * from t2 force index (y) where y = 'test250'; +a x t z y y_another +1 2 50 test50 test250 test32 +select * from t2 force index (y_another) where y_another = 'test32'; +a x t z y y_another +1 2 50 test50 test250 test32 +select * from t2; +a x t z y y_another +1 2 50 test50 test250 test32 +drop table t2; +drop table t1; +# +# Before delete +# +create table t1 ( +x int primary key +) engine=innodb; +create table t2 ( +x int primary key, +y int, +foreign key(x) references t1(x) on delete cascade +) engine=innodb; +create trigger tr before delete on t2 +for each row set @deleted_value = old.y; +insert into t1 (x) values (1); +insert into t2 (x, y) values (1, 100); +delete from t1 where x = 1; +select @deleted_value as deleted_value; +deleted_value +100 +select * from t2; +x y +drop table t2; +drop table t1; +# +# After, before delete +# +create table t1 ( +x int primary key +) engine=innodb; +create table t2 ( +x int primary key, +y int, +foreign key(x) references t1(x) on delete cascade +) engine=innodb; +create table t3 ( +id int auto_increment primary key, +log_event varchar(255) +) engine=innodb; +create trigger tr_before before delete on t2 +for each row +set @deleted_value = old.y; +create trigger tr_after after delete on t2 +for each row +insert into t3 (log_event) values ('after delete trigger executed'); +insert into t1 (x) values (1), (2); +insert into t2 (x, y) values (1, 100), (2, 200); +delete from t1 where x = 1; +select @deleted_value as before_deleted_value; +before_deleted_value +100 +select * from t3; +id log_event +1 after delete trigger executed +select * from t2; +x y +2 200 +select * from t1; +x +2 +drop table t3; +drop table t2; +drop table t1; +# +# On delete set null +# +create table t1 ( +id int primary key, +value varchar(50) +) engine=innodb; +create table t2 ( +id int primary key, +t1_id int, +t2_value varchar(50), +foreign key (t1_id) references t1(id) on delete set null +) engine=innodb; +create trigger tr_t2_before_update before update on t2 +for each row set new.t2_value = 'updated by trigger'; +insert into t1 (id, value) values (1, 'parent_row'); +insert into t2 (id, t1_id, t2_value) values (1, 1, 'child_row'); +select * from t1; +id value +1 parent_row +select * from t2; +id t1_id t2_value +1 1 child_row +delete from t1 where id = 1; +select * from t1; +id value +select * from t2; +id t1_id t2_value +1 NULL updated by trigger +drop 
table t2; +drop table t1; +# +# Cascade chain +# +create table t1 ( +id int primary key +) engine=innodb; +create table t2 ( +id int primary key, +foreign key (id) references t1(id) on update cascade +) engine=innodb; +create table t3 ( +id int primary key, +foreign key (id) references t2(id) on update cascade +) engine=innodb; +create table update_log ( +log_id int auto_increment primary key, +table_name varchar(50), +old_value int, +new_value int +) engine=innodb; +create trigger tr_t2_after_update after update on t2 +for each row +insert into update_log(table_name, old_value, new_value) +values ('t2', old.id, new.id); +create trigger tr_t3_after_update after update on t3 +for each row +insert into update_log(table_name, old_value, new_value) +values ('t3', old.id, new.id); +insert into t1 (id) values (1); +insert into t2 (id) values (1); +insert into t3 (id) values (1); +update t1 set id = 2; +select * from t1; +id +2 +select * from t2; +id +2 +select * from t3; +id +2 +select * from update_log; +log_id table_name old_value new_value +1 t3 1 2 +2 t2 1 2 +drop table update_log; +drop table t3; +drop table t2; +drop table t1; +# +# With bit fields +# +create table t1 ( +id bit(8) primary key, +flag bit(1) not null default b'0' +) engine=innodb; +create table t2 ( +id bit(8) primary key, +t1_id bit(8), +value int, +foreign key (t1_id) references t1(id) on update cascade +) engine=innodb; +create table trigger_log ( +log_id int auto_increment primary key, +log_message varchar(255) +) engine=innodb; +create trigger tr_t2_after_update after update on t2 +for each row +insert into trigger_log(log_message) +values (concat('t2 updated: ', hex(old.t1_id), ' -> ', hex(new.t1_id))); +insert into t1 (id, flag) values (b'00000001', b'1'); +insert into t2 (id, t1_id, value) values (b'00000001', b'00000001', 100); +update t1 set id = b'00000010' where id = b'00000001'; +select bin(id), bin(t1_id), value from t2; +bin(id) bin(t1_id) value +1 10 100 +select * from trigger_log; +log_id log_message +1 t2 updated: 1 -> 2 +drop table trigger_log; +drop table t2; +drop table t1; +# +# With blobs +# +create table t1 ( +id int primary key, +blob_data blob +) engine=innodb; +create table t2 ( +id int primary key, +t1_id int, +copied_blob_data blob, +foreign key (t1_id) references t1(id) on delete cascade on update cascade +) engine=innodb; +create trigger tr_t2_before_update +before update on t2 +for each row +set new.copied_blob_data = concat(old.copied_blob_data, repeat('-updated', 1250)); +insert into t1 (id, blob_data) values (1, repeat('a', 1024)); +insert into t2 (id, t1_id, copied_blob_data) values (1, 1, repeat('b', 1024)); +select * from t2; +id t1_id copied_blob_data +1 1 
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb +update t1 set id = 2 where id = 1; +select * from t2; +id t1_id copied_blob_data +1 2 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-up
dated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updat
ed-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-
updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated-updated +drop table t2; +drop table t1; +# +# With blobs error too long +# +create table t1 ( +id int primary key, +blob_data blob +) engine=innodb; +create table t2 ( +id int primary key, +t1_id int, +copied_blob_data blob, +foreign key (t1_id) references t1(id) on delete cascade on update cascade +) engine=innodb; +create trigger tr_t2_before_update +before update on t2 +for each row +set new.copied_blob_data = concat(old.copied_blob_data, repeat('-updated', 12500)); +insert into t1 (id, blob_data) values (1, repeat('a', 1024)); +insert into t2 (id, t1_id, copied_blob_data) values (1, 1, repeat('b', 1024)); +select * from t2; +id t1_id copied_blob_data +1 1 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb +update t1 set id = 2 where id = 1; +ERROR 22001: Data too long for column 'copied_blob_data' at row 0 +drop table t2; +drop table t1; +# max depth is incrementally limited for cascade actions inside triggers +create table t2( +id int primary key, +pid int, +index(pid), +foreign key(pid) references t2(id) on delete cascade) engine=innodb; +insert into t2 
values(0,0),(1,0),(2,1),(3,2),(4,3),(5,4),(6,5),(7,6), +(8,7),(9,8),(10,9),(11,10),(12,11),(13,12),(14,13); +create table t1( +id int primary key, +pid int, +index(pid), +foreign key(pid) references t1(id) on delete cascade) engine=innodb; +create trigger trg before delete on t1 for each row +delete from t2 where id = 0 and old.id=14; +insert into t1 values(0,0),(1,0),(2,1),(3,2),(4,3),(5,4),(6,5),(7,6), +(8,7),(9,8),(10,9),(11,10),(12,11),(13,12),(14,13); +delete from t1 where id=0; +ERROR HY000: Got error 193 "Foreign key cascade delete/update exceeds max depth" from storage engine InnoDB +select * from t2; +id pid +0 0 +1 0 +2 1 +3 2 +4 3 +5 4 +6 5 +7 6 +8 7 +9 8 +10 9 +11 10 +12 11 +13 12 +14 13 +select * from t1; +id pid +0 0 +1 0 +2 1 +3 2 +4 3 +5 4 +6 5 +7 6 +8 7 +9 8 +10 9 +11 10 +12 11 +13 12 +14 13 +drop table t1; +drop table t2; +call mtr.add_suppression("InnoDB: Cannot delete/update rows with cascading foreign key constraints that exceed max depth of 15\\."); diff --git a/mysql-test/suite/innodb/t/foreign_key.test b/mysql-test/suite/innodb/t/foreign_key.test index 6ce3936369036..4b9f83fc2c28e 100644 --- a/mysql-test/suite/innodb/t/foreign_key.test +++ b/mysql-test/suite/innodb/t/foreign_key.test @@ -878,7 +878,7 @@ INSERT INTO t1 (a,b) VALUES (0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,0), (0,0),(0,0),(0,0),(0,0),(0,0),(0,0),(0,1),(1,0); ---error ER_GET_ERRMSG +--error ER_GET_ERRNO DELETE FROM t1 WHERE b = 1; SHOW WARNINGS; DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/innodb.test b/mysql-test/suite/innodb/t/innodb.test index ca0eb06d6133a..112ab529f98b3 100644 --- a/mysql-test/suite/innodb/t/innodb.test +++ b/mysql-test/suite/innodb/t/innodb.test @@ -1064,7 +1064,7 @@ create table t1( foreign key(pid) references t1(id) on delete cascade) engine=innodb; insert into t1 values(0,0),(1,0),(2,1),(3,2),(4,3),(5,4),(6,5),(7,6), (8,7),(9,8),(10,9),(11,10),(12,11),(13,12),(14,13),(15,14); ---error ER_GET_ERRMSG,ER_ROW_IS_REFERENCED_2 +--error ER_GET_ERRNO,ER_ROW_IS_REFERENCED_2 delete from t1 where id=0; delete from t1 where id=15; delete from t1 where id=0; diff --git a/mysql-test/suite/innodb/t/trigger_cascade.test b/mysql-test/suite/innodb/t/trigger_cascade.test new file mode 100644 index 0000000000000..2c984cdc64de4 --- /dev/null +++ b/mysql-test/suite/innodb/t/trigger_cascade.test @@ -0,0 +1,480 @@ +--source include/have_innodb.inc + +--echo # +--echo # After update +--echo # + +create table t1 ( + x int primary key +) engine=innodb; + +create table t2 ( + x int primary key, + y int, + foreign key (x) references t1(x) on update cascade +) engine=innodb; + +create table t3 ( + old_x int, + new_x int +) engine=innodb; + +create trigger tr after update on t2 + for each row + insert into t3 (old_x, new_x) values (old.x, new.x); + +insert into t1 (x) values (1); +insert into t2 (x, y) values (1, 20); + +update t1 set x = 2; +select * from t3; +update t2 set y = 50; + +select * from t3; +select * from t1; +select * from t2; + +drop table t3; +drop table t2; +drop table t1; + +--echo # +--echo # After update with counter +--echo # + +create table t1(x int primary key) engine=innodb; +create table t2(x int primary key, + foreign key(x) references t1(x) on update cascade) engine=innodb; + +create trigger tr_t2 after update on t2 + for each row set @counter=@counter+1; + +insert into t1 values (1); +insert into t2 values (1); +set @counter=0; + +update t1 set x = 2; + +select * from t1; +select * from t2; + +select @counter; +drop table t2; +drop table t1; + +--echo # +--echo # 
Before update +--echo # + +create table t1 ( + x int primary key +) engine=innodb; + + +create table t2 ( + x int primary key, + y int, + foreign key(x) references t1(x) on update cascade +) engine=innodb; + +create trigger tr before update on t2 + for each row set new.y = 50; + +insert into t1 (x) values (1); +insert into t2 (x, y) values (1,2); + +update t1 set x = 2; +select * from t1; +select * from t2; + + +drop table t2; +drop table t1; + +--echo # +--echo # Update, foreign key on secondary index +--echo # + +create table t1(x int auto_increment primary key, y int, unique key(y)) engine=innodb; +create table t2(a int primary key, x int, z varchar(20), + foreign key(x) references t1(y) on update cascade) engine=innodb; +create table t3(id int auto_increment primary key, action_x int, action_z varchar(20), note varchar(50)) engine=innodb; + +create trigger tr_t2 before update on t2 + for each row + insert into t3(action_x, action_z, note) values (old.x, old.z, 'update t2 — old'), + (new.x, new.z, 'update t2 — new'); + + + +insert into t1 values (1, 10); +insert into t2 values (1, 10, 'str'); + +update t1 set y=20 where x=1; + +select * from t1; +select * from t2; +select * from t3; + +drop table t3; +drop table t2; +drop table t1; + +--echo # +--echo # Update, on update set null cascade +--echo # + +create table t1 ( + id int primary key, + value varchar(50) +) engine=innodb; + +create table t2 ( + id int primary key, + t1_id int, + t2_value varchar(50), + foreign key (t1_id) references t1(id) on update set null +) engine=innodb; + +create trigger tr_t2_before_update before update on t2 +for each row set new.t2_value = 'updated by trigger'; + +insert into t1 (id, value) values (1, 'parent_row'); +insert into t2 (id, t1_id, t2_value) values (1, 1, 'child_row'); + +select * from t1; +select * from t2; + +update t1 set id = 2 where id = 1; + +select * from t1; +select * from t2; + +drop table t2; +drop table t1; + +--echo # +--echo # Before update, indexes on virtual columns +--echo # + +create table t1(x int auto_increment primary key, y int, unique key(y)) engine=innodb; +create table t2(a int primary key, x int, + t int, + z varchar(20) as (concat("test", t)), + y varchar(20) as (concat("test2", t)), + y_another varchar(20) as (concat("test3", x)), + unique(z), + unique(y), + unique(y_another), + foreign key(x) references t1(y) on update cascade) engine=innodb; + +create trigger tr before update on t2 + for each row set new.t = 50; + +insert into t1 (x,y) values (1,1); +insert into t2 (a,x,t) values (1,1,1); + +select * from t2; +update t1 set y = 2; +select * from t2 force index (z) where z = 'test50'; +select * from t2 force index (y) where y = 'test250'; +select * from t2 force index (y_another) where y_another = 'test32'; +select * from t2; +drop table t2; +drop table t1; + +--echo # +--echo # Before delete +--echo # + +create table t1 ( + x int primary key +) engine=innodb; + +create table t2 ( + x int primary key, + y int, + foreign key(x) references t1(x) on delete cascade +) engine=innodb; + +create trigger tr before delete on t2 + for each row set @deleted_value = old.y; + +insert into t1 (x) values (1); +insert into t2 (x, y) values (1, 100); + +delete from t1 where x = 1; + +select @deleted_value as deleted_value; + +select * from t2; + +drop table t2; +drop table t1; + +--echo # +--echo # After, before delete +--echo # + +create table t1 ( + x int primary key +) engine=innodb; + +create table t2 ( + x int primary key, + y int, + foreign key(x) references t1(x) on delete 
cascade +) engine=innodb; + +create table t3 ( + id int auto_increment primary key, + log_event varchar(255) +) engine=innodb; + + +create trigger tr_before before delete on t2 + for each row + set @deleted_value = old.y; + + +create trigger tr_after after delete on t2 + for each row + insert into t3 (log_event) values ('after delete trigger executed'); + +insert into t1 (x) values (1), (2); +insert into t2 (x, y) values (1, 100), (2, 200); + +delete from t1 where x = 1; + +select @deleted_value as before_deleted_value; + +select * from t3; + +select * from t2; + +select * from t1; + +drop table t3; +drop table t2; +drop table t1; + +--echo # +--echo # On delete set null +--echo # + +create table t1 ( + id int primary key, + value varchar(50) +) engine=innodb; + +create table t2 ( + id int primary key, + t1_id int, + t2_value varchar(50), + foreign key (t1_id) references t1(id) on delete set null +) engine=innodb; + + +create trigger tr_t2_before_update before update on t2 +for each row set new.t2_value = 'updated by trigger'; + +insert into t1 (id, value) values (1, 'parent_row'); +insert into t2 (id, t1_id, t2_value) values (1, 1, 'child_row'); + +select * from t1; +select * from t2; + +delete from t1 where id = 1; + +select * from t1; +select * from t2; + +drop table t2; +drop table t1; + +--echo # +--echo # Cascade chain +--echo # + +create table t1 ( + id int primary key +) engine=innodb; + +create table t2 ( + id int primary key, + foreign key (id) references t1(id) on update cascade +) engine=innodb; + +create table t3 ( + id int primary key, + foreign key (id) references t2(id) on update cascade +) engine=innodb; + +create table update_log ( + log_id int auto_increment primary key, + table_name varchar(50), + old_value int, + new_value int +) engine=innodb; + +create trigger tr_t2_after_update after update on t2 + for each row + insert into update_log(table_name, old_value, new_value) + values ('t2', old.id, new.id); + +create trigger tr_t3_after_update after update on t3 + for each row + insert into update_log(table_name, old_value, new_value) + values ('t3', old.id, new.id); + +insert into t1 (id) values (1); +insert into t2 (id) values (1); +insert into t3 (id) values (1); + +update t1 set id = 2; + +select * from t1; +select * from t2; +select * from t3; +select * from update_log; + +drop table update_log; +drop table t3; +drop table t2; +drop table t1; + +--echo # +--echo # With bit fields +--echo # + +create table t1 ( + id bit(8) primary key, + flag bit(1) not null default b'0' +) engine=innodb; + +create table t2 ( + id bit(8) primary key, + t1_id bit(8), + value int, + foreign key (t1_id) references t1(id) on update cascade +) engine=innodb; + +create table trigger_log ( + log_id int auto_increment primary key, + log_message varchar(255) +) engine=innodb; + +create trigger tr_t2_after_update after update on t2 + for each row + insert into trigger_log(log_message) + values (concat('t2 updated: ', hex(old.t1_id), ' -> ', hex(new.t1_id))); + +insert into t1 (id, flag) values (b'00000001', b'1'); +insert into t2 (id, t1_id, value) values (b'00000001', b'00000001', 100); + +update t1 set id = b'00000010' where id = b'00000001'; + +select bin(id), bin(t1_id), value from t2; + +select * from trigger_log; + +drop table trigger_log; +drop table t2; +drop table t1; + + +--echo # +--echo # With blobs +--echo # + +create table t1 ( + id int primary key, + blob_data blob +) engine=innodb; + +create table t2 ( + id int primary key, + t1_id int, + copied_blob_data blob, + foreign key 
(t1_id) references t1(id) on delete cascade on update cascade +) engine=innodb; + + +create trigger tr_t2_before_update +before update on t2 +for each row +set new.copied_blob_data = concat(old.copied_blob_data, repeat('-updated', 1250)); + + +insert into t1 (id, blob_data) values (1, repeat('a', 1024)); +insert into t2 (id, t1_id, copied_blob_data) values (1, 1, repeat('b', 1024)); + +select * from t2; +update t1 set id = 2 where id = 1; + +select * from t2; + +drop table t2; +drop table t1; + +--echo # +--echo # With blobs error too long +--echo # + +create table t1 ( + id int primary key, + blob_data blob +) engine=innodb; + +create table t2 ( + id int primary key, + t1_id int, + copied_blob_data blob, + foreign key (t1_id) references t1(id) on delete cascade on update cascade +) engine=innodb; + + +create trigger tr_t2_before_update +before update on t2 +for each row +set new.copied_blob_data = concat(old.copied_blob_data, repeat('-updated', 12500)); + + +insert into t1 (id, blob_data) values (1, repeat('a', 1024)); +insert into t2 (id, t1_id, copied_blob_data) values (1, 1, repeat('b', 1024)); + +select * from t2; +--error ER_DATA_TOO_LONG +update t1 set id = 2 where id = 1; + +drop table t2; +drop table t1; + +--echo # max depth is incrementally limited for cascade actions inside triggers + +create table t2( + id int primary key, + pid int, + index(pid), + foreign key(pid) references t2(id) on delete cascade) engine=innodb; +insert into t2 values(0,0),(1,0),(2,1),(3,2),(4,3),(5,4),(6,5),(7,6), + (8,7),(9,8),(10,9),(11,10),(12,11),(13,12),(14,13); + +create table t1( + id int primary key, + pid int, + index(pid), + foreign key(pid) references t1(id) on delete cascade) engine=innodb; + +create trigger trg before delete on t1 for each row + delete from t2 where id = 0 and old.id=14; + +insert into t1 values(0,0),(1,0),(2,1),(3,2),(4,3),(5,4),(6,5),(7,6), + (8,7),(9,8),(10,9),(11,10),(12,11),(13,12),(14,13); +--error ER_GET_ERRNO +delete from t1 where id=0; +select * from t2; +select * from t1; +drop table t1; +drop table t2; +call mtr.add_suppression("InnoDB: Cannot delete/update rows with cascading foreign key constraints that exceed max depth of 15\\."); diff --git a/sql/handler.cc b/sql/handler.cc index 65af1a4b095bd..bb57f12e66bfd 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -5269,6 +5269,10 @@ void handler::print_error(int error, myf errflag) SET_FATAL_ERROR; textno= ER_ROLLBACK_ONLY; break; + case HA_ERR_CASCADE_SQL: + // Cascade error should be already reported. + DBUG_ASSERT(table->in_use->get_stmt_da()->is_error()); + DBUG_VOID_RETURN; default: { /* The error was "unknown" to this function. 
diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 059cf17e75bf6..5612f25decb9a 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -5166,6 +5166,11 @@ prepare_fk_prelocking_list(THD *thd, Query_tables_list *prelocking_ctx, NULL, lock_type, TABLE_LIST::PRELOCK_FK, table_list->belong_to_view, op, &prelocking_ctx->query_tables_last, table_list->for_insert_data); + + if (fk->delete_method == FK_OPTION_SET_NULL) + tl->trg_event_map |= trg2bit(TRG_EVENT_UPDATE); + + #ifdef WITH_WSREP /* Append table level shared key for the referenced/foreign table for: @@ -5239,6 +5244,11 @@ bool DML_prelocking_strategy::handle_table(THD *thd, if (table_list->trg_event_map && trigger_prelocking_needed) { + if (prepare_fk_prelocking_list(thd, prelocking_ctx, table_list, + need_prelocking, + table_list->trg_event_map)) + return TRUE; + if (table->triggers) { *need_prelocking= TRUE; @@ -5247,11 +5257,6 @@ bool DML_prelocking_strategy::handle_table(THD *thd, add_tables_and_routines_for_triggers(thd, prelocking_ctx, table_list)) return TRUE; } - - if (prepare_fk_prelocking_list(thd, prelocking_ctx, table_list, - need_prelocking, - table_list->trg_event_map)) - return TRUE; } else if (table_list->slave_fk_event_map) { diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 42723fb6caf62..035d9cb69c559 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -746,6 +746,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier) derived_tables_processing(FALSE), waiting_on_group_commit(FALSE), has_waiter(FALSE), last_sql_command(SQLCOM_END), spcont(NULL), + fk_cascade_depth(0), m_parser_state(NULL), #ifndef EMBEDDED_LIBRARY audit_plugin_version(-1), diff --git a/sql/sql_class.h b/sql/sql_class.h index f8f9290405ba3..5e03d24eff022 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -4349,6 +4349,7 @@ class THD: public THD_count, /* this must be first */ /** number of name_const() substitutions, see sp_head.cc:subst_spvars() */ uint query_name_consts; + uint fk_cascade_depth; NET* slave_net; // network connection from slave -> m. diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 1907dbbc06496..aaf46c331c7d5 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -10626,27 +10626,6 @@ const char *online_alter_check_supported(THD *thd, if (!*online) return "BIGINT GENERATED ALWAYS AS ROW_START"; - List fk_list; - table->file->get_foreign_key_list(thd, &fk_list); - for (auto &fk: fk_list) - { - if (fk_modifies_child(fk.delete_method) || - fk_modifies_child(fk.update_method)) - { - *online= false; - // Don't fall to a common unsupported case to avoid heavy string ops. - if (alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_NONE) - { - return fk_modifies_child(fk.delete_method) - ? thd->strcat({STRING_WITH_LEN("ON DELETE ")}, - *fk_option_name(fk.delete_method)).str - : thd->strcat({STRING_WITH_LEN("ON UPDATE ")}, - *fk_option_name(fk.update_method)).str; - } - return NULL; - } - } - for (auto &c: alter_info->create_list) { *online= c.field || !(c.flags & AUTO_INCREMENT_FLAG); @@ -13915,3 +13894,117 @@ bool HA_CREATE_INFO:: } return false; } + +static +void update_virtual_fields_for_rows(TABLE *table) +{ + if (table->vfield) { + table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_READ); + table->move_fields(table->field, table->record[1], table->record[0]); + table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_READ); + table->move_fields(table->field, table->record[0], table->record[1]); + } +} + +/** + Delete a row from the table. + The row to be deleted must be present in `table->record[0]`. 
+ The handler (`table->file`) must be configured to operate on the data + stored in `table->record[0]` (i.e., the effect from calling `ha_rnd_pos` + or `ha_index_read` is required to make a successful call). + @param[in,out] table The table object representing the target table. + @return error number or 0 */ +int sql_delete_row(TABLE *table) +{ + THD *thd= table->in_use; + bool delete_history= thd->lex->vers_conditions.delete_history; + thd->lex->vers_conditions.delete_history = false; + SCOPE_EXIT([thd, delete_history] + { + thd->lex->vers_conditions.delete_history= delete_history; + }); + table->pos_in_table_list->trg_event_map = trg2bit(TRG_EVENT_DELETE); + + int error= 0; + bool trg_skip_row= false; + // This ensures that triggers can correctly read virtual field values + if (table->vfield) + table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_READ); + + table->column_bitmaps_set(&table->s->all_set, &table->s->all_set); + if (table->triggers && + unlikely(table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_BEFORE, FALSE, + &trg_skip_row))) + return trg_skip_row ? HA_ERR_CASCADE_SQL : 0; + + + error= table->delete_row(); + if (error != 0) + return error; + + if (table->triggers && + unlikely(table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_AFTER, FALSE, + nullptr))) + return HA_ERR_CASCADE_SQL; + + return 0; +} + +/** + Update a row in the table. + The old row must be present in `table->record[1]` and the new row in `table->record[0]`. + The handler (`table->file`) must be configured to operate on the data + stored in `table->record[1]` (i.e., the effect from calling `ha_index_read` + is required to make a successful call). + @param[in,out] table The table object representing the target table. + @return error number or 0 */ +int sql_update_row(TABLE *table) +{ + THD *thd= current_thd; + bool trg_skip_row= false; + table->column_bitmaps_set(&table->s->all_set, &table->s->all_set); + + table->pos_in_table_list->trg_event_map = trg2bit(TRG_EVENT_UPDATE); + + // This ensures that triggers can correctly read virtual field values + update_virtual_fields_for_rows(table); + + if (table->triggers && + table->triggers->has_triggers(TRG_EVENT_UPDATE, TRG_ACTION_BEFORE)) + { + if (unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, + TRG_ACTION_BEFORE, TRUE, + &trg_skip_row))) + return trg_skip_row ? 
0 : HA_ERR_CASCADE_SQL; + // This is necessary for indexes that depend on virtual fields + update_virtual_fields_for_rows(table); + } + + int error= table->file->ha_update_row(table->record[1], table->record[0]); + + bool record_was_same= error == HA_ERR_RECORD_IS_THE_SAME; + if (record_was_same) + error= 0; + if (error != 0) + return error; + + if (table->versioned(VERS_TIMESTAMP)) + { + store_record(table, record[2]); + table->mark_columns_per_binlog_row_image(); + error= vers_insert_history_row(table); + restore_record(table, record[2]); + if (error) + return error; + } + + if (table->triggers && + unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, + TRG_ACTION_AFTER, TRUE, + nullptr))) + return HA_ERR_CASCADE_SQL; + + return 0; +} diff --git a/sql/sql_table.h b/sql/sql_table.h index 4d1d5dff9d582..f4a0fc4809a59 100644 --- a/sql/sql_table.h +++ b/sql/sql_table.h @@ -226,4 +226,7 @@ extern MYSQL_PLUGIN_IMPORT const Lex_ident_column primary_key_name; bool check_engine(THD *, const char *, const char *, HA_CREATE_INFO *); +int sql_delete_row(TABLE *table); + +int sql_update_row(TABLE *table); #endif /* SQL_TABLE_INCLUDED */ diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 46f084108153d..6d727616439c8 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -2050,15 +2050,6 @@ convert_error_code_to_mysql( return(HA_ERR_ABORTED_BY_USER); case DB_FOREIGN_EXCEED_MAX_CASCADE: - ut_ad(thd); - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - HA_ERR_ROW_IS_REFERENCED, - "InnoDB: Cannot delete/update " - "rows with cascading foreign key " - "constraints that exceed max " - "depth of %d. Please " - "drop extra constraints and try " - "again", FK_MAX_CASCADE_DEL); return(HA_ERR_FK_DEPTH_EXCEEDED); case DB_CANT_CREATE_GEOMETRY_OBJECT: @@ -2223,11 +2214,111 @@ convert_error_code_to_mysql( return(HA_ERR_TABLE_CORRUPT); case DB_FTS_TOO_MANY_WORDS_IN_PHRASE: return(HA_ERR_FTS_TOO_MANY_WORDS_IN_PHRASE); + case DB_SQL_ERROR: + return(HA_ERR_CASCADE_SQL); case DB_COMPUTE_VALUE_FAILED: return(HA_ERR_GENERIC); // impossible } } +/** + Converts a MariaDB handler error code to an InnoDB error code. + An inverse of convert_error_code_to_mysql. + @return InnoDB error code +*/ +static dberr_t +convert_sql_error_to_dberr(THD *thd, que_thr_t *thr, int sql_error) +{ + switch (sql_error) + { + case 0: + return DB_SUCCESS; + case HA_ERR_CASCADE_SQL: + // this error should be accessible with thd->get_stmt_da()->sql_errno() + ut_ad(thd->get_stmt_da()->is_error()); + return DB_SQL_ERROR; + + case HA_ERR_GENERIC: + default: + return DB_ERROR; + + case HA_ERR_ABORTED_BY_USER: + return DB_INTERRUPTED; + case HA_ERR_FK_DEPTH_EXCEEDED: + if (thd->fk_cascade_depth == 0) + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_ROW_IS_REFERENCED, + "InnoDB: Cannot delete/update " + "rows with cascading foreign key " + "constraints that exceed max " + "depth of %d. 
Please " + "drop extra constraints and try " + "again", FK_MAX_CASCADE_DEL); + return DB_FOREIGN_EXCEED_MAX_CASCADE; + case HA_ERR_NULL_IN_SPATIAL: + return DB_CANT_CREATE_GEOMETRY_OBJECT; + case HA_ERR_FOUND_DUPP_KEY: + return DB_DUPLICATE_KEY; + case HA_ERR_TABLE_READONLY: + return DB_READ_ONLY; + case HA_ERR_FOREIGN_DUPLICATE_KEY: + return DB_FOREIGN_DUPLICATE_KEY; + case HA_ERR_TABLE_DEF_CHANGED: + return DB_MISSING_HISTORY; + case HA_ERR_NO_ACTIVE_RECORD: + return DB_RECORD_NOT_FOUND; + case HA_ERR_LOCK_DEADLOCK: + return DB_DEADLOCK; + case HA_ERR_RECORD_CHANGED: + return DB_RECORD_CHANGED; + case HA_ERR_LOCK_WAIT_TIMEOUT: + return DB_LOCK_WAIT_TIMEOUT; + case HA_ERR_NO_REFERENCED_ROW: + return DB_NO_REFERENCED_ROW; + case HA_ERR_ROW_IS_REFERENCED: + return DB_ROW_IS_REFERENCED; + case HA_ERR_CANNOT_ADD_FOREIGN: + return DB_CANNOT_ADD_CONSTRAINT; + case HA_ERR_CRASHED: + return DB_CORRUPTION; + case HA_ERR_RECORD_FILE_FULL: + return DB_OUT_OF_FILE_SPACE; + case HA_ERR_INTERNAL_ERROR: + return DB_TEMP_FILE_WRITE_FAIL; + case HA_ERR_NO_SUCH_TABLE: + return DB_TABLE_NOT_FOUND; + case HA_ERR_DECRYPTION_FAILED: + return DB_DECRYPTION_FAILED; + case HA_ERR_TABLESPACE_MISSING: + return DB_TABLESPACE_NOT_FOUND; + case HA_ERR_TO_BIG_ROW: + return DB_TOO_BIG_RECORD; + case HA_ERR_INDEX_COL_TOO_LONG: + return DB_TOO_BIG_INDEX_COL; + case HA_ERR_LOCK_TABLE_FULL: + return DB_LOCK_TABLE_FULL; + case HA_FTS_INVALID_DOCID: + return DB_FTS_INVALID_DOCID; + case HA_ERR_TOO_MANY_CONCURRENT_TRXS: + return DB_TOO_MANY_CONCURRENT_TRXS; + case HA_ERR_UNSUPPORTED: + return DB_UNSUPPORTED; + case HA_ERR_INDEX_CORRUPT: + return DB_INDEX_CORRUPT; + case HA_ERR_UNDO_REC_TOO_BIG: + return DB_UNDO_RECORD_TOO_BIG; + case HA_ERR_OUT_OF_MEM: + return DB_OUT_OF_MEMORY; + case HA_ERR_TABLESPACE_EXISTS: + return DB_TABLESPACE_EXISTS; + case HA_ERR_TABLE_CORRUPT: + return DB_TABLE_CORRUPT; + case HA_ERR_FTS_TOO_MANY_WORDS_IN_PHRASE: + return DB_FTS_TOO_MANY_WORDS_IN_PHRASE; + } +} + + /*************************************************************//** Prints info of a THD object (== user session thread) to the given file. */ void @@ -8448,6 +8539,36 @@ ATTRIBUTE_COLD bool wsrep_append_table_key(MYSQL_THD thd, } #endif /* WITH_WSREP */ +int ha_innobase::update_prebuilt_upd_buf() +{ + DBUG_ENTER("ha_innobase::update_prebuilt_upd_buf"); + if (m_upd_buf == NULL) + { + ut_ad(m_upd_buf_size == 0); + + /* + Create a buffer for packing the fields of a record. Why + table->reclength did not work here? Obviously, because char + fields when packed actually became 1 byte longer, when we also + stored the string length as the first byte. + */ + + m_upd_buf_size = table->s->reclength + table->s->max_key_length + + MAX_REF_PARTS * 3; + + m_upd_buf = reinterpret_cast( + my_malloc(PSI_INSTRUMENT_ME, m_upd_buf_size, MYF(MY_WME))); + + if (m_upd_buf == NULL) + { + m_upd_buf_size = 0; + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + } + } + DBUG_RETURN(0); +} + + /** Updates a row given as a parameter to a new value. Note that we are given whole rows, not just the fields which are updated: this incurs some @@ -8475,27 +8596,8 @@ ha_innobase::update_row( DBUG_RETURN(err); } - if (m_upd_buf == NULL) { - ut_ad(m_upd_buf_size == 0); - - /* Create a buffer for packing the fields of a record. Why - table->reclength did not work here? Obviously, because char - fields when packed actually became 1 byte longer, when we also - stored the string length as the first byte. 
*/ - - m_upd_buf_size = table->s->reclength + table->s->max_key_length - + MAX_REF_PARTS * 3; - - m_upd_buf = reinterpret_cast( - my_malloc(PSI_INSTRUMENT_ME, - m_upd_buf_size, - MYF(MY_WME))); - - if (m_upd_buf == NULL) { - m_upd_buf_size = 0; - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - } - } + if (int ret= update_prebuilt_upd_buf()) + DBUG_RETURN(ret); mariadb_set_stats temp(trx, handler_stats); @@ -21405,3 +21507,117 @@ void alter_stats_rebuild(dict_table_t *table, trx_t *trx) noexcept " table rebuild: %s", ut_strerr(ret)); DBUG_VOID_RETURN; } + + +/********************************************************************//** +Fetches the MySQL TABLE object corresponding to the InnoDB table +referenced in the given update node. +@return Pointer to the TABLE object if found, or nullptr on error. */ +static +TABLE *find_sql_table_for_update_node(upd_node_t* node) { + THD *thd= current_thd; + char db_buf[NAME_LEN + 1]; + char tbl_buf[NAME_LEN + 1]; + ulint db_buf_len, tbl_buf_len; + dict_table_t *table= node->table; + if (!table->parse_name(db_buf, tbl_buf, &db_buf_len, &tbl_buf_len)) { + return nullptr; + } + + return find_fk_open_table(thd, db_buf, db_buf_len, tbl_buf, + tbl_buf_len); +} + + +/** + Executes a cascading operation (DELETE or UPDATE) for a foreign key + constraint by invoking the corresponding action from sql layer. + + Sets up the MariaDB format table records and cursor. + + @return DB_SUCCESS if OK else error code. +*/ +dberr_t +innodb_do_foreign_cascade(que_thr_t *thr, upd_node_t* node) +{ + bool is_delete= node->is_delete; + + TABLE *maria_table= find_sql_table_for_update_node(node); + + THD *thd= maria_table->in_use; + ha_innobase *handler= (ha_innobase*)maria_table->file; + row_prebuilt_t *prebuilt= handler->get_prebuilt(node->table); + btr_pcur_t *pcur= node->pcur; + const rec_t* rec= btr_pcur_get_rec(pcur); + dict_index_t *clust_index= dict_table_get_first_index(node->table); + const rec_offs* offsets= rec_get_offsets( + rec, clust_index, nullptr, + clust_index->n_core_fields, + ULINT_UNDEFINED, &node->heap); + + uint old_rec_idx= is_delete ? 0 : 1; + row_sel_store_mysql_rec(maria_table->record[old_rec_idx], prebuilt, rec, NULL, + true, clust_index, offsets); + + if (!is_delete) + { + /* + The following creates the upd_row vector, that can be converted to a + mysql row. + The memory in row and upd_row is not re-used, following the existing + pattern in row_upd_store_row. + The memory is then freed in row_upd_store_row, so no memory is leaked. 
+    */
+    node->row= row_build(ROW_COPY_DATA, clust_index, rec, offsets, NULL,
+                         NULL, NULL, &node->upd_ext, node->heap);
+
+    node->upd_row= dtuple_copy(node->row, node->heap);
+    row_upd_replace(node->upd_row, &node->upd_ext, clust_index,
+                    node->update, node->heap);
+
+    dtuple_t* entry= row_build_index_entry(node->upd_row, NULL, clust_index,
+                                           node->heap);
+
+    ulint n_ext= dtuple_get_n_ext(node->upd_row);
+
+    ulint size= rec_get_converted_size(clust_index, entry, n_ext);
+    byte *buf= static_cast<byte*>(mem_heap_alloc(node->heap, size));
+
+    rec_t *rec_upd= rec_convert_dtuple_to_rec(buf, clust_index, entry, n_ext);
+    const rec_offs* upd_offsets= rec_get_offsets(
+      rec_upd, clust_index, nullptr,
+      clust_index->n_core_fields,
+      ULINT_UNDEFINED, &node->heap);
+
+    row_sel_store_mysql_rec_keep_blobs(maria_table->record[0], prebuilt,
+                                       rec_upd, NULL, false, clust_index,
+                                       upd_offsets);
+  }
+
+  if (handler->update_prebuilt_upd_buf())
+    return DB_OUT_OF_MEMORY;
+
+  TABLE_LIST *tl = maria_table->pos_in_table_list;
+  uint8 old_trg_ops = tl->trg_event_map;
+  uint8 old_slave_fk_ops = tl->slave_fk_event_map;
+  tl->trg_event_map = tl->slave_fk_event_map = 0;
+
+  auto *upd_node= prebuilt->upd_node;
+  auto *upd_graph= prebuilt->upd_graph;
+  prebuilt->upd_node= node;
+  prebuilt->upd_graph= static_cast<que_fork_t*>(que_node_get_parent(thr));
+
+  btr_pcur_copy_stored_position(prebuilt->pcur, pcur);
+  prebuilt->sql_stat_start = FALSE;
+
+  ++thd->fk_cascade_depth;
+  int err= is_delete ? sql_delete_row(maria_table)
+                     : sql_update_row(maria_table);
+  --thd->fk_cascade_depth;
+
+  tl->slave_fk_event_map = old_slave_fk_ops;
+  tl->trg_event_map = old_trg_ops;
+  prebuilt->upd_node= upd_node;
+  prebuilt->upd_graph= upd_graph;
+  return convert_sql_error_to_dberr(maria_table->in_use, thr, err);
+}
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index b6fb571078bea..47cc642268e0b 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -435,6 +435,12 @@ class ha_innobase final : public handler
 	const Column_definition& new_field,
 	const KEY_PART_INFO& old_part,
 	const KEY_PART_INFO& new_part) const override;
+  row_prebuilt_t *get_prebuilt(const dict_table_t* table) {
+    build_template(true);
+    m_prebuilt->index = dict_table_get_first_index(table);
+    return m_prebuilt;
+  }
+  int update_prebuilt_upd_buf();
 
   /** Check consistency between .frm indexes and InnoDB indexes
   Set HA_DUPLICATE_KEY_NOT_IN_ORDER if multiple unique index
@@ -507,14 +513,14 @@ class ha_innobase final : public handler
 	/** Thread handle of the user currently using the handler;
 	this is set in external_lock function */
 	THD*		m_user_thd;
-
+public:
 	/** buffer used in updates */
 	uchar*		m_upd_buf;
 
 	/** the size of upd_buf in bytes */
 	ulint		m_upd_buf_size;
-
-	/** Flags that specify the handler instance (table) capability. */
+protected:
+	/** Flags that specify the handler instance (table) capability. 
*/ Table_flags m_int_table_flags; /** Index into the server's primary key meta-data table->key_info{} */ @@ -934,3 +940,10 @@ ulint dict_table_get_foreign_id(const dict_table_t &table) noexcept; @param foreign foreign key */ void dict_create_add_foreign_id(ulint *id_nr, const char *name, dict_foreign_t *foreign) noexcept; + +/** An SQL-layer callback for cascade actions +@param thr Innodb thr +@param node Innodb update node +@return DB_SUCCESS or DB_SQL_ERROR + */ +dberr_t innodb_do_foreign_cascade(que_thr_t *thr, upd_node_t* node); diff --git a/storage/innobase/include/db0err.h b/storage/innobase/include/db0err.h index 642a3d2dbe05a..4f728e29e0a19 100644 --- a/storage/innobase/include/db0err.h +++ b/storage/innobase/include/db0err.h @@ -165,6 +165,8 @@ enum dberr_t { DB_END_OF_INDEX, DB_NOT_FOUND, /*!< Generic error code for "Not found" type of errors */ + DB_SQL_ERROR, /*!< An error happened inside sql-layer + callback */ }; #endif diff --git a/storage/innobase/include/que0que.h b/storage/innobase/include/que0que.h index 6485e21e7fc5a..8d9a6fc2bd4e8 100644 --- a/storage/innobase/include/que0que.h +++ b/storage/innobase/include/que0que.h @@ -255,9 +255,6 @@ struct que_thr_t{ UT_LIST_NODE_T(que_thr_t) queue; /*!< list of runnable thread nodes in the server task queue */ - ulint fk_cascade_depth; /*!< maximum cascading call depth - supported for foreign key constraint - related delete/updates */ row_prebuilt_t* prebuilt; /*!< prebuilt structure processed by the query thread */ }; diff --git a/storage/innobase/include/row0sel.h b/storage/innobase/include/row0sel.h index 35e3cbe66315c..e45e71b7b819d 100644 --- a/storage/innobase/include/row0sel.h +++ b/storage/innobase/include/row0sel.h @@ -454,3 +454,22 @@ row_sel_field_store_in_mysql_format_func( #endif /* UNIV_DEBUG */ const byte* data, /*!< in: data to store */ ulint len); /*!< in: length of the data */ + +bool row_sel_store_mysql_rec( + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, + const dtuple_t* vrow, + bool rec_clust, + const dict_index_t* index, + const rec_offs* offsets); + +bool +row_sel_store_mysql_rec_keep_blobs( + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, + const dtuple_t* vrow, + bool rec_clust, + const dict_index_t* index, + const rec_offs* offsets); diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 2063f1179dabc..852f08e0b50dd 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -889,6 +889,14 @@ struct trx_t : ilist_node<> lock_wait(). That is protected by lock_sys.wait_mutex and lock.wait_lock. */ dberr_t error_state; + /* + When handling cascade operations (e.g. foreign key updates/deletes), + nested SQL operations may generate errors that are already stored + in sql_error. In these scenarios, we preserve the original error + state and skip redundant error conversion. 
+ */ + int sql_error; + const dict_index_t*error_info; /*!< if the error number indicates a duplicate key error, a pointer to the problematic index is stored here */ diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index 87d02da38e5a7..da2fb53a55974 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -1106,16 +1106,17 @@ row_ins_foreign_check_on_constraint( goto nonstandard_exit_func; } } - - if (row_ins_cascade_n_ancestors(cascade) >= FK_MAX_CASCADE_DEL) { - err = DB_FOREIGN_EXCEED_MAX_CASCADE; - - row_ins_foreign_report_err( - "Trying a too deep cascaded delete or update\n", - thr, foreign, btr_pcur_get_rec(pcur), entry); - - goto nonstandard_exit_func; + /* Check fk_cascade_depth to limit the recursive call depth on + a single update/delete that affects multiple tables chained + together with foreign key relations. + fk_cascade_depth increments later, so we have to add 1 here. */ + if (UNIV_UNLIKELY(trx->mysql_thd->fk_cascade_depth + 1 >= + FK_MAX_CASCADE_DEL)) + { + err= DB_FOREIGN_EXCEED_MAX_CASCADE; + goto nonstandard_exit_func; } + ut_ad(row_ins_cascade_n_ancestors(cascade) < FK_MAX_CASCADE_DEL); index = pcur->index(); @@ -1315,15 +1316,6 @@ row_ins_foreign_check_on_constraint( } } - if (table->versioned() && cascade->is_delete != PLAIN_DELETE - && cascade->update->affects_versioned()) { - ut_ad(!cascade->historical_heap); - cascade->historical_heap = mem_heap_create(srv_page_size); - cascade->historical_row = row_build( - ROW_COPY_DATA, clust_index, clust_rec, NULL, table, - NULL, NULL, NULL, cascade->historical_heap); - } - /* Store pcur position and initialize or store the cascade node pcur stored position */ diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 0bbc90cd0f6c2..55063180aeeab 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -65,7 +65,7 @@ Created 9/17/2000 Heikki Tuuri #include #include #include - +#include "ha_innodb.h" /** Delay an INSERT, DELETE or UPDATE operation if the purge is lagging. */ static void row_mysql_delay_if_needed() noexcept @@ -672,6 +672,7 @@ row_mysql_handle_errors( case DB_TABLE_NOT_FOUND: case DB_DECRYPTION_FAILED: case DB_COMPUTE_VALUE_FAILED: + case DB_SQL_ERROR: rollback_to_savept: DBUG_EXECUTE_IF("row_mysql_crash_if_error", { log_buffer_flush_to_disk(); @@ -1660,7 +1661,6 @@ row_update_for_mysql(row_prebuilt_t* prebuilt) for (;;) { thr->run_node = node; thr->prev_node = node; - thr->fk_cascade_depth = 0; row_upd_step(thr); @@ -1815,125 +1815,6 @@ row_unlock_for_mysql( @param buf Buffer to hold start time data */ void thd_get_query_start_data(THD *thd, char *buf); -/** Insert history row when evaluating foreign key referential action. - -1. Create new dtuple_t 'row' from node->historical_row; -2. Update its row_end to current timestamp; -3. Insert it to a table; -4. Update table statistics. - -This is used in UPDATE CASCADE/SET NULL of a system versioned referenced table. - -node->historical_row: dtuple_t containing pointers of row changed by referential -action. 
- -@param[in] thr current query thread -@param[in] node a node which just updated a row in a foreign table -@return DB_SUCCESS or some error */ -static dberr_t row_update_vers_insert(que_thr_t* thr, upd_node_t* node) -{ - trx_t* trx = thr_get_trx(thr); - dfield_t* row_end; - char row_end_data[8]; - dict_table_t* table = node->table; - const unsigned zip_size = table->space->zip_size(); - ut_ad(table->versioned()); - - dtuple_t* row; - const ulint n_cols = dict_table_get_n_cols(table); - const ulint n_v_cols = dict_table_get_n_v_cols(table); - - ut_ad(n_cols == dtuple_get_n_fields(node->historical_row)); - ut_ad(n_v_cols == dtuple_get_n_v_fields(node->historical_row)); - - row = dtuple_create_with_vcol(node->historical_heap, n_cols, n_v_cols); - - dict_table_copy_types(row, table); - - ins_node_t* insert_node = - ins_node_create(INS_DIRECT, table, node->historical_heap); - - if (!insert_node) { - trx->error_state = DB_OUT_OF_MEMORY; - goto exit; - } - - insert_node->common.parent = thr; - ins_node_set_new_row(insert_node, row); - - ut_ad(n_cols > DATA_N_SYS_COLS); - // Exclude DB_ROW_ID, DB_TRX_ID, DB_ROLL_PTR - for (ulint i = 0; i < n_cols - DATA_N_SYS_COLS; i++) { - dfield_t *src= dtuple_get_nth_field(node->historical_row, i); - dfield_t *dst= dtuple_get_nth_field(row, i); - dfield_copy(dst, src); - if (dfield_is_ext(src)) { - byte *field_data - = static_cast(dfield_get_data(src)); - ulint ext_len; - ulint field_len = dfield_get_len(src); - - ut_a(field_len >= BTR_EXTERN_FIELD_REF_SIZE); - - ut_a(memcmp(field_data + field_len - - BTR_EXTERN_FIELD_REF_SIZE, - field_ref_zero, - BTR_EXTERN_FIELD_REF_SIZE)); - - byte *data = btr_copy_externally_stored_field( - &ext_len, field_data, zip_size, field_len, - node->historical_heap); - dfield_set_data(dst, data, ext_len); - } - } - - for (ulint i = 0; i < n_v_cols; i++) { - dfield_t *dst= dtuple_get_nth_v_field(row, i); - dfield_t *src= dtuple_get_nth_v_field(node->historical_row, i); - dfield_copy(dst, src); - } - - node->historical_row = NULL; - - row_end = dtuple_get_nth_field(row, table->vers_end); - if (dict_table_get_nth_col(table, table->vers_end)->vers_native()) { - mach_write_to_8(row_end_data, trx->id); - dfield_set_data(row_end, row_end_data, 8); - } else { - thd_get_query_start_data(trx->mysql_thd, row_end_data); - dfield_set_data(row_end, row_end_data, 7); - } - - for (;;) { - thr->run_node = insert_node; - thr->prev_node = insert_node; - - row_ins_step(thr); - - switch (trx->error_state) { - case DB_LOCK_WAIT: - if (lock_wait(thr) == DB_SUCCESS) { - continue; - } - - /* fall through */ - default: - /* Other errors are handled for the parent node. */ - thr->fk_cascade_depth = 0; - goto exit; - - case DB_SUCCESS: - dict_stats_update_if_needed(table, *trx); - goto exit; - } - } -exit: - que_graph_free_recursive(insert_node); - mem_heap_free(node->historical_heap); - node->historical_heap = NULL; - return trx->error_state; -} - /**********************************************************************//** Does a cascaded delete or set null in a foreign key operation. @return error code or DB_SUCCESS */ @@ -1945,26 +1826,11 @@ row_update_cascade_for_mysql( or set null operation */ dict_table_t* table) /*!< in: table where we do the operation */ { - /* Increment fk_cascade_depth to record the recursive call depth on - a single update/delete that affects multiple tables chained - together with foreign key relations. 
*/ - - if (++thr->fk_cascade_depth > FK_MAX_CASCADE_DEL) { - return(DB_FOREIGN_EXCEED_MAX_CASCADE); - } - trx_t* trx = thr_get_trx(thr); - if (table->versioned()) { - if (node->is_delete == PLAIN_DELETE) { - node->vers_make_delete(trx); - } else if (node->update->affects_versioned()) { - dberr_t err = row_update_vers_insert(thr, node); - if (err != DB_SUCCESS) { - return err; - } - node->vers_make_update(trx); - } + if (table->versioned() && node->is_delete != PLAIN_DELETE && + node->update->affects_versioned()) { + node->vers_make_update(trx); } for (;;) { @@ -1975,7 +1841,11 @@ row_update_cascade_for_mysql( { TABLE *mysql_table = thr->prebuilt->m_mysql_table; thr->prebuilt->m_mysql_table = NULL; - row_upd_step(thr); + + dberr_t cascade_error = innodb_do_foreign_cascade(thr, + node); + if (UNIV_LIKELY(trx->error_state == DB_SUCCESS)) + trx->error_state = cascade_error; thr->prebuilt->m_mysql_table = mysql_table; } @@ -1987,12 +1857,9 @@ row_update_cascade_for_mysql( /* fall through */ default: - /* Other errors are handled for the parent node. */ - thr->fk_cascade_depth = 0; return trx->error_state; case DB_SUCCESS: - thr->fk_cascade_depth = 0; bool stats; if (node->is_delete == PLAIN_DELETE) { diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index c063c3b34b810..1bfb6860780c4 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -3117,38 +3117,24 @@ row_sel_store_mysql_field( DBUG_RETURN(TRUE); } -/** Convert a row in the Innobase format to a row in the MySQL format. -Note that the template in prebuilt may advise us to copy only a few -columns to mysql_rec, other columns are left blank. All columns may not -be needed in the query. -@param[out] mysql_rec row in the MySQL format -@param[in] prebuilt cursor -@param[in] rec Innobase record in the index - which was described in prebuilt's - template, or in the clustered index; - must be protected by a page latch -@param[in] vrow virtual columns -@param[in] rec_clust whether index must be the clustered index -@param[in] index index of rec -@param[in] offsets array returned by rec_get_offsets(rec) -@retval true on success -@retval false if not all columns could be retrieved */ MY_ATTRIBUTE((warn_unused_result)) -static bool row_sel_store_mysql_rec( - byte* mysql_rec, - row_prebuilt_t* prebuilt, - const rec_t* rec, - const dtuple_t* vrow, - bool rec_clust, - const dict_index_t* index, - const rec_offs* offsets) +static bool +row_sel_store_mysql_rec_internal( + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, + const dtuple_t* vrow, + bool rec_clust, + const dict_index_t* index, + const rec_offs* offsets, + bool free_blob) { DBUG_ENTER("row_sel_store_mysql_rec"); ut_ad(rec_clust || index == prebuilt->index); ut_ad(!rec_clust || dict_index_is_clust(index)); - if (UNIV_LIKELY_NULL(prebuilt->blob_heap)) { + if (free_blob && UNIV_LIKELY_NULL(prebuilt->blob_heap)) { row_mysql_prebuilt_free_blob_heap(prebuilt); } @@ -3260,6 +3246,65 @@ static bool row_sel_store_mysql_rec( DBUG_RETURN(true); } +/** Convert a row in the Innobase format to a row in the MySQL format. +Note that the template in prebuilt may advise us to copy only a few +columns to mysql_rec, other columns are left blank. All columns may not +be needed in the query. 
+@param[out] mysql_rec row in the MySQL format +@param[in] prebuilt cursor +@param[in] rec Innobase record in the index + which was described in prebuilt's + template, or in the clustered index; + must be protected by a page latch +@param[in] vrow virtual columns +@param[in] rec_clust whether index must be the clustered index +@param[in] index index of rec +@param[in] offsets array returned by rec_get_offsets(rec) +@retval true on success +@retval false if not all columns could be retrieved */ +MY_ATTRIBUTE((warn_unused_result)) +bool row_sel_store_mysql_rec( + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, + const dtuple_t* vrow, + bool rec_clust, + const dict_index_t* index, + const rec_offs* offsets) +{ + return row_sel_store_mysql_rec_internal(mysql_rec, prebuilt, rec, vrow, + rec_clust, index, offsets, true); +} + +/** Convert a row in the Innobase format to a row in the MySQL format. +Unlike row_sel_store_mysql_rec, keeps the blobs stored in prebuilt->blob_heap. +@param[out] mysql_rec row in the MySQL format +@param[in] prebuilt cursor +@param[in] rec Innobase record in the index + which was described in prebuilt's + template, or in the clustered index; + must be protected by a page latch +@param[in] vrow virtual columns +@param[in] rec_clust whether index must be the clustered index +@param[in] index index of rec +@param[in] offsets array returned by rec_get_offsets(rec) +@retval true on success +@retval false if not all columns could be retrieved */ +MY_ATTRIBUTE((warn_unused_result)) +bool +row_sel_store_mysql_rec_keep_blobs( + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, + const dtuple_t* vrow, + bool rec_clust, + const dict_index_t* index, + const rec_offs* offsets) +{ + return row_sel_store_mysql_rec_internal(mysql_rec, prebuilt, rec, vrow, + rec_clust, index, offsets, false); +} + static void row_sel_reset_old_vers_heap(row_prebuilt_t *prebuilt) { if (prebuilt->old_vers_heap) diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc index b5799689d979e..3b11001b6fdd7 100644 --- a/storage/innobase/ut/ut0ut.cc +++ b/storage/innobase/ut/ut0ut.cc @@ -418,7 +418,8 @@ ut_strerr( return ("File system does not support punch hole (trim) operation."); case DB_PAGE_CORRUPTED: return("Page read from tablespace is corrupted."); - + case DB_SQL_ERROR: + return("Error happened at sql layer."); /* do not add default: in order to produce a warning if new code is added to the enum but not added here */ }
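
Reviewer note: a minimal SQL-level sketch of the behaviour this series targets
(MDEV-12302, "Execute triggers for foreign key updates/deletes"). The table,
trigger and column names below are invented for illustration and are not part
of the patch; the exact mysql-test syntax may differ. Because cascading
foreign key actions are now routed back through the SQL layer
(innodb_do_foreign_cascade -> sql_delete_row()/sql_update_row()), DML triggers
on the child table are expected to fire for rows touched by the cascade, and a
failure inside such a trigger is reported through the new
HA_ERR_CASCADE_SQL / DB_SQL_ERROR pair.

# Hypothetical illustration only; names are made up for this note.
create table parent (a int primary key) engine=InnoDB;
create table child (b int,
                    foreign key (b) references parent (a)
                    on delete cascade) engine=InnoDB;
create table audit (msg varchar(64)) engine=InnoDB;

# With this patch the BEFORE DELETE trigger on `child` should fire for every
# row removed by the cascade, which previously bypassed the SQL layer.
create trigger child_bd before delete on child for each row
  insert into audit values (concat('cascaded delete of b=', old.b));

insert into parent values (1),(2);
insert into child values (1),(2);

delete from parent where a = 1;  # cascades into child and fires child_bd
select * from audit;             # expected: 'cascaded delete of b=1'
drop table child, audit, parent;

If the trigger itself raises an error, the cascade is expected to abort with
the new "Error in cascade SQL action" handler error (mapped to DB_SQL_ERROR
inside InnoDB) rather than being silently ignored.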