-rw-r--r--  sql/group_by_handler.cc   5
-rw-r--r--  sql/opt_subselect.cc      3
-rw-r--r--  sql/sql_class.cc         13
-rw-r--r--  sql/sql_class.h           8
-rw-r--r--  sql/sql_delete.cc         1
-rw-r--r--  sql/sql_error.cc          6
-rw-r--r--  sql/sql_insert.cc         2
-rw-r--r--  sql/sql_lex.h             1
-rw-r--r--  sql/sql_limit.h          28
-rw-r--r--  sql/sql_profile.cc        2
-rw-r--r--  sql/sql_repl.cc           2
-rw-r--r--  sql/sql_select.cc        67
-rw-r--r--  sql/sql_tvc.cc            3
-rw-r--r--  sql/sql_union.cc          5
14 files changed, 63 insertions(+), 83 deletions(-)
diff --git a/sql/group_by_handler.cc b/sql/group_by_handler.cc
index cb1462fce0e..71703cf09b6 100644
--- a/sql/group_by_handler.cc
+++ b/sql/group_by_handler.cc
@@ -97,7 +97,10 @@ int Pushdown_query::execute(JOIN *join)
{
int error;
/* result < 0 if row was not accepted and should not be counted */
- if (unlikely((error= join->result->send_data(*join->fields))))
+ if (unlikely((error=
+ join->result->send_data_with_check(*join->fields,
+ join->unit,
+ join->send_records))))
{
handler->end_scan();
DBUG_RETURN(error < 0 ? 0 : -1);
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 516730e25d3..eeefe4200da 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -5714,9 +5714,6 @@ int select_value_catcher::send_data(List<Item> &items)
DBUG_ASSERT(!assigned);
DBUG_ASSERT(items.elements == n_elements);
- if (unit->lim.check_and_move_offset())
- DBUG_RETURN(0); // Using limit offset,count
-
Item *val_item;
List_iterator_fast<Item> li(items);
for (uint i= 0; (val_item= li++); i++)
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 1e969cba637..766deea3320 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -3015,9 +3015,6 @@ int select_send::send_data(List<Item> &items)
Protocol *protocol= thd->protocol;
DBUG_ENTER("select_send::send_data");
- /* unit is not set when using 'delete ... returning' */
- if (unit && unit->lim.check_and_move_offset())
- DBUG_RETURN(FALSE); // using limit offset,count
if (thd->killed == ABORT_QUERY)
DBUG_RETURN(FALSE);
@@ -3282,8 +3279,6 @@ int select_export::send_data(List<Item> &items)
String tmp(buff,sizeof(buff),&my_charset_bin),*res;
tmp.length(0);
- if (unit->lim.check_and_move_offset())
- DBUG_RETURN(0); // using limit offset,count
if (thd->killed == ABORT_QUERY)
DBUG_RETURN(0);
row_count++;
@@ -3539,8 +3534,6 @@ int select_dump::send_data(List<Item> &items)
Item *item;
DBUG_ENTER("select_dump::send_data");
- if (unit->lim.check_and_move_offset())
- DBUG_RETURN(0); // using limit offset,count
if (thd->killed == ABORT_QUERY)
DBUG_RETURN(0);
@@ -3579,8 +3572,6 @@ int select_singlerow_subselect::send_data(List<Item> &items)
MYF(current_thd->lex->ignore ? ME_WARNING : 0));
DBUG_RETURN(1);
}
- if (unit->lim.check_and_move_offset())
- DBUG_RETURN(0); // Using limit offset,count
if (thd->killed == ABORT_QUERY)
DBUG_RETURN(0);
List_iterator_fast<Item> li(items);
@@ -3717,8 +3708,6 @@ int select_exists_subselect::send_data(List<Item> &items)
{
DBUG_ENTER("select_exists_subselect::send_data");
Item_exists_subselect *it= (Item_exists_subselect *)item;
- if (unit->lim.check_and_move_offset())
- DBUG_RETURN(0); // Using limit offset,count
if (thd->killed == ABORT_QUERY)
DBUG_RETURN(0);
it->value= 1;
@@ -4123,8 +4112,6 @@ int select_dumpvar::send_data(List<Item> &items)
{
DBUG_ENTER("select_dumpvar::send_data");
- if (unit->lim.check_and_move_offset())
- DBUG_RETURN(0); // using limit offset,count
if (row_count++)
{
my_message(ER_TOO_MANY_ROWS, ER_THD(thd, ER_TOO_MANY_ROWS), MYF(0));
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 17ca2ad3947..36480b3f3e0 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -5095,6 +5095,14 @@ class select_result_sink: public Sql_alloc
public:
THD *thd;
select_result_sink(THD *thd_arg): thd(thd_arg) {}
+ inline int send_data_with_check(List<Item> &items,
+ SELECT_LEX_UNIT *u,
+ ha_rows sent)
+ {
+ if (u->lim.check_offset(sent))
+ return 0;
+ return send_data(items);
+ }
/*
send_data returns 0 on ok, 1 on error and -1 if data was ignored, for
example for a duplicate row entry written to a temp table.
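
For context, a minimal self-contained sketch of the pattern that the new send_data_with_check() helper introduces (the types below are simplified stand-ins, not the real MariaDB classes): the caller passes the count of rows already produced, and the OFFSET check becomes a pure comparison instead of a counter that has to be decremented and later reset.

// Hedged illustration only: LimitCounters and Sink are simplified stand-ins
// for Select_limit_counters and select_result_sink.
#include <cstdint>
#include <iostream>

using ha_rows = uint64_t;

struct LimitCounters {
  ha_rows limit;    // rows to return after the offset
  ha_rows offset;   // rows to skip
  // Stateless: true while the current row is still inside the OFFSET window.
  bool check_offset(ha_rows sent) const { return sent < offset; }
};

struct Sink {
  int send_data(int row) { std::cout << "row " << row << '\n'; return 0; }
  // Mirrors the shape of send_data_with_check(): skip the row while inside
  // the offset window, otherwise forward it to send_data().
  int send_data_with_check(int row, const LimitCounters &lim, ha_rows sent) {
    if (lim.check_offset(sent))
      return 0;
    return send_data(row);
  }
};

int main() {
  LimitCounters lim{2, 3};                    // as if LIMIT 2 OFFSET 3
  Sink sink;
  ha_rows sent = 0;
  const int rows[] = {10, 11, 12, 13, 14, 15};
  for (int row : rows) {
    if (sent >= lim.offset + lim.limit)       // select limit reached
      break;
    sink.send_data_with_check(row, lim, sent);
    ++sent;                                   // counts skipped rows as well
  }
  return 0;                                   // prints rows 13 and 14
}
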
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index a965a7115d2..4488b4a92dd 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -815,6 +815,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
break;
}
+ // no LIMIT / OFFSET
if (with_select && result->send_data(select_lex->item_list) < 0)
{
error=1;
diff --git a/sql/sql_error.cc b/sql/sql_error.cc
index c38ecfff6e6..92a1adb8ab7 100644
--- a/sql/sql_error.cc
+++ b/sql/sql_error.cc
@@ -808,14 +808,14 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show)
Diagnostics_area::Sql_condition_iterator it=
thd->get_stmt_da()->sql_conditions();
- for (idx= 1; (err= it++) ; idx++)
+ for (idx= 0; (err= it++) ; idx++)
{
/* Skip levels that the user is not interested in */
if (!(levels_to_show & ((ulong) 1 << err->get_level())))
continue;
- if (unit->lim.check_and_move_offset())
+ if (unit->lim.check_offset(idx))
continue; // using limit offset,count
- if (idx > unit->lim.get_select_limit())
+ if (idx >= unit->lim.get_select_limit())
break;
protocol->prepare_for_resend();
protocol->store(warning_level_names[err->get_level()].str,
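
The SHOW WARNINGS loop above also becomes zero-based. A hedged sketch of that window arithmetic (plain containers instead of the Diagnostics_area iterator): with idx counted from 0, a row is skipped while check_offset(idx) holds, i.e. idx < offset, and the loop stops once idx >= get_select_limit(), which already includes the offset.

// Hedged sketch of the zero-based LIMIT/OFFSET window used in the loop above;
// the warning list and counters are simplified stand-ins.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> warnings = {"w0", "w1", "w2", "w3", "w4"};
  uint64_t offset = 1, limit = 2;
  uint64_t select_limit = offset + limit;   // what get_select_limit() yields
  for (uint64_t idx = 0; idx < warnings.size(); idx++) {
    if (idx < offset)                       // check_offset(idx): still in OFFSET
      continue;
    if (idx >= select_limit)                // the new >= comparison
      break;
    std::printf("%s\n", warnings[idx].c_str());
  }
  return 0;                                 // prints w1 and w2
}
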
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index bc729a33ba1..a0cdd530d9a 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -3857,8 +3857,6 @@ int select_insert::send_data(List<Item> &values)
DBUG_ENTER("select_insert::send_data");
bool error=0;
- if (unit->lim.check_and_move_offset())
- DBUG_RETURN(0); // using limit offset,count
if (unlikely(thd->killed == ABORT_QUERY))
DBUG_RETURN(0);
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 08f4932ac97..5a0a180c4c8 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -831,7 +831,6 @@ void create_explain_query_if_not_exists(LEX *lex, MEM_ROOT *mem_root);
bool print_explain_for_slow_log(LEX *lex, THD *thd, String *str);
-
class st_select_lex_unit: public st_select_lex_node {
protected:
TABLE_LIST result_table_list;
diff --git a/sql/sql_limit.h b/sql/sql_limit.h
index 8e0c920dffc..93a9aae85af 100644
--- a/sql/sql_limit.h
+++ b/sql/sql_limit.h
@@ -22,51 +22,38 @@
class Select_limit_counters
{
- ha_rows offset_limit_cnt_start,
- select_limit_cnt, offset_limit_cnt;
+ ha_rows select_limit_cnt, offset_limit_cnt;
public:
Select_limit_counters():
- offset_limit_cnt_start(0),
select_limit_cnt(0), offset_limit_cnt(0)
{};
void set_limit(ha_rows limit, ha_rows offset)
{
- offset_limit_cnt_start= offset;
+ offset_limit_cnt= offset;
select_limit_cnt= limit;
- if (select_limit_cnt + offset_limit_cnt_start >=
+ if (select_limit_cnt + offset_limit_cnt >=
select_limit_cnt)
- select_limit_cnt+= offset_limit_cnt_start;
+ select_limit_cnt+= offset_limit_cnt;
else
select_limit_cnt= HA_POS_ERROR;
- reset();
}
void set_single_row()
{
- offset_limit_cnt= offset_limit_cnt_start= 0;
+ offset_limit_cnt= 0;
select_limit_cnt= 1;
}
- void reset()
- {
- offset_limit_cnt= offset_limit_cnt_start;
- }
-
bool is_unlimited()
{ return select_limit_cnt == HA_POS_ERROR; }
void set_unlimited()
{ select_limit_cnt= HA_POS_ERROR; offset_limit_cnt= 0; }
- bool check_and_move_offset()
+ bool check_offset(ha_rows sent)
{
- if (offset_limit_cnt)
- {
- offset_limit_cnt--;
- return TRUE;
- }
- return FALSE;
+ return sent < offset_limit_cnt;
}
void remove_offset() { offset_limit_cnt= 0; }
@@ -76,5 +63,4 @@ class Select_limit_counters
{ return offset_limit_cnt; }
};
-
#endif // INCLUDES_MARIADB_SQL_LIMIT_H
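
The core of the change is above: Select_limit_counters loses the saved start value and the offset-consuming check. A hedged, simplified comparison of the two styles, showing why reset() is no longer needed (OldCounters and NewCounters are illustrative only, not the real class):

// Hedged comparison of the two offset-checking styles; illustrative only.
#include <cassert>
#include <cstdint>

using ha_rows = uint64_t;

struct OldCounters {                    // stateful: the check consumes the offset
  ha_rows offset;
  bool check_and_move_offset() {
    if (offset) { offset--; return true; }
    return false;
  }
};

struct NewCounters {                    // stateless: the offset never changes
  ha_rows offset;
  bool check_offset(ha_rows sent) const { return sent < offset; }
};

int main() {
  OldCounters old_lim{2};
  NewCounters new_lim{2};

  // First execution: both variants skip rows 0 and 1.
  for (ha_rows sent = 0; sent < 4; sent++)
    assert(old_lim.check_and_move_offset() == new_lim.check_offset(sent));

  // Second execution of the same unit (e.g. a re-evaluated subquery):
  // the old counter has been used up, so nothing would be skipped unless the
  // caller remembered to call reset(); the new check still skips rows 0 and 1
  // because only the caller-supplied `sent` value varies.
  assert(old_lim.check_and_move_offset() == false);   // offset already consumed
  assert(new_lim.check_offset(0) == true);            // still inside OFFSET
  return 0;
}
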
diff --git a/sql/sql_profile.cc b/sql/sql_profile.cc
index 40e7908ac1d..cee55761a98 100644
--- a/sql/sql_profile.cc
+++ b/sql/sql_profile.cc
@@ -436,7 +436,7 @@ bool PROFILING::show_profiles()
double query_time_usecs= prof->m_end_time_usecs - prof->m_start_time_usecs;
- if (unit->lim.check_and_move_offset())
+ if (unit->lim.check_offset(idx))
continue;
if (idx > unit->lim.get_select_limit())
break;
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 82d105a869f..791bfc2c9fc 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -4106,7 +4106,7 @@ bool mysql_show_binlog_events(THD* thd)
description_event,
opt_master_verify_checksum)); )
{
- if (!unit->lim.check_and_move_offset() &&
+ if (!unit->lim.check_offset(event_count) &&
ev->net_send(protocol, linfo.log_file_name, pos))
{
errmsg = "Net error";
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index db6c725dda4..e3c4277f960 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -3987,8 +3987,6 @@ JOIN::reinit()
{
DBUG_ENTER("JOIN::reinit");
- unit->lim.reset();
-
first_record= false;
group_sent= false;
cleaned= false;
@@ -4259,7 +4257,8 @@ void JOIN::exec_inner()
{
if (do_send_rows &&
(procedure ? (procedure->send_row(procedure_fields_list) ||
- procedure->end_of_records()) : result->send_data(fields_list)> 0))
+ procedure->end_of_records()):
+ result->send_data_with_check(fields_list, unit, 0)> 0))
error= 1;
else
send_records= ((select_options & OPTION_FOUND_ROWS) ? 1 :
@@ -14210,7 +14209,7 @@ return_zero_rows(JOIN *join, select_result *result, List<TABLE_LIST> &tables,
{
bool send_error= FALSE;
if (send_row)
- send_error= result->send_data(fields) > 0;
+ send_error= result->send_data_with_check(fields, join->unit, 0) > 0;
if (likely(!send_error))
result->send_eof(); // Should be safe
}
@@ -19791,33 +19790,34 @@ do_select(JOIN *join, Procedure *procedure)
HAVING will be checked after processing aggregate functions,
But WHERE should checked here (we alredy have read tables).
Notice that make_join_select() splits all conditions in this case
- into two groups exec_const_cond and outer_ref_cond.
- If join->table_count == join->const_tables then it is
- sufficient to check only the condition pseudo_bits_cond.
- */
- DBUG_ASSERT(join->outer_ref_cond == NULL);
- if (!join->pseudo_bits_cond || join->pseudo_bits_cond->val_int())
- {
- // HAVING will be checked by end_select
- error= (*end_select)(join, 0, 0);
- if (error >= NESTED_LOOP_OK)
- error= (*end_select)(join, 0, 1);
-
- /*
- If we don't go through evaluate_join_record(), do the counting
- here. join->send_records is increased on success in end_send(),
- so we don't touch it here.
+ into two groups exec_const_cond and outer_ref_cond.
+ If join->table_count == join->const_tables then it is
+ sufficient to check only the condition pseudo_bits_cond.
*/
- join->join_examined_rows++;
- DBUG_ASSERT(join->join_examined_rows <= 1);
- }
- else if (join->send_row_on_empty_set())
- {
- if (!join->having || join->having->val_int())
+ DBUG_ASSERT(join->outer_ref_cond == NULL);
+ if (!join->pseudo_bits_cond || join->pseudo_bits_cond->val_int())
{
- List<Item> *columns_list= (procedure ? &join->procedure_fields_list :
+ // HAVING will be checked by end_select
+ error= (*end_select)(join, 0, 0);
+ if (error >= NESTED_LOOP_OK)
+ error= (*end_select)(join, 0, 1);
+
+ /*
+ If we don't go through evaluate_join_record(), do the counting
+ here. join->send_records is increased on success in end_send(),
+ so we don't touch it here.
+ */
+ join->join_examined_rows++;
+ DBUG_ASSERT(join->join_examined_rows <= 1);
+ }
+ else if (join->send_row_on_empty_set())
+ {
+ if (!join->having || join->having->val_int())
+ {
+ List<Item> *columns_list= (procedure ? &join->procedure_fields_list :
join->fields);
- rc= join->result->send_data(*columns_list) > 0;
+ rc= join->result->send_data_with_check(*columns_list,
+ join->unit, 0) > 0;
}
}
/*
@@ -21489,7 +21489,9 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
int error;
/* result < 0 if row was not accepted and should not be counted */
- if (unlikely((error= join->result->send_data(*fields))))
+ if (unlikely((error= join->result->send_data_with_check(*fields,
+ join->unit,
+ join->send_records))))
{
if (error > 0)
DBUG_RETURN(NESTED_LOOP_ERROR);
@@ -21637,7 +21639,9 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
if (join->do_send_rows)
{
- error=join->result->send_data(*fields);
+ error= join->result->send_data_with_check(*fields,
+ join->unit,
+ join->send_records);
if (unlikely(error < 0))
{
/* Duplicate row, don't count */
@@ -25847,7 +25851,8 @@ int JOIN::rollup_send_data(uint idx)
if ((!having || having->val_int()))
{
if (send_records < unit->lim.get_select_limit() && do_send_rows &&
- (res= result->send_data(rollup.fields[i])) > 0)
+ (res= result->send_data_with_check(rollup.fields[i],
+ unit, send_records)) > 0)
return 1;
if (!res)
send_records++;
diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc
index f4a99392149..d53ff1f6f30 100644
--- a/sql/sql_tvc.cc
+++ b/sql/sql_tvc.cc
@@ -388,7 +388,8 @@ bool table_value_constr::exec(SELECT_LEX *sl)
{
if (send_records >= sl->master_unit()->lim.get_select_limit())
break;
- int rc= result->send_data(*elem);
+ int rc=
+ result->send_data_with_check(*elem, sl->master_unit(), send_records);
if (!rc)
send_records++;
else if (rc > 0)
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index c10742737b5..47af00865c1 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -111,8 +111,6 @@ int select_unit::send_data(List<Item> &values)
{
int rc= 0;
int not_reported_error= 0;
- if (unit->lim.check_and_move_offset())
- return 0; // using limit offset,count
if (thd->killed == ABORT_QUERY)
return 0;
if (table->no_rows_with_nulls)
@@ -604,8 +602,6 @@ int select_unit_ext::send_data(List<Item> &values)
int rc= 0;
int not_reported_error= 0;
int find_res;
- if (unit->lim.check_and_move_offset())
- return 0;
if (thd->killed == ABORT_QUERY)
return 0;
if (table->no_rows_with_nulls)
@@ -2200,7 +2196,6 @@ bool st_select_lex_unit::exec()
}
if (!sl->tvc)
saved_error= sl->join->error;
- lim.reset();
if (likely(!saved_error))
{
examined_rows+= thd->get_examined_row_count();