author    Monty <monty@mariadb.org>    2018-04-04 12:16:12 +0300
committer Monty <monty@mariadb.org>    2018-05-07 00:07:32 +0300
commit    30ebc3ee9efcab635b1f3e14b9198a58ae93c233 (patch)
tree      81e3ad66cd4ec8693964317cbf23515d0e9ecf35 /sql/filesort.cc
parent    a22a339f8e044a1e8df011beb0b4c8f43792ac96 (diff)
download  mariadb-git-30ebc3ee9efcab635b1f3e14b9198a58ae93c233.tar.gz
Add likely/unlikely to speed up execution
Added to:
- if (error)
- Lex
- sql_yacc.yy and sql_yacc_ora.yy
- In header files to alloc() calls
- Added thd argument to thd_net_is_killed()
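For context, likely() and unlikely() are branch-prediction hints. Below is a minimal sketch of the conventional GCC/Clang pattern; MariaDB defines its own macros in its compiler headers, and the exact form there may differ slightly:

/* Sketch of the conventional GCC/Clang branch-prediction hint macros. */
#ifndef likely
#define likely(x)   __builtin_expect(((x) != 0), 1)
#define unlikely(x) __builtin_expect(((x) != 0), 0)
#endif

/* Usage mirroring this patch: mark error paths as cold so the compiler
   emits the success path as straight-line (fall-through) code. */
int check(int error)
{
  if (unlikely(error))
    return 1;                   /* predicted-cold error path */
  return 0;                     /* predicted-hot success path */
}

With the expected outcome encoded, the compiler keeps the hot path contiguous, which tends to improve instruction-cache locality and branch prediction on error-check-heavy code such as filesort().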
Diffstat (limited to 'sql/filesort.cc')
-rw-r--r--  sql/filesort.cc  53
1 file changed, 27 insertions(+), 26 deletions(-)
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 4585330ea54..e37c36da680 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -376,7 +376,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
}
}
tracker->report_merge_passes_at_end(thd->query_plan_fsort_passes);
- if (error)
+ if (unlikely(error))
{
int kill_errno= thd->killed_errno();
DBUG_ASSERT(thd->is_error() || kill_errno || thd->killed == ABORT_QUERY);
@@ -414,7 +414,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
(longlong) sort->found_rows));
MYSQL_FILESORT_DONE(error, num_rows);
- if (error)
+ if (unlikely(error))
{
delete sort;
sort= 0;
@@ -742,7 +742,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
next_pos=(uchar*) 0; /* Find records in sequence */
DBUG_EXECUTE_IF("bug14365043_1",
DBUG_SET("+d,ha_rnd_init_fail"););
- if (file->ha_rnd_init_with_error(1))
+ if (unlikely(file->ha_rnd_init_with_error(1)))
DBUG_RETURN(HA_POS_ERROR);
file->extra_opt(HA_EXTRA_CACHE, thd->variables.read_buff_size);
}
@@ -779,7 +779,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
{
if (quick_select)
{
- if ((error= select->quick->get_next()))
+ if (unlikely((error= select->quick->get_next())))
break;
file->position(sort_form->record[0]);
DBUG_EXECUTE_IF("debug_filesort", dbug_print_record(sort_form, TRUE););
@@ -793,14 +793,14 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
my_store_ptr(ref_pos,ref_length,record); // Position to row
record+= sort_form->s->db_record_offset;
}
- else if (!error)
+ else if (likely(!error))
file->position(sort_form->record[0]);
}
- if (error && error != HA_ERR_RECORD_DELETED)
+ if (unlikely(error && error != HA_ERR_RECORD_DELETED))
break;
}
- if (thd->check_killed())
+ if (unlikely(thd->check_killed()))
{
DBUG_PRINT("info",("Sort killed by user"));
if (!quick_select)
@@ -812,7 +812,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
}
bool write_record= false;
- if (error == 0)
+ if (likely(error == 0))
{
param->examined_rows++;
if (select && select->cond)
@@ -865,7 +865,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
}
/* It does not make sense to read more keys in case of a fatal error */
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
break;
/*
@@ -885,11 +885,11 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
/* Signal we should use orignal column read and write maps */
sort_form->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
DBUG_RETURN(HA_POS_ERROR);
DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos));
- if (error != HA_ERR_END_OF_FILE)
+ if (unlikely(error != HA_ERR_END_OF_FILE))
{
file->print_error(error,MYF(ME_ERROR | ME_WAITTANG)); // purecov: inspected
DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
@@ -1507,27 +1507,28 @@ cleanup:
/**
Read data to buffer.
- @retval
- (uint)-1 if something goes wrong
+ @retval Number of bytes read
+ (uint)-1 if something goes wrong
*/
uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
uint rec_length)
{
uint count;
- uint length;
+ uint length= 0;
if ((count=(uint) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
{
- if (my_b_pread(fromfile, (uchar*) buffpek->base,
- (length= rec_length*count), buffpek->file_pos))
+ length= rec_length*count;
+ if (unlikely(my_b_pread(fromfile, (uchar*) buffpek->base, length,
+ buffpek->file_pos)))
return ((uint) -1);
buffpek->key=buffpek->base;
buffpek->file_pos+= length; /* New filepos */
buffpek->count-= count;
buffpek->mem_count= count;
}
- return (count*rec_length);
+ return (length);
} /* read_to_buffer */
@@ -1648,7 +1649,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
strpos+=
(uint) (error= (int) read_to_buffer(from_file, buffpek, rec_length));
- if (error == -1)
+ if (unlikely(error == -1))
goto err; /* purecov: inspected */
buffpek->max_keys= buffpek->mem_count; // If less data in buffers than expected
queue_insert(&queue, (uchar*) buffpek);
@@ -1669,13 +1670,13 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
buffpek->key+= rec_length;
if (! --buffpek->mem_count)
{
- if (!(error= (int) read_to_buffer(from_file, buffpek,
- rec_length)))
+ if (unlikely(!(error= (int) read_to_buffer(from_file, buffpek,
+ rec_length))))
{
(void) queue_remove_top(&queue);
reuse_freed_buff(&queue, buffpek, rec_length);
}
- else if (error == -1)
+ else if (unlikely(error == -1))
goto err; /* purecov: inspected */
}
queue_replace_top(&queue); // Top element has been used
@@ -1685,7 +1686,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
while (queue.elements > 1)
{
- if (killable && thd->check_killed())
+ if (killable && unlikely(thd->check_killed()))
{
error= 1; goto err; /* purecov: inspected */
}
@@ -1746,8 +1747,8 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
buffpek->key+= rec_length;
if (! --buffpek->mem_count)
{
- if (!(error= (int) read_to_buffer(from_file, buffpek,
- rec_length)))
+ if (unlikely(!(error= (int) read_to_buffer(from_file, buffpek,
+ rec_length))))
{
(void) queue_remove_top(&queue);
reuse_freed_buff(&queue, buffpek, rec_length);
@@ -1837,8 +1838,8 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
}
}
}
- while ((error=(int) read_to_buffer(from_file, buffpek, rec_length))
- != -1 && error != 0);
+ while (likely((error=(int) read_to_buffer(from_file, buffpek, rec_length))
+ != -1 && error != 0));
end:
lastbuff->count= MY_MIN(org_max_rows-max_rows, param->max_rows);