summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author:    Daniel Black <daniel@mariadb.org>  2021-02-17 16:12:06 +1100
committer: Daniel Black <daniel@mariadb.org>  2021-02-25 14:57:29 +1100
commit:    f005fab662bd4ec40a374d2ac4fffa502ba94c9c (patch)
tree:      b2d7d4512fde80e4ea3d4856ed0b9e5e98568212
parent:    9001e39fe7dd5a36ab6f94a73a8bdc6100dde962 (diff)
download:  mariadb-git-bb-10.2-danielblack-MDEV-23510-arm-lfhash.tar.gz
MDEV-23510: lf_dynarray_lvalue to return aligned address (branch: bb-10.2-danielblack-MDEV-23510-arm-lfhash)
Aligned to sizeof(void *). This ensures that the arm64 use of lf_hash remains stable. Also removes the misuse of volatile in maria/lockman, because it was neither necessary nor advisable (volatile does not provide thread synchronization).
-rw-r--r--  mysys/lf_dynarray.c      | 18
-rw-r--r--  storage/maria/lockman.c  | 22
2 files changed, 20 insertions, 20 deletions
diff --git a/mysys/lf_dynarray.c b/mysys/lf_dynarray.c
index be23690c70b..ae0f6dc941c 100644
--- a/mysys/lf_dynarray.c
+++ b/mysys/lf_dynarray.c
@@ -56,10 +56,10 @@ static void recursive_free(void **alloc, int level)
int i;
for (i= 0; i < LF_DYNARRAY_LEVEL_LENGTH; i++)
recursive_free(alloc[i], level-1);
- my_free(alloc);
+ my_free_aligned(alloc);
}
else
- my_free(alloc[-1]);
+ my_free_aligned(alloc[-1]);
}
void lf_dynarray_destroy(LF_DYNARRAY *array)
@@ -106,14 +106,14 @@ void *lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
{
if (!(ptr= *ptr_ptr))
{
- void *alloc= my_malloc(LF_DYNARRAY_LEVEL_LENGTH * sizeof(void *),
- MYF(MY_WME|MY_ZEROFILL));
+ void *alloc= my_calloc_aligned(LF_DYNARRAY_LEVEL_LENGTH * sizeof(void *),
+ sizeof(void *));
if (unlikely(!alloc))
return(NULL);
if (my_atomic_casptr(ptr_ptr, &ptr, alloc))
ptr= alloc;
else
- my_free(alloc);
+ my_free_aligned(alloc);
}
ptr_ptr= ((void **)ptr) + idx / dynarray_idxes_in_prev_level[i];
idx%= dynarray_idxes_in_prev_level[i];
@@ -121,9 +121,9 @@ void *lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
if (!(ptr= *ptr_ptr))
{
uchar *alloc, *data;
- alloc= my_malloc(LF_DYNARRAY_LEVEL_LENGTH * array->size_of_element +
- MY_MAX(array->size_of_element, sizeof(void *)),
- MYF(MY_WME|MY_ZEROFILL));
+ alloc= my_calloc_aligned(LF_DYNARRAY_LEVEL_LENGTH * array->size_of_element +
+ MY_MAX(array->size_of_element, sizeof(void *)),
+ sizeof(void *));
if (unlikely(!alloc))
return(NULL);
/* reserve the space for free() address */
@@ -137,7 +137,7 @@ void *lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
if (my_atomic_casptr(ptr_ptr, &ptr, data))
ptr= data;
else
- my_free(alloc);
+ my_free_aligned(alloc);
}
return ((uchar*)ptr) + array->size_of_element * idx;
}
diff --git a/storage/maria/lockman.c b/storage/maria/lockman.c
index a23558e46dd..b9f1c4ca56b 100644
--- a/storage/maria/lockman.c
+++ b/storage/maria/lockman.c
@@ -214,7 +214,7 @@ static enum lockman_getlock_result getlock_result[10][10]=
typedef struct lockman_lock {
uint64 resource;
struct lockman_lock *lonext;
- intptr volatile link;
+ intptr link;
uint32 hashnr;
/* QQ: TODO - remove hashnr from LOCK */
uint16 loid;
@@ -227,7 +227,7 @@ typedef struct lockman_lock {
#define ACTIVE 4
typedef struct {
- intptr volatile *prev;
+ intptr *prev;
LOCK *curr, *next;
LOCK *blocker, *upgrade_from;
} CURSOR;
@@ -240,7 +240,7 @@ typedef struct {
cursor is positioned in either case
pins[0..3] are used, they are NOT removed on return
*/
-static int lockfind(LOCK * volatile *head, LOCK *node,
+static int lockfind(LOCK **head, LOCK *node,
CURSOR *cursor, LF_PINS *pins)
{
uint32 hashnr, cur_hashnr;
@@ -397,7 +397,7 @@ retry:
NOTE
it uses pins[0..3], on return pins 0..2 are removed, pin 3 (blocker) stays
*/
-static int lockinsert(LOCK * volatile *head, LOCK *node, LF_PINS *pins,
+static int lockinsert(LOCK **head, LOCK *node, LF_PINS *pins,
LOCK **blocker)
{
CURSOR cursor;
@@ -453,7 +453,7 @@ static int lockinsert(LOCK * volatile *head, LOCK *node, LF_PINS *pins,
NOTE
it uses pins[0..3], on return pins 0..2 are removed, pin 3 (blocker) stays
*/
-static int lockpeek(LOCK * volatile *head, LOCK *node, LF_PINS *pins,
+static int lockpeek(LOCK **head, LOCK *node, LF_PINS *pins,
LOCK **blocker)
{
CURSOR cursor;
@@ -475,7 +475,7 @@ static int lockpeek(LOCK * volatile *head, LOCK *node, LF_PINS *pins,
One _must_ have the lock (or request) to call this
*/
-static int lockdelete(LOCK * volatile *head, LOCK *node, LF_PINS *pins)
+static int lockdelete(LOCK **head, LOCK *node, LF_PINS *pins)
{
CURSOR cursor;
int res;
@@ -539,7 +539,7 @@ void lockman_destroy(LOCKMAN *lm)
if (el->hashnr & 1)
lf_alloc_direct_free(&lm->alloc, el);
else
- my_free((void *)el);
+ my_free_aligned((void *)el);
el= (LOCK *)next;
}
lf_alloc_destroy(&lm->alloc);
@@ -549,14 +549,14 @@ void lockman_destroy(LOCKMAN *lm)
/* TODO: optimize it */
#define MAX_LOAD 1
-static void initialize_bucket(LOCKMAN *lm, LOCK * volatile *node,
+static void initialize_bucket(LOCKMAN *lm, LOCK **node,
uint bucket, LF_PINS *pins)
{
int res;
uint parent= my_clear_highest_bit(bucket);
LOCK *dummy= (LOCK *)my_malloc(sizeof(LOCK), MYF(MY_WME));
LOCK **tmp= 0, *cur;
- LOCK * volatile *el= lf_dynarray_lvalue(&lm->array, parent);
+ LOCK **el= lf_dynarray_lvalue(&lm->array, parent);
if (*el == NULL && bucket)
initialize_bucket(lm, el, parent, pins);
@@ -599,7 +599,7 @@ enum lockman_getlock_result lockman_getlock(LOCKMAN *lm, LOCK_OWNER *lo,
{
int res;
uint csize, bucket, hashnr;
- LOCK *node, * volatile *el, *blocker;
+ LOCK *node, **el, *blocker;
LF_PINS *pins= lo->pins;
enum lockman_lock_type old_lock;
@@ -717,7 +717,7 @@ enum lockman_getlock_result lockman_getlock(LOCKMAN *lm, LOCK_OWNER *lo,
*/
int lockman_release_locks(LOCKMAN *lm, LOCK_OWNER *lo)
{
- LOCK * volatile *el, *node, *next;
+ LOCK **el, *node, *next;
uint bucket;
LF_PINS *pins= lo->pins;