author     Guillaume Munch-Maccagnoni <Guillaume.Munch-Maccagnoni@inria.fr>  2023-04-18 17:24:00 +0200
committer  Guillaume Munch-Maccagnoni <Guillaume.Munch-Maccagnoni@inria.fr>  2023-04-18 17:42:16 +0200
commit     2788503869eae32cbddfbc9952dd7b512a4816cb (patch)
tree       45d9b94a3e2d15d5a78bb4c0c0059087ec9099fa
parent     b42a488f0b2d08483e54c7dd1d983398542184d7 (diff)
Make use of polymorphic atomic_ helpers
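
The substitutions below are mechanical: each
atomic_load_explicit(p, memory_order_acquire/relaxed) and
atomic_store_explicit(p, v, memory_order_release/relaxed) call is
replaced by the corresponding atomic_load_*/atomic_store_* helper from
caml/platform.h (hence the added includes). As a rough sketch only --
assuming the helpers are thin type-generic macros over the C11
atomic_*_explicit primitives, which is what the one-for-one rewrites in
this diff suggest; the ring_head variable and function names in the
before/after example are hypothetical:

    /* Sketch of assumed definitions; the real ones live in
       runtime/caml/platform.h. */
    #include <stdatomic.h>
    #include <stdint.h>

    #define atomic_load_acquire(p) \
      atomic_load_explicit((p), memory_order_acquire)
    #define atomic_load_relaxed(p) \
      atomic_load_explicit((p), memory_order_relaxed)
    #define atomic_store_release(p, v) \
      atomic_store_explicit((p), (v), memory_order_release)
    #define atomic_store_relaxed(p, v) \
      atomic_store_explicit((p), (v), memory_order_relaxed)

    /* Illustrative before/after on a hypothetical ring-head index. */
    static _Atomic uint64_t ring_head;

    uint64_t read_head_before(void)
    { return atomic_load_explicit(&ring_head, memory_order_acquire); }

    uint64_t read_head_after(void)
    { return atomic_load_acquire(&ring_head); }

Because the helpers are macros rather than functions over a fixed
atomic_uintnat, they apply to any _Atomic-qualified type (atomic_value,
_Atomic opcode_t, struct lf_skipcell *_Atomic, ...), which is what the
call sites below rely on.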
 otherlibs/runtime_events/runtime_events_consumer.c | 10
 otherlibs/unix/symlink_win32.c                      |  7
 otherlibs/unix/unixsupport_unix.c                   |  5
 otherlibs/unix/unixsupport_win32.c                  |  5
 runtime/array.c                                     |  6
 runtime/caml/lf_skiplist.h                          |  3
 runtime/codefrag.c                                  |  6
 runtime/fiber.c                                     |  4
 runtime/interp.c                                    |  6
 runtime/lf_skiplist.c                               | 50
 runtime/major_gc.c                                  |  6
 runtime/memory.c                                    |  3
 runtime/minor_gc.c                                  | 12
 runtime/runtime_events.c                            | 22
 testsuite/tests/lf_skiplist/stubs.c                 |  3
 15 files changed, 61 insertions(+), 87 deletions(-)
diff --git a/otherlibs/runtime_events/runtime_events_consumer.c b/otherlibs/runtime_events/runtime_events_consumer.c
index 0901a3d931..1e5f229fd1 100644
--- a/otherlibs/runtime_events/runtime_events_consumer.c
+++ b/otherlibs/runtime_events/runtime_events_consumer.c
@@ -23,6 +23,7 @@
#include "caml/misc.h"
#include "caml/mlvalues.h"
#include "caml/osdeps.h"
+#include "caml/platform.h"
#include <fcntl.h>
#include <stdatomic.h>
@@ -391,10 +392,8 @@ caml_runtime_events_read_poll(struct caml_runtime_events_cursor *cursor,
do {
uint64_t buf[RUNTIME_EVENTS_MAX_MSG_LENGTH];
uint64_t ring_mask, header, msg_length;
- ring_head = atomic_load_explicit(&runtime_events_buffer_header->ring_head,
- memory_order_acquire);
- ring_tail = atomic_load_explicit(&runtime_events_buffer_header->ring_tail,
- memory_order_acquire);
+ ring_head = atomic_load_acquire(&runtime_events_buffer_header->ring_head);
+ ring_tail = atomic_load_acquire(&runtime_events_buffer_header->ring_tail);
if (ring_head > cursor->current_positions[domain_num]) {
if (cursor->lost_events) {
@@ -427,8 +426,7 @@ caml_runtime_events_read_poll(struct caml_runtime_events_cursor *cursor,
atomic_thread_fence(memory_order_seq_cst);
- ring_head = atomic_load_explicit(&runtime_events_buffer_header->ring_head,
- memory_order_acquire);
+ ring_head = atomic_load_acquire(&runtime_events_buffer_header->ring_head);
/* Check the message we've read hasn't been overwritten by the writer */
if (ring_head > cursor->current_positions[domain_num]) {
diff --git a/otherlibs/unix/symlink_win32.c b/otherlibs/unix/symlink_win32.c
index 3a1d6a1b8b..aac6545b31 100644
--- a/otherlibs/unix/symlink_win32.c
+++ b/otherlibs/unix/symlink_win32.c
@@ -26,6 +26,7 @@
#include <caml/fail.h>
#include <caml/signals.h>
#include <caml/osdeps.h>
+#include <caml/platform.h>
#include "unixsupport.h"
#ifndef SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE
@@ -78,13 +79,11 @@ CAMLprim value caml_unix_symlink(value to_dir, value osource, value odest)
caml_unix_check_path(osource, "symlink");
caml_unix_check_path(odest, "symlink");
- additional_flags = atomic_load_explicit(&additional_symlink_flags,
- memory_order_relaxed);
+ additional_flags = atomic_load_relaxed(&additional_symlink_flags);
if (additional_flags == -1) {
additional_flags = IsDeveloperModeEnabled() ?
SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE : 0;
- atomic_store_explicit(&additional_symlink_flags, additional_flags,
- memory_order_relaxed);
+ atomic_store_relaxed(&additional_symlink_flags, additional_flags);
}
flags =
diff --git a/otherlibs/unix/unixsupport_unix.c b/otherlibs/unix/unixsupport_unix.c
index 4a206072a0..449af8b5c2 100644
--- a/otherlibs/unix/unixsupport_unix.c
+++ b/otherlibs/unix/unixsupport_unix.c
@@ -13,11 +13,14 @@
/* */
/**************************************************************************/
+#define CAML_INTERNALS
+
#include <caml/mlvalues.h>
#include <caml/alloc.h>
#include <caml/callback.h>
#include <caml/memory.h>
#include <caml/fail.h>
+#include <caml/platform.h>
#include "unixsupport.h"
#include "cst2constr.h"
#include <errno.h>
@@ -293,7 +296,7 @@ void caml_unix_error(int errcode, const char *cmdname, value cmdarg)
value res;
const value * exn;
- exn = atomic_load_explicit(&caml_unix_error_exn, memory_order_acquire);
+ exn = atomic_load_acquire(&caml_unix_error_exn);
if (exn == NULL) {
exn = caml_named_value("Unix.Unix_error");
if (exn == NULL)
diff --git a/otherlibs/unix/unixsupport_win32.c b/otherlibs/unix/unixsupport_win32.c
index f9e85a4586..cb5eb35df7 100644
--- a/otherlibs/unix/unixsupport_win32.c
+++ b/otherlibs/unix/unixsupport_win32.c
@@ -13,6 +13,8 @@
/* */
/**************************************************************************/
+#define CAML_INTERNALS
+
#include <stddef.h>
#include <caml/mlvalues.h>
#include <caml/callback.h>
@@ -20,6 +22,7 @@
#include <caml/memory.h>
#include <caml/fail.h>
#include <caml/custom.h>
+#include <caml/platform.h>
#include "unixsupport.h"
#include "cst2constr.h"
#include <errno.h>
@@ -297,7 +300,7 @@ void caml_unix_error(int errcode, const char *cmdname, value cmdarg)
value res;
const value * exn;
- exn = atomic_load_explicit(&caml_unix_error_exn, memory_order_acquire);
+ exn = atomic_load_acquire(&caml_unix_error_exn);
if (exn == NULL) {
exn = caml_named_value("Unix.Unix_error");
if (exn == NULL)
diff --git a/runtime/array.c b/runtime/array.c
index 317153901e..5a850b4944 100644
--- a/runtime/array.c
+++ b/runtime/array.c
@@ -336,14 +336,12 @@ static void wo_memmove (volatile value* const dst,
if (dst < src) {
/* copy ascending */
for (i = 0; i < nvals; i++)
- atomic_store_explicit(&((atomic_value*)dst)[i], src[i],
- memory_order_release);
+ atomic_store_release(&((atomic_value*)dst)[i], src[i]);
} else {
/* copy descending */
for (i = nvals; i > 0; i--)
- atomic_store_explicit(&((atomic_value*)dst)[i-1], src[i-1],
- memory_order_release);
+ atomic_store_release(&((atomic_value*)dst)[i-1], src[i-1]);
}
}
}
diff --git a/runtime/caml/lf_skiplist.h b/runtime/caml/lf_skiplist.h
index f35f112256..db6544c867 100644
--- a/runtime/caml/lf_skiplist.h
+++ b/runtime/caml/lf_skiplist.h
@@ -95,8 +95,7 @@ extern void caml_lf_skiplist_free_garbage(struct lf_skiplist *sk);
#define LF_SK_UNMARK(p) ((struct lf_skipcell *)(((uintptr_t)(p)) & ~1))
#define LF_SK_EXTRACT(from, mark_to, ptr_to) \
{ \
- uintptr_t tmp = \
- (uintptr_t)atomic_load_explicit(&from, memory_order_acquire); \
+ uintptr_t tmp = (uintptr_t)atomic_load_acquire(&(from)); \
mark_to = LF_SK_IS_MARKED(tmp); \
ptr_to = LF_SK_UNMARK(tmp); \
}
diff --git a/runtime/codefrag.c b/runtime/codefrag.c
index d1659563c8..9237995fa2 100644
--- a/runtime/codefrag.c
+++ b/runtime/codefrag.c
@@ -95,7 +95,7 @@ void caml_remove_code_fragment(struct code_fragment *cf) {
cf_cell->cf = cf;
do {
- cf_cell->next = atomic_load_explicit(&garbage_head, memory_order_acquire);
+ cf_cell->next = atomic_load_acquire(&garbage_head);
} while (!atomic_compare_exchange_strong(&garbage_head, &cf_cell->next,
cf_cell));
}
@@ -167,7 +167,7 @@ void caml_code_fragment_cleanup (void)
caml_lf_skiplist_free_garbage(&code_fragments_by_pc);
caml_lf_skiplist_free_garbage(&code_fragments_by_num);
- curr = atomic_load_explicit(&garbage_head, memory_order_acquire);
+ curr = atomic_load_acquire(&garbage_head);
while (curr != NULL) {
struct code_fragment_garbage *next = curr->next;
@@ -178,5 +178,5 @@ void caml_code_fragment_cleanup (void)
curr = next;
}
- atomic_store_explicit(&garbage_head, NULL, memory_order_release);
+ atomic_store_release(&garbage_head, NULL);
}
diff --git a/runtime/fiber.c b/runtime/fiber.c
index 52d68fbbff..d86daf9141 100644
--- a/runtime/fiber.c
+++ b/runtime/fiber.c
@@ -670,14 +670,14 @@ static const value * cache_named_exception(const value * _Atomic * cache,
const char * name)
{
const value * exn;
- exn = atomic_load_explicit(cache, memory_order_acquire);
+ exn = atomic_load_acquire(cache);
if (exn == NULL) {
exn = caml_named_value(name);
if (exn == NULL) {
fprintf(stderr, "Fatal error: exception %s\n", name);
exit(2);
}
- atomic_store_explicit(cache, exn, memory_order_release);
+ atomic_store_release(cache, exn);
}
return exn;
}
diff --git a/runtime/interp.c b/runtime/interp.c
index 891096debf..92f6dd7d3e 100644
--- a/runtime/interp.c
+++ b/runtime/interp.c
@@ -1211,8 +1211,7 @@ value caml_interprete(code_t prog, asize_t prog_size)
accu = Val_int(*pc++);
/* We use relaxed atomic accesses to avoid racing with other domains
updating the cache */
- ofs = atomic_load_explicit((_Atomic opcode_t *)pc, memory_order_relaxed)
- & Field(meths,1);
+ ofs = atomic_load_relaxed((_Atomic opcode_t *)pc) & Field(meths,1);
if (*(value*)(((char*)&Field(meths,3)) + ofs) == accu) {
#ifdef CAML_TEST_CACHE
hits++;
@@ -1227,8 +1226,7 @@ value caml_interprete(code_t prog, asize_t prog_size)
if (accu < Field(meths,mi)) hi = mi-2;
else li = mi;
}
- atomic_store_explicit((_Atomic opcode_t *)pc, (li-3)*sizeof(value),
- memory_order_relaxed);
+ atomic_store_relaxed((_Atomic opcode_t *)pc, (li-3)*sizeof(value));
accu = Field (meths, li-1);
}
pc++;
diff --git a/runtime/lf_skiplist.c b/runtime/lf_skiplist.c
index 6cbe46d874..59434fee82 100644
--- a/runtime/lf_skiplist.c
+++ b/runtime/lf_skiplist.c
@@ -74,8 +74,7 @@ static int random_level(void) {
(Knuth vol 2 p. 106, line 15 of table 1), additive = 25173. */
while( 1 ) {
- uint32_t curr =
- atomic_load_explicit(&random_seed, memory_order_relaxed);
+ uint32_t curr = atomic_load_relaxed(&random_seed);
r = curr * 69069 + 25173;
@@ -97,7 +96,7 @@ static int random_level(void) {
/* Initialize a skip list */
void caml_lf_skiplist_init(struct lf_skiplist *sk) {
- atomic_store_explicit(&sk->search_level, 0, memory_order_relaxed);
+ atomic_store_relaxed(&sk->search_level, 0);
/* This concurrent skip list has two sentinel nodes, the first [head] is
less than any possible key in the data structure and the second [tail] is
@@ -125,11 +124,9 @@ void caml_lf_skiplist_init(struct lf_skiplist *sk) {
/* each level in the skip list starts of being just head pointing to tail */
for (int j = 0; j < NUM_LEVELS; j++) {
- atomic_store_explicit
- (&sk->head->forward[j], sk->tail, memory_order_release);
+ atomic_store_release(&sk->head->forward[j], sk->tail);
- atomic_store_explicit
- (&sk->tail->forward[j], NULL, memory_order_release);
+ atomic_store_release(&sk->tail->forward[j], NULL);
}
}
@@ -172,8 +169,7 @@ retry:
compare-and-swap.
*/
for (int level = NUM_LEVELS - 1; level >= 0; level--) {
- curr = LF_SK_UNMARK(
- atomic_load_explicit(&pred->forward[level], memory_order_acquire));
+ curr = LF_SK_UNMARK(atomic_load_acquire(&pred->forward[level]));
while (1) {
int is_marked;
@@ -210,10 +206,9 @@ retry:
This is why we need to a retry loop and yet another CAS. */
while (1) {
struct lf_skipcell *_Atomic current_garbage_head =
- atomic_load_explicit(&sk->garbage_head, memory_order_acquire);
+ atomic_load_acquire(&sk->garbage_head);
- atomic_store_explicit(&curr->garbage_next, current_garbage_head,
- memory_order_release);
+ atomic_store_release(&curr->garbage_next, current_garbage_head);
if (atomic_compare_exchange_strong(
&sk->garbage_head,
@@ -225,8 +220,7 @@ retry:
/* Now try to load the current node again. We need to check it too
hasn't been marked. If it has we repeat the process */
- curr = LF_SK_UNMARK(atomic_load_explicit(&pred->forward[level],
- memory_order_acquire));
+ curr = LF_SK_UNMARK(atomic_load_acquire(&pred->forward[level]));
LF_SK_EXTRACT(curr->forward[level], is_marked, succ);
}
@@ -271,11 +265,9 @@ static struct lf_skipcell *lf_skiplist_lookup(struct lf_skiplist *sk,
level then our only cost is an increased number of nodes searched. If we
did the same thing in the find function above then we'd also fail to snip
out marked nodes. If we did that for long enough we might leak memory. */
- for (int level =
- atomic_load_explicit(&sk->search_level, memory_order_relaxed);
+ for (int level = atomic_load_relaxed(&sk->search_level);
level >= 0; level--) {
- curr = LF_SK_UNMARK(
- atomic_load_explicit(&pred->forward[level], memory_order_acquire));
+ curr = LF_SK_UNMARK(atomic_load_acquire(&pred->forward[level]));
while (1) {
LF_SK_EXTRACT(curr->forward[level], marked, succ);
while (marked) {
@@ -355,8 +347,7 @@ int caml_lf_skiplist_insert(struct lf_skiplist *sk, uintnat key, uintnat data) {
if (found) {
/* Already present; update data */
- atomic_store_explicit((atomic_uintnat*)&succs[0]->data, data,
- memory_order_relaxed);
+ atomic_store_relaxed((atomic_uintnat*)&succs[0]->data, data);
return 1;
} else {
/* node does not exist. We need to generate a random top_level and
@@ -374,11 +365,10 @@ int caml_lf_skiplist_insert(struct lf_skiplist *sk, uintnat key, uintnat data) {
new_cell->top_level = top_level;
new_cell->key = key;
new_cell->data = data;
- atomic_store_explicit(&new_cell->garbage_next,NULL,memory_order_relaxed);
+ atomic_store_relaxed(&new_cell->garbage_next,NULL);
for (int level = 0; level <= top_level; level++) {
- atomic_store_explicit(&new_cell->forward[level], succs[level],
- memory_order_release);
+ atomic_store_release(&new_cell->forward[level], succs[level]);
}
/* Now we need to actually slip the node in. We start at the bottom-most
@@ -426,10 +416,8 @@ int caml_lf_skiplist_insert(struct lf_skiplist *sk, uintnat key, uintnat data) {
/* If we put the new node at a higher level than the current
[search_level] then to speed up searches we need to bump it. We don't
care too much if this fails though. */
- if (top_level >
- atomic_load_explicit(&sk->search_level, memory_order_relaxed)) {
- atomic_store_explicit(&sk->search_level, top_level,
- memory_order_relaxed);
+ if (top_level > atomic_load_relaxed(&sk->search_level)) {
+ atomic_store_relaxed(&sk->search_level, top_level);
}
return 1;
@@ -500,17 +488,15 @@ int caml_lf_skiplist_remove(struct lf_skiplist *sk, uintnat key) {
skiplist */
void caml_lf_skiplist_free_garbage(struct lf_skiplist *sk) {
- struct lf_skipcell *curr =
- atomic_load_explicit(&sk->garbage_head, memory_order_acquire);
+ struct lf_skipcell *curr = atomic_load_acquire(&sk->garbage_head);
struct lf_skipcell *head = sk->head;
while (curr != head) {
- struct lf_skipcell *next = atomic_load_explicit
- (&curr->garbage_next, memory_order_relaxed);
+ struct lf_skipcell *next = atomic_load_relaxed(&curr->garbage_next);
// acquire not useful, if executed in STW
caml_stat_free(curr);
curr = next;
}
- atomic_store_explicit(&sk->garbage_head, sk->head, memory_order_release);
+ atomic_store_release(&sk->garbage_head, sk->head);
}
diff --git a/runtime/major_gc.c b/runtime/major_gc.c
index f1245e5afc..7f1dac022d 100644
--- a/runtime/major_gc.c
+++ b/runtime/major_gc.c
@@ -1009,10 +1009,8 @@ void caml_darken_cont(value cont)
if (Ptr_val(stk) != NULL)
caml_scan_stack(&caml_darken, darken_scanning_flags, Caml_state,
Ptr_val(stk), 0);
- atomic_store_explicit(
- Hp_atomic_val(cont),
- With_status_hd(hd, caml_global_heap_state.MARKED),
- memory_order_release);
+ atomic_store_release(Hp_atomic_val(cont),
+ With_status_hd(hd, caml_global_heap_state.MARKED));
}
}
}
diff --git a/runtime/memory.c b/runtime/memory.c
index 3af3a6f72b..1907d5ce84 100644
--- a/runtime/memory.c
+++ b/runtime/memory.c
@@ -152,8 +152,7 @@ CAMLexport CAMLweakdef void caml_modify (volatile value *fp, value val)
/* See Note [MM] above */
atomic_thread_fence(memory_order_acquire);
- atomic_store_explicit(&Op_atomic_val((value)fp)[0], val,
- memory_order_release);
+ atomic_store_release(&Op_atomic_val((value)fp)[0], val);
}
/* Dependent memory is all memory blocks allocated out of the heap
diff --git a/runtime/minor_gc.c b/runtime/minor_gc.c
index 6092dcd80b..faad61c915 100644
--- a/runtime/minor_gc.c
+++ b/runtime/minor_gc.c
@@ -172,7 +172,7 @@ static void spin_on_header(value v) {
}
Caml_inline header_t get_header_val(value v) {
- header_t hd = atomic_load_explicit(Hp_atomic_val(v), memory_order_acquire);
+ header_t hd = atomic_load_acquire(Hp_atomic_val(v));
if (!Is_update_in_progress(hd))
return hd;
@@ -210,7 +210,7 @@ static int try_update_object_header(value v, volatile value *p, value result,
header_t desired_hd = In_progress_update_val;
if( atomic_compare_exchange_strong(Hp_atomic_val(v), &hd, desired_hd) ) {
/* Success. Now we can write the forwarding pointer. */
- atomic_store_explicit(Op_atomic_val(v), result, memory_order_relaxed);
+ atomic_store_relaxed(Op_atomic_val(v), result);
/* And update header ('release' ensures after update of fwd pointer) */
atomic_store_release(Hp_atomic_val(v), 0);
/* Let the caller know we were responsible for the update */
@@ -675,7 +675,7 @@ void caml_do_opportunistic_major_slice
if needed.
*/
void caml_empty_minor_heap_setup(caml_domain_state* domain_unused) {
- atomic_store_explicit(&domains_finished_minor_gc, 0, memory_order_release);
+ atomic_store_release(&domains_finished_minor_gc, 0);
/* Increment the total number of minor collections done in the program */
atomic_fetch_add (&caml_minor_collections_count, 1);
}
@@ -706,10 +706,8 @@ caml_stw_empty_minor_heap_no_major_slice(caml_domain_state* domain,
CAML_EV_BEGIN(EV_MINOR_LEAVE_BARRIER);
{
SPIN_WAIT {
- if( atomic_load_explicit
- (&domains_finished_minor_gc, memory_order_acquire)
- ==
- participating_count ) {
+ if (atomic_load_acquire(&domains_finished_minor_gc) ==
+ participating_count) {
break;
}
diff --git a/runtime/runtime_events.c b/runtime/runtime_events.c
index f4a91d6b39..1e5e141c0f 100644
--- a/runtime/runtime_events.c
+++ b/runtime/runtime_events.c
@@ -478,10 +478,8 @@ static void write_to_ring(ev_category category, ev_message_type type,
/* the head and tail indexes for the current domain's ring buffer (out of
the header) */
- uint64_t ring_head = atomic_load_explicit(&domain_ring_header->ring_head,
- memory_order_acquire);
- uint64_t ring_tail = atomic_load_explicit(&domain_ring_header->ring_tail,
- memory_order_acquire);
+ uint64_t ring_head = atomic_load_acquire(&domain_ring_header->ring_head);
+ uint64_t ring_tail = atomic_load_acquire(&domain_ring_header->ring_tail);
/* since rings can only be powers of two in size, we use this mask to cheaply
convert the head and tail indexes in to the physical offset in the ring
@@ -519,8 +517,8 @@ static void write_to_ring(ev_category category, ev_message_type type,
ring_head += RUNTIME_EVENTS_ITEM_LENGTH(head_header);
- atomic_store_explicit(&domain_ring_header->ring_head, ring_head,
- memory_order_release); // advance the ring head
+ // advance the ring head
+ atomic_store_release(&domain_ring_header->ring_head, ring_head);
}
if (padding_required > 0) {
@@ -532,8 +530,7 @@ static void write_to_ring(ev_category category, ev_message_type type,
ring_tail += ring_distance_to_end;
- atomic_store_explicit(&domain_ring_header->ring_tail, ring_tail,
- memory_order_release);
+ atomic_store_release(&domain_ring_header->ring_tail, ring_tail);
ring_tail_offset = 0;
}
@@ -553,17 +550,16 @@ static void write_to_ring(ev_category category, ev_message_type type,
memcpy(&ring_ptr[ring_tail_offset], content + word_offset,
event_length * sizeof(uint64_t));
}
- atomic_store_explicit(&domain_ring_header->ring_tail,
- ring_tail + length_with_header_ts,
- memory_order_release);
+ atomic_store_release(&domain_ring_header->ring_tail,
+ ring_tail + length_with_header_ts);
}
/* Functions for putting runtime data on to the runtime_events */
static inline int ring_is_active(void) {
return
- atomic_load_explicit(&runtime_events_enabled, memory_order_relaxed)
- && !atomic_load_explicit(&runtime_events_paused, memory_order_relaxed);
+ atomic_load_relaxed(&runtime_events_enabled)
+ && !atomic_load_relaxed(&runtime_events_paused);
}
void caml_ev_begin(ev_runtime_phase phase) {
diff --git a/testsuite/tests/lf_skiplist/stubs.c b/testsuite/tests/lf_skiplist/stubs.c
index 991483e408..75296cd92b 100644
--- a/testsuite/tests/lf_skiplist/stubs.c
+++ b/testsuite/tests/lf_skiplist/stubs.c
@@ -68,8 +68,7 @@ static uintnat count_marks(struct lf_skiplist *sk) {
while (p) {
for (int k = p->top_level; k >= 0; k--) {
- succ =
- (uintptr_t)atomic_load_explicit(&p->forward[k],memory_order_relaxed);
+ succ = (uintptr_t)atomic_load_relaxed(&p->forward[k]);
if (LF_SK_IS_MARKED(succ)) r++ ;
}
p = LF_SK_UNMARK(succ);