path: root/test/nvmem.c
author	Scott <scollyer@chromium.org>	2016-06-15 15:33:25 -0700
committer	chrome-bot <chrome-bot@chromium.org>	2016-06-22 13:22:50 -0700
commit	b6920f42f933c06c4c362856a5d257ffccb8ba8c (patch)
tree	ad48e961343fcf83f68a307fe88c91d25e7d19b3 /test/nvmem.c
parent	00aef53a7b4c1a302b8f1448f2c634712c7b77eb (diff)
download	chrome-ec-b6920f42f933c06c4c362856a5d257ffccb8ba8c.tar.gz
Cr50: NvMem: Added mutex lock protection for cache memory
Added a mutex lock for nvmem write/move operations. In the current implementation there is no single entry point for the platform-specific NvMem calls, so the mutex lock is coupled with a task number so that the same task can attempt to grab the lock again without stalling itself.

In addition to the mutex lock, changed where the cache.base_ptr variable is updated. Previously this was done before the partition was copied from flash to the shared memory area. Now the variable is only updated after the copy, so that read operations always read correctly from either flash, or from cache memory once a write operation has been started.

BRANCH=none
BUG=chrome-os-partner:52520
TEST=Manual
make runtests TEST_LIST_HOST=nvmem and verify that all tests pass.
Tested with the tcg_test utility to exercise reads/writes using the command
"build/test-tpm2/install/bin/compliance --ntpm localhost:9883 --select CPCTPM_TC2_3_33_07_01".

Change-Id: Ib6f278ad889424f4df85e4a328da1f45c8d00730
Signed-off-by: Scott <scollyer@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/353026
Commit-Ready: Scott Collyer <scollyer@chromium.org>
Tested-by: Scott Collyer <scollyer@chromium.org>
Reviewed-by: Bill Richardson <wfrichar@chromium.org>
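The task-aware locking described in the commit message lives outside this test file, so it does not appear in the diff below. A minimal sketch of the idea, not the actual Cr50 implementation: the names nvmem_lock_cache, nvmem_release_cache, and cache_holder are illustrative, while struct mutex, mutex_lock()/mutex_unlock(), task_get_current(), and TASK_ID_INVALID are the EC's standard task primitives.

#include "task.h"

/* Illustrative sketch only; not the code added by this change. */
static struct mutex cache_mtx;
static task_id_t cache_holder = TASK_ID_INVALID;

static void nvmem_lock_cache(void)
{
	/*
	 * Record the holder's task id so the task that already owns the
	 * lock can re-enter without stalling on its own mutex.
	 */
	if (cache_holder == task_get_current())
		return;

	mutex_lock(&cache_mtx);
	cache_holder = task_get_current();
	/*
	 * Per the commit message, cache.base_ptr would only be updated
	 * after the partition has been copied from flash into the shared
	 * memory area, so readers never see a half-populated cache.
	 */
}

static void nvmem_release_cache(void)
{
	cache_holder = TASK_ID_INVALID;
	mutex_unlock(&cache_mtx);
}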
Diffstat (limited to 'test/nvmem.c')
-rw-r--r--	test/nvmem.c	92
1 file changed, 92 insertions, 0 deletions
diff --git a/test/nvmem.c b/test/nvmem.c
index 2567853341..d3bc0b99e1 100644
--- a/test/nvmem.c
+++ b/test/nvmem.c
@@ -28,6 +28,7 @@ uint32_t nvmem_user_sizes[NVMEM_NUM_USERS] = {
static uint8_t write_buffer[NVMEM_PARTITION_SIZE];
static uint8_t read_buffer[NVMEM_PARTITION_SIZE];
static int flash_write_fail;
+static int lock_test_started;
void nvmem_compute_sha(uint8_t *p_buf, int num_bytes, uint8_t *p_sha,
int sha_bytes)
@@ -444,6 +445,95 @@ static int test_is_different(void)
return EC_SUCCESS;
}
+int nvmem_first_task(void *unused)
+{
+ uint32_t offset = 0;
+ uint32_t num_bytes = WRITE_SEGMENT_LEN;
+ int user = NVMEM_USER_0;
+
+ task_wait_event(0);
+ /* Generate source data */
+ generate_random_data(0, num_bytes);
+ nvmem_write(0, num_bytes, &write_buffer[offset], user);
+ /* Read from cache memory */
+ nvmem_read(0, num_bytes, read_buffer, user);
+ /* Verify that write to nvmem was successful */
+ TEST_ASSERT_ARRAY_EQ(write_buffer, read_buffer, num_bytes);
+ /* Wait here with mutex held by this task */
+ task_wait_event(0);
+ /* Write to flash which releases nvmem mutex */
+ nvmem_commit();
+ nvmem_read(0, num_bytes, read_buffer, user);
+ /* Verify that write to flash was successful */
+ TEST_ASSERT_ARRAY_EQ(write_buffer, read_buffer, num_bytes);
+
+ return EC_SUCCESS;
+}
+
+int nvmem_second_task(void *unused)
+{
+ uint32_t offset = WRITE_SEGMENT_LEN;
+ uint32_t num_bytes = WRITE_SEGMENT_LEN;
+ int user = NVMEM_USER_0;
+
+ task_wait_event(0);
+
+ /* Generate test data; don't overwrite the data generated by the 1st task */
+ generate_random_data(offset, num_bytes);
+ /* Write test data at offset 0 of the nvmem user buffer */
+ nvmem_write(0, num_bytes, &write_buffer[offset], user);
+ /* Write to flash */
+ nvmem_commit();
+ /* Read from nvmem */
+ nvmem_read(0, num_bytes, read_buffer, user);
+ /* Verify that write to nvmem was successful */
+ TEST_ASSERT_ARRAY_EQ(&write_buffer[offset], read_buffer, num_bytes);
+ /* Clear flag to indicate lock test is complete */
+ lock_test_started = 0;
+
+ return EC_SUCCESS;
+}
+
+static int test_lock(void)
+{
+ /*
+ * The purpose of this test is to verify the mutex lock portion of the
+ * nvmem module. Two additional tasks are used. The first task is
+ * woken; it creates some test data and calls nvmem_write(), which
+ * causes the mutex to be locked by the 1st task. The 1st task then
+ * waits, control returns to this function, and the 2nd task is woken.
+ * The 2nd task also attempts to write data to nvmem and should stall
+ * waiting for the mutex to be unlocked.
+ *
+ * When control returns to this function, the 1st task is woken again
+ * and completes its nvmem operation. This allows the 2nd task to grab
+ * the lock and finish its nvmem operation. The test does not complete
+ * until the 2nd task finishes the nvmem write. A static global flag is
+ * used to let this function know when the 2nd task is complete.
+ *
+ * Both tasks write to the same location in nvmem, so the test only
+ * passes if the 2nd task cannot write until the nvmem write in the
+ * 1st task has completed.
+ */
+
+ /* Set flag for start of test */
+ lock_test_started = 1;
+ /* Wake first_task */
+ task_wake(TASK_ID_NV_1);
+ task_wait_event(1000);
+ /* Wake second_task. It should stall waiting for mutex */
+ task_wake(TASK_ID_NV_2);
+ task_wait_event(1000);
+ /* Go back to first_task so it can complete its nvmem operation */
+ task_wake(TASK_ID_NV_1);
+ /* Wait for 2nd task to complete nvmem operation */
+ while (lock_test_started)
+ task_wait_event(100);
+
+ return EC_SUCCESS;
+}
+
static void run_test_setup(void)
{
/* Allow Flash erase/writes */
@@ -471,5 +561,7 @@ void run_test(void)
RUN_TEST(test_move);
/* Test NvMem IsDifferent function */
RUN_TEST(test_is_different);
+ /* Test Nvmem write lock */
+ RUN_TEST(test_lock);
test_print_result();
}
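For completeness, the two helper tasks referenced as TASK_ID_NV_1 and TASK_ID_NV_2 need matching entries in the test's tasklist, which is not part of this diff. A minimal sketch, assuming the EC test framework's TASK_TEST macro and the usual TASK_STACK_SIZE; the actual test/nvmem.tasklist contents may differ:

/* Hypothetical tasklist entries for the two lock-test tasks */
#define CONFIG_TEST_TASK_LIST \
	TASK_TEST(NV_1, nvmem_first_task, NULL, TASK_STACK_SIZE) \
	TASK_TEST(NV_2, nvmem_second_task, NULL, TASK_STACK_SIZE)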