path: root/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c
author    lundinc <lundinc@1d2547de-c912-0410-9cb9-b8ca96c0e9e2>  2020-08-12 19:11:51 +0000
committer lundinc <lundinc@1d2547de-c912-0410-9cb9-b8ca96c0e9e2>  2020-08-12 19:11:51 +0000
commit    42255af1e27a3157d541f0812eaca447c569ca49 (patch)
tree      5c8702c2f0dc1cb9be1a4d5ff285897d96b97dd2 /FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c
parent    f5221dff43de249079c2da081723cb7a456f981f (diff)
download  freertos-master.tar.gz
commit 70dcbe4527a45ab4fea6d58c016e7d3032f31e8c (HEAD -> master)
Author: Ming Yue <mingyue86010@gmail.com>
Date:   Tue Aug 11 17:06:59 2020 -0700
    Remove unused wolfSSL files. (#197)
    * Remove unused wolfSSL files. * Add back some removed ciphers. * Update VS project file.

commit 0e0edd96e8236b2ea4a6e6018812807be828c77f
Author: RichardBarry <3073890+RichardBarry@users.noreply.github.com>
Date:   Tue Aug 11 10:50:30 2020 -0700
    Use new QEMU test project to improve stream/message buffer tests (#168)
    * Add Eclipse/GCC project that targets the LM3S8962 QEMU model. * Get the Cortex-M QEMU project working. * Continue working on making the stream buffer demo and the QEMU project more robust. * Rename directory CORTEX_LM3S8986_QEMU to CORTEX_LM3S6965_QEMU. Work on making the Stream Buffer tests more robust. Check in before adding in the trace recorder. * Rename CORTEX_LM3S6969_QEMU to CORTEX_LM3S6969_GCC_QEMU. * Make the StreamBufferDemo.c common demo file (test file) more robust to other test tasks running at an equally high priority. * Work-in-progress check-in only - comments in main.c are incorrect. * Correct comments at the top of FreeRTOS/Demo/CORTEX_LM3S6965_GCC_QEMU/main.c. Make the message buffer tests more robust in the case that a message buffer becomes full when prvSenderTask() has a higher priority than the reader task. * Disable trace recorder in the LM3S6965 QEMU demo. * I'm dropping the FreeRTOS-Kernel reference update, since this seems to break the CBMC CI.
    Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>

commit 157a7fc39f19583ac8481e93fa3e1c91b1e1860c
Author: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
Date:   Sun Aug 9 22:21:44 2020 -0700
    Use cacheable RAM in IAR project for MPU_M7_NUCLEO_H743ZI2 project (#193)
    This change updates the IAR project for Nucleo H743ZI2 to use the cacheable DTC RAM and enables the L1 cache. In order to ensure the correct functioning of the cache, the project sets configTEX_S_C_B_SRAM in FreeRTOSConfig.h to not mark the RAM as shareable.
    Signed-off-by: Gaurav Aggarwal <aggarg@amazon.com>

commit f3e43556f90f01b82918ad533b0c616489331919
Author: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
Date:   Sun Aug 9 16:23:53 2020 -0700
    Add MPU demo projects for NUCLEO-H743ZI2 board (#155)
    * Add MPU demo projects for NUCLEO-H743ZI2 board. It contains projects for Keil uVision, STM32CubeIDE and IAR EW. This demo shows the use of the newly added support for 16 MPU regions. * Delete CMSIS files that are not needed.
    Signed-off-by: Gaurav Aggarwal <aggarg@amazon.com>

commit 94aa31c3cbae7c929b8a412768b74631f4a6b461
Author: TakayukiMatsuo <62984531+TakayukiMatsuo@users.noreply.github.com>
Date:   Sat Aug 8 07:58:14 2020 +0900
    Update wolfSSL to the latest version (v4.4.0) (#186)
    * deleted old version wolfSSL before updating * updated wolfSSL to the latest version (v4.4.0) * added macros for timing resistance
    Co-authored-by: RichardBarry <3073890+RichardBarry@users.noreply.github.com> Co-authored-by: Ming Yue <mingyue86010@gmail.com>

commit 68518f5866aac58793c737d9a46dd07a6a816aaf
Author: RichardBarry <3073890+RichardBarry@users.noreply.github.com>
Date:   Fri Aug 7 14:59:24 2020 -0700
    Removed a 16MByte flash image file that was checked in by mistake (several years ago). (#173)
    Remove the copies of lwIP that are no longer referenced from demo projects.
    Co-authored-by: Carl Lundin <53273776+lundinc2@users.noreply.github.com>

commit d4bf09480a2c77b1a25cce35b32293be61ab586f
Author: m17336 <45935231+m17336@users.noreply.github.com>
Date:   Thu Aug 6 22:37:08 2020 +0300
    Update previous AVR ATmega0 and AVR Dx projects + addition of equivalent projects in MPLAB.X and IAR (#180)
    * Updated indentation in AVR_ATMega4809_Atmel_Studio and AVR_Dx_Atmel_Studio projects, plus small fixes in their readme files. * Added AVR_ATMega4809_IAR, AVR_ATMega4809_MPLAB.X, AVR_Dx_IAR and AVR_Dx_MPLAB.X demo projects. * Removed build artefacts and added .gitignore files in AVR_ATMega4809_MPLAB.X and AVR_Dx_MPLAB.X projects.
    Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>

commit f32a0647c8228ddd066f5d69a85b2e49086e4c95
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Mon Aug 3 16:45:10 2020 -0700
    Remove CBMC patch which is not used anymore (#187)
    * Delete 0002-Change-FreeRTOS_IP_Private.h-union-to-struct.patch

commit 08af68ef9049279b265c3d00e9c48fb9594129a8
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Sat Aug 1 16:38:23 2020 -0700
    Remove dependency of CBMC on patches (#181)
    * Changes to DHCP * CBMC DNS changes * Changes for TCP_IP * Changes to TCP_WIN * Define away static to nothing * Remove patches * Changes after Mark's comments v1 * Update MakefileCommon.json * Correction!

commit a7fec906a415363338449447daf10d7517b78848
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Jul 29 17:39:36 2020 -0700
    Misc changes (#183)

commit 07cf5e07e4a05d6775a2f9e753269f43f82cf6ba
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Jul 29 16:15:38 2020 -0700
    MISRA compliance changes for FreeRTOS+TCP headers (#165)
    * misra changes * Update FreeRTOS_IP_Private.h * Update FreeRTOS_IP_Private.h

commit e903ac0fed7ce59916899e404f3e5ae5b08d1478
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Jul 29 16:03:14 2020 -0700
    UPD MISRA changes (#164)
    Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>

commit 97551bf44e7dc7dc1e4484a8fd30f699255e8569
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Jul 29 15:52:00 2020 -0700
    MISRA changes in FreeRTOS_TCP_WIN.c (#162)

commit f2611cc5e5999c4c87e040a8c2d2e6b5e77a16a6
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Jul 29 15:38:37 2020 -0700
    MISRA compliance changes in FreeRTOS_Sockets{.c/.h} (#161)
    * MISRA changes Sockets * add other changes * Update FreeRTOSIPConfig.h * Update FreeRTOSIPConfig.h * Update FreeRTOSIPConfig.h * Update FreeRTOSIPConfig.h * correction * Add 'U' * Update FreeRTOS_Sockets.h * Update FreeRTOS_Sockets.h * Update FreeRTOS_Sockets.c * Update FreeRTOS_Sockets.h * Update after Gary's comments * Correction reverted

commit ae4d4d38d9b2685bae159b4c87619cdb157c0bf7
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Jul 29 13:56:57 2020 -0700
    MISRA compliance changes for FreeRTOS_TCP_IP.c (#160)
    * MISRA tcp-ip changes * Changes after Hein's comments on original PR * Update FreeRTOS_TCP_IP.c
    Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>

commit a457f43c66eb0f4be9d8f8678c0e3fb8d7ebd57b
Author: Carl Lundin <53273776+lundinc2@users.noreply.github.com>
Date:   Tue Jul 28 13:01:38 2020 -0700
    Add missing error state assignment. (#166)

commit 915af50524e15a78ceb6c62b3d33f6562621ee46
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Mon Jul 27 17:30:53 2020 -0700
    Add Atmel Studio projects for ATMega4809 and AVR128DA48 (#159)
    * Added explicit cast to allow rollover and avoid integer promotion during cycle-counter comparison in recmutex.c. * Fixed type mismatch between declaration and definition of function xAreSemaphoreTasksStillRunning( void ). * Added Atmel Studio demo projects for ATMega4809 and AVR128DA48. * Per https://www.freertos.org/upgrading-to-FreeRTOS-V8.html, I'm updating portBASE_TYPE to BaseType_t. * Update register test for ATmega4809 - to cover r28, r29, r31, and to call the public API taskYIELD() instead of portYIELD(). * Update ATmega4809 readme.md to include info for serial port setup, and minor wording fix.
    Signed-off-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
    Co-authored-by: Alexandru Niculae - M17336 <alexandru.niculae@microchip.com>

commit 4a7a48790d64127f85cc763721b575c51c452833
Author: Carl Lundin <53273776+lundinc2@users.noreply.github.com>
Date:   Thu Jul 23 10:22:33 2020 -0700
    Add Uncrustify file used for Kernel. (#163)

commit e0d62163b08769fd74f020709c398f994088ca96
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Jul 22 18:06:23 2020 -0700
    Sync with +TCP amazon-FreeRTOS (#158)
    * DNS.c commit * IP.c commit * Add various source & header files

commit 8e36bee30eef2107e128edb58e83ee46e8241a91
Author: Nathan Chong <52972368+nchong-at-aws@users.noreply.github.com>
Date:   Tue Jul 21 12:51:20 2020 -0400
    Prove buffer lemmas (#124)
    * Prove buffer lemmas * Update queue proofs to latest kernel source; all changes were syntactic due to uncrustify code-formatting * Strengthen prvCopyDataToQueue proof * Add extract script for diff comparison
    Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>

commit c720c18ada40b502436ea811e8d03dca919726d8
Author: Hein Tibosch <hein_tibosch@yahoo.es>
Date:   Tue Jul 14 05:35:44 2020 +0800
    FreeRTOS+TCP: Adding the combined driver for SAM4E and SAME70, v2 (#78)
    * Adding a combined +TCP driver for SAM4E and SAME70 * Changes after review from Aniruddha
    Co-authored-by: Hein Tibosch <hein@htibosch.net> Co-authored-by: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>

commit 4237049b12d9bb6b03694fecf6ea26a353e637c8
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Mon Jul 13 12:07:56 2020 -0700
    Add changes from 2225-2227 amazon-FreeRTOS (#134)

commit 7caa32863458c4470d3c620945c30824199f524c
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Fri Jul 10 23:32:30 2020 -0700
    Add Full TCP test suite - not using secure sockets (#131)
    * Add Full-TCP suite * delete unnecessary files * Change after Joshua's comments

commit d7667a0034841f2968f9f9f805030cc608bfbce1
Author: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
Date:   Fri Jul 3 15:45:44 2020 -0700
    Remove unnecessary semicolon from the linker file (#121)
    This was creating a problem with the onboard LPCLink debug probe.
    Signed-off-by: Gaurav Aggarwal <aggarg@amazon.com>

commit 529c481c39506d0b331bfd0cdea35e5d1aeaaad0
Author: Nathan Chong <52972368+nchong-at-aws@users.noreply.github.com>
Date:   Thu Jul 2 15:55:20 2020 -0400
    Add VeriFast kernel queue proofs (#117)

commit d5fedeaa96b5b1d3c0f6b9b52a8064ab72ff2821
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Jul 1 13:56:27 2020 -0700
    Add checks in FreeRTOS_Socket.c (#104)
    * Add fail-safes to FreeRTOS_Socket.c * Use all 'pd' errors * Correction after Hein's comments * Correction after Hein's comments v2 * Changes after Hein's comments * Update after Gary's comments

commit a9b2aac4e9fda2a259380156df9cc0af51384d2d
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Fri Jun 26 12:09:36 2020 -0700
    Folder structure change + fix broken projects (#103)
    * Update folder structure * Correct project files * Move test folder * Some changes after Yuki's comments

commit 98bfc38bf3404414878dc68ea41753bea4e24c8e
Author: Hein Tibosch <hein_tibosch@yahoo.es>
Date:   Thu Jun 25 13:01:45 2020 +0800
    FreeRTOS+TCP: add memory statistics and dump packets, v3 (#83)
    * FreeRTOS+TCP: add memory statistics and dump packets, v3 * Two changes as requested by Aniruddha
    Co-authored-by: Hein Tibosch <hein@htibosch.net> Co-authored-by: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>

commit 072a173c9df31c75ff64bde440f3f316cedb9033
Author: S.Burch <8697966+wholl0p@users.noreply.github.com>
Date:   Mon Jun 22 23:39:26 2020 +0200
    Fixed imports for Infineon XMC1100 board (#88)
    Co-authored-by: RichardBarry <3073890+RichardBarry@users.noreply.github.com>

commit 2df5eeef5763045c4c74ff0e2a4091b7d19bea89
Author: RichardBarry <3073890+RichardBarry@users.noreply.github.com>
Date:   Mon Jun 8 14:22:46 2020 -0700
    Feature/multiple direct-to-task notifications (#73)
    * Add TaskNotifyArray.c with the single-task tests updated to use the task notification array up to the point where the timer is created. * Continue working on TaskNotifyArray.c to test the new task notification indexes. Next TaskNotifyArray.c will be refactored to break the tests up a bit. * Refactor and update the comments in TaskNotifyArray.c - no functional changes. * Change from the task notify "array" to task notification "indexed" nomenclature in the new task notification API functions that work on one particular task notification within the array of task notifications. * Update the implementation of the taskNOTIFY_TAKE() and taskNOTIFY_WAIT() trace macros to take the array index of the task notification they are acting on. Rename configNUMBER_OF_TASK_NOTIFICATIONS to configTASK_NOTIFICATION_ARRAY_ENTRIES. Add FreeRTOS/Demo/Common/Minimal/TaskNotifyArray.c to the Visual Studio project - the file implements tests specific to the behaviour of the indexed task notification functions and should be used in addition to the tests already provided in FreeRTOS/Demo/Common/Minimal/TaskNotify.c.

commit b9e4ecfaf7286d8493d4a96a93fbb325534ad97b
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Fri Jun 5 11:10:58 2020 -0700
    Remove empty and unreferenced folder from Demo (#86)

commit f11bcc8acc57a23fb03603762e758c25b9d0efb7
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Jun 3 16:52:31 2020 -0700
    Fix a bug and corresponding CBMC patch (#84)
    * Update remove-static-in-freertos-tcp-ip.patch * Update FreeRTOS_TCP_IP.c * Update remove-static-in-freertos-tcp-ip.patch * Update remove-static-in-freertos-tcp-ip.patch
    Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>

commit bb9f92f771e5f6ea2b9b09c7e89130a75e562eb7
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Wed Jun 3 10:46:55 2020 -0700
    Submodule FreeRTOS/Source 10bbbcf0b..6199b72fb (#82)

commit 6efc39f44be5b269168836e95aebbdb8ae77dce3
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Tue Jun 2 15:09:25 2020 -0700
    Add project for running integration tests, v2 (#80)
    * Project for integration tests * relative paths in project files * relative paths in project files-1 * relative paths in project files-2 * addressed comments * addressed comments v2
    Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>

commit 0eb5909fb02bac9dc074ff1bc2fe338d77f73764
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Thu May 28 17:05:24 2020 -0700
    readme.md for ATmega328PB Xplained Mini. (#76)
    readme.md to get users jump-started.

commit cb7edd2323a77f3dbea144c1f48f95582becc99e
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Thu May 28 10:11:58 2020 -0700
    Sync with a:FR (#75)
    * AFR sync * AFR sync: CBMC * AFR sync: CBMC: remove .bak files * AFR sync: CBMC: more cleanup * Corrected CBMC proofs * Corrected CBMC patches * Corrected CBMC patches-1 * Corrected CBMC patches-2 * remove .bak files (3)
    Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>

commit 6557291e5407ca7ec6beca53fced1aaa620c5c02
Author: alfred gedeon <alfred2g@hotmail.com>
Date:   Wed May 27 14:44:33 2020 -0700
    Test: Add Linux networking support with demo application (#71)
    * Test: Add Linux Networking support with demo application * Test: revert files affected by uncrustify * Test: revert files affected by uncrustify
    Co-authored-by: Alfred Gedeon <gedeonag@amazon.com> Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>

commit 8b079bc394e7b205d72210ce9e052404d782938f
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Wed May 27 10:44:03 2020 -0700
    ATmega328PB Xplained Mini -- demo project for ATmega port. (#70)
    * Bootstrap a demo from START. No driver is added in this commit. * Add FreeRTOS source code to project. Remove unnecessary folder nesting. Heap_4 is used here. * Copy over main.c, FreeRTOSConfig.h, and regtest.{c, h}. This commit compiles, but will need some work on the timer used. * This port has 2KB RAM. We are using 1KB for heap. Further decreasing minimum stack size, and also use stack overflow check 1 to save some stack space. * Preserve EEPROM set to false. * End of the line. * Reduce register test stack size: 32 8-bit registers + 10 bytes for stack frame cost, rounded up to 50. * Adding queue test in integer test: -g3 to ease debugging; mainCHECK_PERIOD is set to 1000 ticks. Note that this port for now uses the WDT as the tick timer, with the period set to 15ms. vErrorChecks is of highest priority, so if this task gets run before other tasks, the very first check will fail. * Avoid false alarm: since we don't know in which order the tasks are scheduled, clear any error for the first entry of vErrorChecks. * ParTest.c to init, set, toggle onboard user LED at PB5. * Added a task to blink onboard user LED. Need a magic number for stack size. * Explicitly setting time slicing to 0. This is to avoid unnecessary context switches when multiple tasks are of the same priority. * Add taskYIELD() at the end of the loop in each register test task. This is to give other tasks of the same priority a chance to run, regardless of scheduling algorithm. * minor, update comment in main.c.
    Signed-off-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>

commit 95a3a02f95749fb7a600723076e291f9dee7426c
Author: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Fri May 22 16:26:59 2020 -0700
    FreeRTOS-Plus: Unit testing infrastructure and examples (#72)
    * Added CMock as submodule * Makefile added * Removed TEMP from Makefile * Added configuration files and header files * Update Makefile * Test runner working * make clean * Example added with README * Update README.md * Restored +TCP files * Cleared +TCP changes * removed comments from Makefile * Update README.md * Update README.md * Update README.md * Updated Test/Unit-test/readme.md

commit 5003d17feda25490e655c0f1c15d2b13e395c9f7
Author: Hein Tibosch <hein_tibosch@yahoo.es>
Date:   Wed May 6 14:16:56 2020 -0400
    FreeRTOS+TCP: renewing DHCP lease while network is down (#53)
    Co-authored-by: Hein Tibosch <hein@htibosch.net> Co-authored-by: Gary Wicker <14828980+gkwicker@users.noreply.github.com>

commit d95624c5d6ba95ec0474867d7165de2c28ed41b7
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Tue May 5 09:57:18 2020 -0700
    Move CBMC proofs to FreeRTOS+ directory (#64)
    * move CBMC proofs to FreeRTOS+ directory * Failing proofs corrected * ParseDNSReply proof added back * removed queue_init.h from -Plus/Test
    Co-authored-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>

commit 95ae7c65758a9473ea16ab08182f056f72331de2
Author: markrtuttle <tuttle@acm.org>
Date:   Wed Apr 29 04:27:45 2020 +0000
    Change cbmc-viewer invocation in CBMC makefile (#63)
    * Exclude FreeRTOS/Demo from CBMC proof reports. The script cbmc-viewer generates the CBMC proof reports. The script searches source files for symbol definitions and annotates source files with coverage information. This patch causes cbmc-viewer to ignore the directory FreeRTOS/Demo containing 348M of data. The script now terminates in a few seconds. * Make report the default target for the CBMC Makefile. Modify the Makefile for CBMC proofs to generate the report by default (and not just property checking) and modify property checking to ignore failures (due to property assertions failing) rather than terminating report generation.
    Co-authored-by: Mark R. Tuttle <mrtuttle@amazon.com>

commit d421ccc89f6f6473dfdd566a00567b0e1fd4cfc3
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Sat Apr 25 16:57:35 2020 -0700
    Reword readme.md under ./Test. (#61)

commit 38412865985235b90dbd9da9708b68c4de5918f5
Author: Carl Lundin <53273776+lundinc2@users.noreply.github.com>
Date:   Sat Apr 25 16:56:54 2020 -0700
    Removed a:FR reference. (#60)

commit 4db195c916c7b13c82ab3a34a499fe606f266810
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Tue Apr 21 15:40:08 2020 -0700
    Adding FreeRTOS+TCP CBMC proofs to FreeRTOS/FreeRTOS (#56)
    ParseDNSReply is to be added in the next PR.

commit 40a31b6d35a866a3a6c551d95bf08dae855da5bd
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Mon Apr 13 13:58:33 2020 -0700
    'uL' -> 'UL'

commit 5b3a289b69fc92089aa8bd4d1b44ab816f326f73
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Mon Apr 13 13:50:53 2020 -0700
    Changes after Gary's comments

commit edf68637dd22470a8d4f59fecc15b51379bcfeda
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Fri Apr 10 16:26:03 2020 -0700
    Update FreeRTOS_ARP.c

commit 35f3ac32a8899dd714a8a48952a4224fbcebc4aa
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Fri Apr 10 15:56:18 2020 -0700
    Correct debug output

commit 5e12a70db4b6a8e68a434489683306f040252efa
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Fri Apr 10 15:44:45 2020 -0700
    Debugging flag check added

commit 4e8ac8de25ac4088b9c789b88a77cd39df4d9167
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Thu Apr 9 16:57:19 2020 -0700
    Comment style consistency and Yuhui's suggestions

commit e43f7cd086096ad60491fedba69927a1e1a82f20
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Thu Apr 9 16:47:41 2020 -0700
    Cleanup

commit ab3b51c7a0d880a6bf453ec63ae604e15050f310
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Thu Apr 9 16:33:03 2020 -0700
    Update after Gary's comments

commit 97f7009699ffb972c0745dfdb526d1fa4e0faf84
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Apr 8 14:30:15 2020 -0700
    Update after Richard's comments

commit a9fcafc074cec559dd67961ef44273df6180c2db
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Apr 8 14:07:39 2020 -0700
    Corrected the formatting - Visual Studio had messed up the formatting

commit c381861014a8043ce30723fc5a8cf5107719c8df
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Apr 8 13:01:12 2020 -0700
    Commit 2 after Gary's comments

commit 75677a8d85fa802cca9058d6e23796d5043a0982
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Apr 8 12:51:10 2020 -0700
    Commit after Gary's comments

commit 666c0da366030109db2c0c5e7253cebb2f899db7
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Apr 8 10:56:01 2020 -0700
    Update after Yuhui's comments
    - removed (void) from before memcpy, memset etc. - corrected memcpy style as suggested by Yuhui - added logging for xNetworkInterfaceOutput; no need to configASSERT

commit 4a1148d15b6b8169d2412f8179f734683b179795
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Apr 1 16:05:36 2020 -0700
    Coverity + MISRA compliance
    Modified code to conform to the MISRA directives more closely.

commit fa74f7dccf6b1a356993c6a894f8e1173b8c8157
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Thu Apr 2 20:26:10 2020 -0700
    Removing writes to read-only PLIC interrupt pending registers.
    Signed-off-by: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>

commit 5b9777e11e16609648fb98d2f9a47553ab238950
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Tue Mar 31 10:45:23 2020 -0700
    A readme file to introduce what the ./Test directory is about.

commit 211bb4cbd9ae6dfa95e8d8501f37d272bde5ab26
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Tue Mar 24 15:14:24 2020 -0700
    Ignore whitespace when working with patches.

commit 8156f64d1c45dd59ef12279f19a99f03e79e1f8a
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Tue Feb 25 18:04:23 2020 -0800
    Copying CBMC proofs from the aws/amazon-freertos repo ./tools/cbmc to this repo's ./FreeRTOS/Test/CBMC as is. The commit ID in aws/amazon-freertos is 0c8e0217f2a43bdeb364b58ae01c6c259e03ef1b.

commit 9f316c246baafa15c542a5aea81a94f26e3d6507
Author: David Vrabel <david.vrabel@cambridgeconsultants.com>
Date:   Mon Mar 16 11:21:46 2020 +0000
    Demo/Posix_GCC: add demo application for Posix port using GCC
    This is largely a copy of the Windows demo application with a few key changes: heap_3 (use malloc()/free()) so tools like valgrind "just work"; printf() wrapped in a mutex to prevent deadlocks on the internal pthread mutexes inside printf(). SCons (https://scons.org/) is used as the build system. This will be built as a 64-bit application, but note that the memory allocation trace points only record the lower 32 bits of the address.

commit f78f919b3e2f0d707531a301a8ca07cd02bc4778
Author: Markus Rinne <markus.ka.rinne@gmail.com>
Date:   Thu Mar 19 21:00:24 2020 +0200
    Fix function comments

commit 1cd2d38d960a3576addb224582c88489bade5141
Author: David Chalco <david@chalco.io>
Date:   Fri Mar 20 10:29:05 2020 -0700
    Unix separators for path and remove .exe suffix from RISC compiler (works on Windows/Mac)

commit 938b19419eded12817737ab0644e94ed2ba7e95d
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Thu Mar 19 18:23:09 2020 -0700
    Removing the ./FreeRTOS-Labs directory, since: IoT libraries are now in the LTS branch; FAT/POSIX/Light-weight MQTT are in https://github.com/FreeRTOS/FreeRTOS-Labs.

commit 1a4abbc9e91b13fd6394464ade59d5e048320c7c
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Tue Mar 17 19:30:02 2020 -0700
    Maintenance -- clean up readme.txt and add URL to GitHub. (#38)
    * Removing readme.txt, as we now have README.md in place. The only information missing from README.md is about the FAQ. * Adding FAQ information in README.md. * Adding a .url to root to redirect the user to the FreeRTOS GitHub home page.

commit 47bb466aa19395b7785bcb830e2e4dd35f6bafc5
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Tue Mar 17 13:07:44 2020 -0700
    Update issue templates
    Template maintenance: adding title prefix; adding examples to the "additional context" section.

commit f506290041f56867765f8efa70ed2862125bdb7c
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Tue Mar 17 10:15:07 2020 -0700
    Create SECURITY.md
    Apply the recommended SECURITY.md from AWS to our repo.

commit 8982a2f80a80a2a0a47cf82de07b52101bd9d606
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Fri Mar 13 12:50:10 2020 -0700
    Add ./lib directory to make sure the Zynq project compiles.

commit ecf0f12aa14ad6fdafe1ef37257cbb4e03e2abd5
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Wed Mar 11 10:19:48 2020 -0700
    Sync up with the amazon-freertos repo (10th March 2020) (#34)
    * Sync up with amazon-freertos * Sync up with amazon-freertos * Sync up with amazon-freertos

commit 0acffef047973e2e61c2201fd69cd9bbd317f674
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Tue Mar 10 10:20:48 2020 -0700
    GitHub PR template. (#29)

commit c40a6da2e4cb8042b56d1b174051cbbe9813781a
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Mon Mar 9 11:18:48 2020 -0700
    Pass payload length when calling UDP callback (#30)

commit 12d580e93d4d9074b9a867632f0681a511b4ad12
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Fri Mar 6 18:16:51 2020 -0800
    Update issue templates
    Initial issue template, created following https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser. If change is needed, we could go another round.

commit 9debffb5e0e42ff716f58b2270b3af09652294af
Author: Yuhui Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Fri Mar 6 17:27:46 2020 -0800
    Update README.md to remove a dead link. See the conversation at https://github.com/FreeRTOS/FreeRTOS/commit/42c627b2b88cb3b487fea983d8b566a8bbae54fa#comments . Linkage for both ./FreeRTOS/Source and ./FreeRTOS/Demo is removed, since it looks weird to only provide linkage to Demo.

commit 7e1a4bf563240501fc45167aee9d929c533939dd
Author: AniruddhaKanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
Date:   Fri Mar 6 15:18:09 2020 -0800
    Fix DHCP option Client-identifier (#28)

commit 42c627b2b88cb3b487fea983d8b566a8bbae54fa
Author: Yuhui.Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Fri Mar 6 09:15:11 2020 -0800
    Update readme and revert relative URL. (#27)
    * Reordering: bumping the cloning instruction up. * Rewording readme.md to make clear that the kernel code is a submodule of this repository. * Reverting the relative URL, since the user cannot click through on the GitHub page. (With the URL, the user could still download the correct version of the code; reverting simply due to the UI issue.)

commit 5751ae9b60e248ebd0b4dd7c58df54364d2bb9d5
Author: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
Date:   Fri Mar 6 09:11:42 2020 -0800
    Update CORTEX_MPU_M33F_NXP_LPC55S69_MCUXpresso project (#26)
    This commit updates the project for LPC55S69 so that it works with the latest version of MCUXpresso and SDK.
    Signed-off-by: Gaurav Aggarwal <aggarg@amazon.com>

commit a9ffffe1f01f45f79e127c15727784984077932f
Author: Carl Lundin <53273776+lundinc2@users.noreply.github.com>
Date:   Thu Mar 5 17:16:13 2020 -0800
    Using relative URL for submoduling. (#24)

commit 52c82076b38fe73d1dc46c97abf74ae9b803696c
Author: Carl Lundin <53273776+lundinc2@users.noreply.github.com>
Date:   Thu Mar 5 09:16:31 2020 -0800
    Use relative path to point to bundled toolchain instead (#25)

commit b877e4ec478de2c24d07ab46241070d7c66f375c
Author: lundinc2 <53273776+lundinc2@users.noreply.github.com>
Date:   Tue Feb 25 13:18:38 2020 -0800
    Moved vulnerability reporting and code of conduct to top of CONTRIBUTING.md (#20)

commit bef165d46799fb8faa58aaa224f80c16b6538e69
Author: Yuhui.Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Tue Feb 18 22:06:38 2020 -0800
    Linking test source file from relative path. (#19)

commit 89e7bbe292afd3912d1f0b2402cc506878bad869
Author: Yuhui.Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Tue Feb 18 17:47:55 2020 -0800
    A preliminary .gitignore file, to prevent us from checking in unnecessary files. (#18) https://github.com/github/gitignore.

commit c2a98127acb48c4562233230e66ca5c282688579
Author: RichardBarry <3073890+RichardBarry@users.noreply.github.com>
Date:   Sun Feb 16 13:19:53 2020 -0800
    Minor wording changes in the 'previous releases' section of the readme.md file. (#17)

commit 24c772d1439e5c291c0a29fce0a46996ca8afaa9
Author: Yuhui.Zheng <10982575+yuhui-zheng@users.noreply.github.com>
Date:   Fri Feb 14 12:47:01 2020 -0800
    Submodule kernel directory. (#16)
    * Removing FreeRTOS/Source in readiness for submoduling. * Submoduling kernel. * README.md update due to submoduling. When releasing, please follow these steps: 1. In the local directory, clean the directory and check that "git status" shows "nothing to commit, working tree clean" for ALL subdirectories. 2. Copy source code and instructions only to an empty folder; git-related files should not be in this folder -- this covers .git, .gitignore, .github, .gitmodules, gitmessages, ...... 3. Zip the folder from step 2 (create both .zip and .7z). 4. Attach the .zip and .7z to the release (e.g. attach these two in a new release -- https://github.com/FreeRTOS/FreeRTOS/releases/new). 5. PLEASE download both, unzip, and diff with your local git repo (you should not see any difference other than git-related files), and sanity check a couple of projects.

commit c3f8b91652392dc55e0d7067b90a40de5f5f0837
Author: Rashed Talukder <9218468+rashedtalukder@users.noreply.github.com>
Date:   Thu Feb 13 17:47:14 2020 -0800
    Update readme. Fixed typos and CLI commands. (#14)

commit 4723b825f2989213c1cdb2ebf4d6793e0292e363
Author: Julian Poidevin <julian-poidevin@users.noreply.github.com>
Date:   Fri Feb 14 02:43:36 2020 +0100
    Fixed wrong git clone SSH command (#13)
    Replaced bad HTTPS URL with proper SSH URL.

commit fc819b821715c42602819e58499846147a6394f5
Author: RichardBarry <3073890+RichardBarry@users.noreply.github.com>
Date:   Thu Feb 13 17:42:22 2020 -0800
    Correct the xTimerCreate() documentation, which said NULL was returned if the timer period was passed into the function as 0, whereas that is not the case. (#15)
    Add a note to the documentation for both the xTimerCreate() and xTimerCreateStatic() functions that the timer period must be greater than 0.

commit 1c711ab530b5f0dbd811d7d62e0a3763706ffff4
Author: Rashed Talukder <9218468+rashedtalukder@users.noreply.github.com>
Date:   Wed Feb 12 23:00:18 2020 -0800
    Updated contribution guidelines (#12)

commit 84fcc0d5317d96c6b086034093c8c1c83e050819
Author: Cobus van Eeden <35851496+cobusve@users.noreply.github.com>
Date:   Wed Feb 12 15:05:06 2020 -0800
    Updates to Markdown files and readme.txt (#11)

git-svn-id: http://svn.code.sf.net/p/freertos/code/trunk@2826 1d2547de-c912-0410-9cb9-b8ca96c0e9e2
Diffstat (limited to 'FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c')
-rw-r--r--  FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c | 2954
1 file changed, 1416 insertions(+), 1538 deletions(-)
diff --git a/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c b/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c
index f9f02b003..eb0911b01 100644
--- a/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c
+++ b/FreeRTOS-Plus/Source/WolfSSL/wolfcrypt/src/sha256.c
@@ -1,8 +1,8 @@
/* sha256.c
*
- * Copyright (C) 2006-2015 wolfSSL Inc.
+ * Copyright (C) 2006-2020 wolfSSL Inc.
*
- * This file is part of wolfSSL. (formerly known as CyaSSL)
+ * This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -16,299 +16,604 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
-/* code submitted by raphael.huck@efixo.com */
-
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <wolfssl/wolfcrypt/settings.h>
-#include <wolfssl/wolfcrypt/sha256.h>
-#if !defined(NO_SHA256)
-#ifdef HAVE_FIPS
+/*
+ * SHA256 Build Options:
+ * USE_SLOW_SHA256: Reduces code size by not partially unrolling
+ (~2KB smaller and ~25% slower) (default OFF)
+ * WOLFSSL_SHA256_BY_SPEC: Uses the Ch/Maj based on SHA256 specification
+ (default ON)
+ * WOLFSSL_SHA256_ALT_CH_MAJ: Alternate Ch/Maj that is easier for compilers to
+ optimize and recognize as SHA256 (default OFF)
+ * SHA256_MANY_REGISTERS: A SHA256 version that keeps all data in registers
+ and partially unrolled (default OFF)
+ */
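These options are compile-time defines. For anyone rebuilding this bundled wolfSSL, they would normally go in a user configuration header rather than being edited into the source; a minimal sketch, assuming the usual wolfSSL convention of a user_settings.h that is picked up when WOLFSSL_USER_SETTINGS is defined (the particular options chosen here are illustrative only):

    /* user_settings.h (illustrative) */
    #ifndef USER_SETTINGS_H
    #define USER_SETTINGS_H

    /* trade ~25% speed for ~2KB smaller code, per the notes above */
    #define USE_SLOW_SHA256

    /* keep the specification-based Ch/Maj (the default anyway) */
    #define WOLFSSL_SHA256_BY_SPEC

    #endif /* USER_SETTINGS_H */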
-int wc_InitSha256(Sha256* sha)
-{
- return InitSha256_fips(sha);
-}
+/* Default SHA256 to use Ch/Maj based on specification */
+#if !defined(WOLFSSL_SHA256_BY_SPEC) && !defined(WOLFSSL_SHA256_ALT_CH_MAJ)
+ #define WOLFSSL_SHA256_BY_SPEC
+#endif
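The spec-based Ch/Maj referred to here are the FIPS 180-4 choose and majority functions; the Ch/Maj macros visible in the removed section further down are algebraically equivalent rewrites with fewer operations. A standalone self-check, not part of the diff, comparing the textbook forms against the rewritten forms used in this file:

    #include <assert.h>
    #include <stdint.h>

    /* FIPS 180-4 definitions */
    #define CH_SPEC(x,y,z)  (((x) & (y)) ^ (~(x) & (z)))
    #define MAJ_SPEC(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))

    /* rewritten forms, as used in this file's software implementation */
    #define CH_OPT(x,y,z)   ((z) ^ ((x) & ((y) ^ (z))))
    #define MAJ_OPT(x,y,z)  ((((x) | (y)) & (z)) | ((x) & (y)))

    int main(void)
    {
        const uint32_t v[3] = { 0x6A09E667u, 0xBB67AE85u, 0x3C6EF372u };
        /* the pairs agree bitwise for any inputs; spot-check a few */
        for (int i = 0; i < 3; i++) {
            uint32_t x = v[i], y = v[(i + 1) % 3], z = v[(i + 2) % 3];
            assert(CH_SPEC(x, y, z)  == CH_OPT(x, y, z));
            assert(MAJ_SPEC(x, y, z) == MAJ_OPT(x, y, z));
        }
        return 0;
    }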
-int wc_Sha256Update(Sha256* sha, const byte* data, word32 len)
-{
- return Sha256Update_fips(sha, data, len);
-}
+#if !defined(NO_SHA256) && !defined(WOLFSSL_ARMASM)
+#if defined(HAVE_FIPS) && \
+ defined(HAVE_FIPS_VERSION) && (HAVE_FIPS_VERSION >= 2)
-int wc_Sha256Final(Sha256* sha, byte* out)
-{
- return Sha256Final_fips(sha, out);
-}
+ /* set NO_WRAPPERS before headers, use direct internal f()s not wrappers */
+ #define FIPS_NO_WRAPPERS
+ #ifdef USE_WINDOWS_API
+ #pragma code_seg(".fipsA$d")
+ #pragma const_seg(".fipsB$d")
+ #endif
+#endif
-int wc_Sha256Hash(const byte* data, word32 len, byte* out)
-{
- return Sha256Hash(data, len, out);
-}
+#include <wolfssl/wolfcrypt/sha256.h>
+#include <wolfssl/wolfcrypt/error-crypt.h>
+#include <wolfssl/wolfcrypt/cpuid.h>
+#include <wolfssl/wolfcrypt/hash.h>
+
+#ifdef WOLF_CRYPTO_CB
+ #include <wolfssl/wolfcrypt/cryptocb.h>
+#endif
+
+/* fips wrapper calls, user can call direct */
+#if defined(HAVE_FIPS) && \
+ (!defined(HAVE_FIPS_VERSION) || (HAVE_FIPS_VERSION < 2))
+
+ int wc_InitSha256(wc_Sha256* sha)
+ {
+ if (sha == NULL) {
+ return BAD_FUNC_ARG;
+ }
+ return InitSha256_fips(sha);
+ }
+ int wc_InitSha256_ex(wc_Sha256* sha, void* heap, int devId)
+ {
+ (void)heap;
+ (void)devId;
+ if (sha == NULL) {
+ return BAD_FUNC_ARG;
+ }
+ return InitSha256_fips(sha);
+ }
+ int wc_Sha256Update(wc_Sha256* sha, const byte* data, word32 len)
+ {
+ if (sha == NULL || (data == NULL && len > 0)) {
+ return BAD_FUNC_ARG;
+ }
+
+ if (data == NULL && len == 0) {
+ /* valid, but do nothing */
+ return 0;
+ }
+
+ return Sha256Update_fips(sha, data, len);
+ }
+ int wc_Sha256Final(wc_Sha256* sha, byte* out)
+ {
+ if (sha == NULL || out == NULL) {
+ return BAD_FUNC_ARG;
+ }
+ return Sha256Final_fips(sha, out);
+ }
+ void wc_Sha256Free(wc_Sha256* sha)
+ {
+ (void)sha;
+ /* Not supported in FIPS */
+ }
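Whichever branch is compiled (the FIPS wrappers here, or the plain implementations below), callers see the same streaming interface. A usage sketch based only on the signatures above; hash_message() is a hypothetical helper name, and error handling is abbreviated:

    #include <wolfssl/wolfcrypt/sha256.h>

    static int hash_message(const byte* msg, word32 len,
                            byte digest[WC_SHA256_DIGEST_SIZE])
    {
        wc_Sha256 sha;
        int ret = wc_InitSha256(&sha);
        if (ret == 0)
            ret = wc_Sha256Update(&sha, msg, len);  /* may be repeated */
        if (ret == 0)
            ret = wc_Sha256Final(&sha, digest);     /* 32-byte output */
        wc_Sha256Free(&sha);    /* a no-op in the FIPS build, as above */
        return ret;
    }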
-#else /* else build without fips */
+#else /* else build without fips, or for FIPS v2 */
-#if !defined(NO_SHA256) && defined(WOLFSSL_TI_HASH)
+
+#if defined(WOLFSSL_TI_HASH)
/* #include <wolfcrypt/src/port/ti/ti-hash.c> included by wc_port.c */
+#elif defined(WOLFSSL_CRYPTOCELL)
+ /* wc_port.c includes wolfcrypt/src/port/arm/cryptoCellHash.c */
#else
-#if !defined (ALIGN32)
- #if defined (__GNUC__)
- #define ALIGN32 __attribute__ ( (aligned (32)))
- #elif defined(_MSC_VER)
- /* disable align warning, we want alignment ! */
- #pragma warning(disable: 4324)
- #define ALIGN32 __declspec (align (32))
- #else
- #define ALIGN32
- #endif
-#endif
+#include <wolfssl/wolfcrypt/logging.h>
-#ifdef WOLFSSL_PIC32MZ_HASH
-#define wc_InitSha256 wc_InitSha256_sw
-#define wc_Sha256Update wc_Sha256Update_sw
-#define wc_Sha256Final wc_Sha256Final_sw
+#ifdef NO_INLINE
+ #include <wolfssl/wolfcrypt/misc.h>
+#else
+ #define WOLFSSL_MISC_INCLUDED
+ #include <wolfcrypt/src/misc.c>
#endif
-#ifdef HAVE_FIPS
- /* set NO_WRAPPERS before headers, use direct internal f()s not wrappers */
- #define FIPS_NO_WRAPPERS
+#ifdef WOLFSSL_DEVCRYPTO_HASH
+ #include <wolfssl/wolfcrypt/port/devcrypto/wc_devcrypto.h>
#endif
+
+
#if defined(USE_INTEL_SPEEDUP)
-#define HAVE_INTEL_AVX1
-#define HAVE_INTEL_AVX2
+ #if defined(__GNUC__) && ((__GNUC__ < 4) || \
+ (__GNUC__ == 4 && __GNUC_MINOR__ <= 8))
+ #undef NO_AVX2_SUPPORT
+ #define NO_AVX2_SUPPORT
+ #endif
+ #if defined(__clang__) && ((__clang_major__ < 3) || \
+ (__clang_major__ == 3 && __clang_minor__ <= 5))
+ #define NO_AVX2_SUPPORT
+ #elif defined(__clang__) && defined(NO_AVX2_SUPPORT)
+ #undef NO_AVX2_SUPPORT
+ #endif
-#if defined(DEBUG_XMM)
-#include "stdio.h"
+ #define HAVE_INTEL_AVX1
+ #ifndef NO_AVX2_SUPPORT
+ #define HAVE_INTEL_AVX2
+ #endif
+#endif /* USE_INTEL_SPEEDUP */
+
+#if defined(HAVE_INTEL_AVX2)
+ #define HAVE_INTEL_RORX
#endif
+
+#if !defined(WOLFSSL_PIC32MZ_HASH) && !defined(STM32_HASH_SHA2) && \
+ (!defined(WOLFSSL_IMX6_CAAM) || defined(NO_IMX6_CAAM_HASH)) && \
+ !defined(WOLFSSL_AFALG_HASH) && !defined(WOLFSSL_DEVCRYPTO_HASH) && \
+ (!defined(WOLFSSL_ESP32WROOM32_CRYPT) || defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)) && \
+ (!defined(WOLFSSL_RENESAS_TSIP_CRYPT) || defined(NO_WOLFSSL_RENESAS_TSIP_HASH))
+
+static int InitSha256(wc_Sha256* sha256)
+{
+ int ret = 0;
+
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
+
+ XMEMSET(sha256->digest, 0, sizeof(sha256->digest));
+ sha256->digest[0] = 0x6A09E667L;
+ sha256->digest[1] = 0xBB67AE85L;
+ sha256->digest[2] = 0x3C6EF372L;
+ sha256->digest[3] = 0xA54FF53AL;
+ sha256->digest[4] = 0x510E527FL;
+ sha256->digest[5] = 0x9B05688CL;
+ sha256->digest[6] = 0x1F83D9ABL;
+ sha256->digest[7] = 0x5BE0CD19L;
+
+ sha256->buffLen = 0;
+ sha256->loLen = 0;
+ sha256->hiLen = 0;
+#if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
+ sha256->flags = 0;
#endif
-#if defined(HAVE_INTEL_AVX2)
-#define HAVE_INTEL_RORX
+ return ret;
+}
#endif
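The eight initial digest words are the SHA-256 IV from FIPS 180-4: the first 32 bits of the fractional parts of the square roots of the first eight primes (the K[] table further down is built the same way from the cube roots of the first 64 primes). A standalone sketch, not part of the diff, that regenerates them:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const int primes[8] = { 2, 3, 5, 7, 11, 13, 17, 19 };
        for (int i = 0; i < 8; i++) {
            double r = sqrt((double)primes[i]);
            r -= floor(r);                       /* keep fractional part */
            printf("0x%08lX\n",
                   (unsigned long)(uint32_t)(r * 4294967296.0)); /* * 2^32 */
        }
        return 0;   /* prints 0x6A09E667 through 0x5BE0CD19 */
    }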
-
-/*****
-Intel AVX1/AVX2 Macro Control Structure
-#define HAVE_INTEL_AVX1
-#define HAVE_INTEL_AVX2
+/* Hardware Acceleration */
+#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
-#define HAVE_INTEL_RORX
+ /* in case intel instructions aren't available, plus we need the K[] global */
+ #define NEED_SOFT_SHA256
+ /*****
+ Intel AVX1/AVX2 Macro Control Structure
-int InitSha256(Sha256* sha256) {
- Save/Recover XMM, YMM
- ...
-}
+ #define HAVE_INTEL_AVX1
+ #define HAVE_INTEL_AVX2
-#if defined(HAVE_INTEL_AVX1)|| defined(HAVE_INTEL_AVX2)
- Transform() ; Function prototype
-#else
- Transform() { }
- int Sha256Final() {
- Save/Recover XMM, YMM
- ...
- }
-#endif
+ #define HAVE_INTEL_RORX
-#if defined(HAVE_INTEL_AVX1)|| defined(HAVE_INTEL_AVX2)
- #if defined(HAVE_INTEL_RORX
- #define RND with rorx instuction
+
+ int InitSha256(wc_Sha256* sha256) {
+ Save/Recover XMM, YMM
+ ...
+ }
+
+ #if defined(HAVE_INTEL_AVX1)|| defined(HAVE_INTEL_AVX2)
+ Transform_Sha256(); Function prototype
#else
- #define RND
+ Transform_Sha256() { }
+ int Sha256Final() {
+ Save/Recover XMM, YMM
+ ...
+ }
#endif
-#endif
-#if defined(HAVE_INTEL_AVX1)
-
- #define XMM Instructions/inline asm
-
- int Transform() {
- Stitched Message Sched/Round
- }
-
-#elif defined(HAVE_INTEL_AVX2)
-
- #define YMM Instructions/inline asm
-
- int Transform() {
- More granural Stitched Message Sched/Round
- }
-
-*/
+ #if defined(HAVE_INTEL_AVX1)|| defined(HAVE_INTEL_AVX2)
+ #if defined(HAVE_INTEL_RORX)
+ #define RND with rorx instruction
+ #else
+ #define RND
+ #endif
+ #endif
+ #if defined(HAVE_INTEL_AVX1)
-#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ #define XMM Instructions/inline asm
-/* Each platform needs to query info type 1 from cpuid to see if aesni is
- * supported. Also, let's setup a macro for proper linkage w/o ABI conflicts
- */
+ int Transform_Sha256() {
+ Stitched Message Sched/Round
+ }
-#ifndef _MSC_VER
- #define cpuid(reg, leaf, sub)\
- __asm__ __volatile__ ("cpuid":\
- "=a" (reg[0]), "=b" (reg[1]), "=c" (reg[2]), "=d" (reg[3]) :\
- "a" (leaf), "c"(sub));
+ #elif defined(HAVE_INTEL_AVX2)
- #define XASM_LINK(f) asm(f)
-#else
+ #define YMM Instructions/inline asm
- #include <intrin.h>
- #define cpuid(a,b) __cpuid((int*)a,b)
-
- #define XASM_LINK(f)
-
-#endif /* _MSC_VER */
-
-#define EAX 0
-#define EBX 1
-#define ECX 2
-#define EDX 3
-
-#define CPUID_AVX1 0x1
-#define CPUID_AVX2 0x2
-#define CPUID_RDRAND 0x4
-#define CPUID_RDSEED 0x8
-#define CPUID_BMI2 0x10 /* MULX, RORX */
-
-#define IS_INTEL_AVX1 (cpuid_flags&CPUID_AVX1)
-#define IS_INTEL_AVX2 (cpuid_flags&CPUID_AVX2)
-#define IS_INTEL_BMI2 (cpuid_flags&CPUID_BMI2)
-#define IS_INTEL_RDRAND (cpuid_flags&CPUID_RDRAND)
-#define IS_INTEL_RDSEED (cpuid_flags&CPUID_RDSEED)
-
-static word32 cpuid_check = 0 ;
-static word32 cpuid_flags = 0 ;
-
-static word32 cpuid_flag(word32 leaf, word32 sub, word32 num, word32 bit) {
- int got_intel_cpu=0;
- unsigned int reg[5];
-
- reg[4] = '\0' ;
- cpuid(reg, 0, 0);
- if(memcmp((char *)&(reg[EBX]), "Genu", 4) == 0 &&
- memcmp((char *)&(reg[EDX]), "ineI", 4) == 0 &&
- memcmp((char *)&(reg[ECX]), "ntel", 4) == 0) {
- got_intel_cpu = 1;
- }
- if (got_intel_cpu) {
- cpuid(reg, leaf, sub);
- return((reg[num]>>bit)&0x1) ;
- }
- return 0 ;
-}
+ int Transform_Sha256() {
+ More granular Stitched Message Sched/Round
+ }
-static int set_cpuid_flags(void) {
- if(cpuid_check==0) {
- if(cpuid_flag(1, 0, ECX, 28)){ cpuid_flags |= CPUID_AVX1 ;}
- if(cpuid_flag(7, 0, EBX, 5)){ cpuid_flags |= CPUID_AVX2 ; }
- if(cpuid_flag(7, 0, EBX, 8)) { cpuid_flags |= CPUID_BMI2 ; }
- if(cpuid_flag(1, 0, ECX, 30)){ cpuid_flags |= CPUID_RDRAND ; }
- if(cpuid_flag(7, 0, EBX, 18)){ cpuid_flags |= CPUID_RDSEED ; }
- cpuid_check = 1 ;
- return 0 ;
- }
- return 1 ;
-}
+ #endif
+ */
-/* #if defined(HAVE_INTEL_AVX1/2) at the tail of sha512 */
-static int Transform(Sha256* sha256);
+ /* Each platform needs to query info type 1 from cpuid to see if aesni is
+ * supported. Also, let's setup a macro for proper linkage w/o ABI conflicts
+ */
-#if defined(HAVE_INTEL_AVX1)
-static int Transform_AVX1(Sha256 *sha256) ;
+ /* #if defined(HAVE_INTEL_AVX1/2) at the tail of sha256 */
+ static int Transform_Sha256(wc_Sha256* sha256, const byte* data);
+
+#ifdef __cplusplus
+ extern "C" {
#endif
-#if defined(HAVE_INTEL_AVX2)
-static int Transform_AVX2(Sha256 *sha256) ;
-static int Transform_AVX1_RORX(Sha256 *sha256) ;
+
+ #if defined(HAVE_INTEL_AVX1)
+ extern int Transform_Sha256_AVX1(wc_Sha256 *sha256, const byte* data);
+ extern int Transform_Sha256_AVX1_Len(wc_Sha256* sha256,
+ const byte* data, word32 len);
+ #endif
+ #if defined(HAVE_INTEL_AVX2)
+ extern int Transform_Sha256_AVX2(wc_Sha256 *sha256, const byte* data);
+ extern int Transform_Sha256_AVX2_Len(wc_Sha256* sha256,
+ const byte* data, word32 len);
+ #ifdef HAVE_INTEL_RORX
+ extern int Transform_Sha256_AVX1_RORX(wc_Sha256 *sha256, const byte* data);
+ extern int Transform_Sha256_AVX1_RORX_Len(wc_Sha256* sha256,
+ const byte* data, word32 len);
+ extern int Transform_Sha256_AVX2_RORX(wc_Sha256 *sha256, const byte* data);
+ extern int Transform_Sha256_AVX2_RORX_Len(wc_Sha256* sha256,
+ const byte* data, word32 len);
+ #endif /* HAVE_INTEL_RORX */
+ #endif /* HAVE_INTEL_AVX2 */
+
+#ifdef __cplusplus
+ } /* extern "C" */
#endif
-static int (*Transform_p)(Sha256* sha256) /* = _Transform */;
+ static int (*Transform_Sha256_p)(wc_Sha256* sha256, const byte* data);
+ /* = _Transform_Sha256 */
+ static int (*Transform_Sha256_Len_p)(wc_Sha256* sha256, const byte* data,
+ word32 len);
+ /* = NULL */
+ static int transform_check = 0;
+ static word32 intel_flags;
-#define XTRANSFORM(sha256, B) (*Transform_p)(sha256)
+ #define XTRANSFORM(S, D) (*Transform_Sha256_p)((S),(D))
+ #define XTRANSFORM_LEN(S, D, L) (*Transform_Sha256_Len_p)((S),(D),(L))
-static void set_Transform(void) {
- if(set_cpuid_flags())return ;
+ static void Sha256_SetTransform(void)
+ {
-#if defined(HAVE_INTEL_AVX2)
- if(IS_INTEL_AVX2 && IS_INTEL_BMI2){
- Transform_p = Transform_AVX1_RORX; return ;
- Transform_p = Transform_AVX2 ;
- /* for avoiding warning,"not used" */
- }
-#endif
-#if defined(HAVE_INTEL_AVX1)
- Transform_p = ((IS_INTEL_AVX1) ? Transform_AVX1 : Transform) ; return ;
-#endif
- Transform_p = Transform ; return ;
-}
+ if (transform_check)
+ return;
-#else
- #if defined(FREESCALE_MMCAU)
- #define XTRANSFORM(sha256, B) Transform(sha256, B)
- #else
- #define XTRANSFORM(sha256, B) Transform(sha256)
- #endif
-#endif
+ intel_flags = cpuid_get_flags();
-/* Dummy for saving MM_REGs on behalf of Transform */
-#if defined(HAVE_INTEL_AVX2)&& !defined(HAVE_INTEL_AVX1)
-#define SAVE_XMM_YMM __asm__ volatile("or %%r8d, %%r8d":::\
- "%ymm4","%ymm5","%ymm6","%ymm7","%ymm8","%ymm9","%ymm10","%ymm11","%ymm12","%ymm13","%ymm14","%ymm15")
-#elif defined(HAVE_INTEL_AVX1)
-#define SAVE_XMM_YMM __asm__ volatile("or %%r8d, %%r8d":::\
- "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10",\
- "xmm11","xmm12","xmm13","xmm14","xmm15")
-#else
-#define SAVE_XMM_YMM
-#endif
+ #ifdef HAVE_INTEL_AVX2
+ if (1 && IS_INTEL_AVX2(intel_flags)) {
+ #ifdef HAVE_INTEL_RORX
+ if (IS_INTEL_BMI2(intel_flags)) {
+ Transform_Sha256_p = Transform_Sha256_AVX2_RORX;
+ Transform_Sha256_Len_p = Transform_Sha256_AVX2_RORX_Len;
+ }
+ else
+ #endif
+ if (1)
+ {
+ Transform_Sha256_p = Transform_Sha256_AVX2;
+ Transform_Sha256_Len_p = Transform_Sha256_AVX2_Len;
+ }
+ #ifdef HAVE_INTEL_RORX
+ else {
+ Transform_Sha256_p = Transform_Sha256_AVX1_RORX;
+ Transform_Sha256_Len_p = Transform_Sha256_AVX1_RORX_Len;
+ }
+ #endif
+ }
+ else
+ #endif
+ #ifdef HAVE_INTEL_AVX1
+ if (IS_INTEL_AVX1(intel_flags)) {
+ Transform_Sha256_p = Transform_Sha256_AVX1;
+ Transform_Sha256_Len_p = Transform_Sha256_AVX1_Len;
+ }
+ else
+ #endif
+ {
+ Transform_Sha256_p = Transform_Sha256;
+ Transform_Sha256_Len_p = NULL;
+ }
-#ifdef WOLFSSL_PIC32MZ_HASH
-#define InitSha256 InitSha256_sw
-#define Sha256Update Sha256Update_sw
-#define Sha256Final Sha256Final_sw
-#endif
+ transform_check = 1;
+ }
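Stripped of the #ifdef lattice, Sha256_SetTransform() is a one-time runtime dispatch: probe the CPU once, latch function pointers, and route every later XTRANSFORM()/XTRANSFORM_LEN() call through them. A reduced sketch of the pattern, assuming the cpuid helpers and transform functions named in this diff (error paths and the RORX variants omitted):

    typedef int (*sha_xform)(wc_Sha256* sha256, const byte* data);

    static sha_xform s_xform  = NULL;
    static int       s_probed = 0;

    static void pick_transform(void)
    {
        if (s_probed)
            return;                         /* probe the CPU exactly once */
        word32 flags = cpuid_get_flags();
        if (IS_INTEL_AVX2(flags))
            s_xform = Transform_Sha256_AVX2;
        else if (IS_INTEL_AVX1(flags))
            s_xform = Transform_Sha256_AVX1;
        else
            s_xform = Transform_Sha256;     /* portable C fallback */
        s_probed = 1;
    }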
-#include <wolfssl/wolfcrypt/logging.h>
-#include <wolfssl/wolfcrypt/error-crypt.h>
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
+ {
+ int ret = 0;
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
-#ifdef NO_INLINE
- #include <wolfssl/wolfcrypt/misc.h>
-#else
- #include <wolfcrypt/src/misc.c>
-#endif
+ sha256->heap = heap;
+ #ifdef WOLF_CRYPTO_CB
+ sha256->devId = devId;
+ #endif
-#ifdef FREESCALE_MMCAU
- #include "cau_api.h"
-#endif
+ ret = InitSha256(sha256);
+ if (ret != 0)
+ return ret;
+
+ /* choose best Transform function under this runtime environment */
+ Sha256_SetTransform();
+
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA256)
+ ret = wolfAsync_DevCtxInit(&sha256->asyncDev,
+ WOLFSSL_ASYNC_MARKER_SHA256, sha256->heap, devId);
+ #else
+ (void)devId;
+ #endif /* WOLFSSL_ASYNC_CRYPT */
-#ifndef WOLFSSL_HAVE_MIN
-#define WOLFSSL_HAVE_MIN
+ return ret;
+ }
- static INLINE word32 min(word32 a, word32 b)
+#elif defined(FREESCALE_LTC_SHA)
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
{
- return a > b ? b : a;
+ (void)heap;
+ (void)devId;
+
+ LTC_HASH_Init(LTC_BASE, &sha256->ctx, kLTC_Sha256, NULL, 0);
+
+ return 0;
}
-#endif /* WOLFSSL_HAVE_MIN */
+#elif defined(FREESCALE_MMCAU_SHA)
+ #ifdef FREESCALE_MMCAU_CLASSIC_SHA
+ #include "cau_api.h"
+ #else
+ #include "fsl_mmcau.h"
+ #endif
-int wc_InitSha256(Sha256* sha256)
-{
- #ifdef FREESCALE_MMCAU
+ #define XTRANSFORM(S, D) Transform_Sha256((S),(D))
+ #define XTRANSFORM_LEN(S, D, L) Transform_Sha256_Len((S),(D),(L))
+
+ #ifndef WC_HASH_DATA_ALIGNMENT
+ /* these hardware APIs require 4-byte (word32) alignment */
+ #define WC_HASH_DATA_ALIGNMENT 4
+ #endif
+
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
+ {
+ int ret = 0;
+
+ (void)heap;
+ (void)devId;
+
+ ret = wolfSSL_CryptHwMutexLock();
+ if (ret != 0) {
+ return ret;
+ }
+
+ #ifdef FREESCALE_MMCAU_CLASSIC_SHA
cau_sha256_initialize_output(sha256->digest);
#else
+ MMCAU_SHA256_InitializeOutput((uint32_t*)sha256->digest);
+ #endif
+ wolfSSL_CryptHwMutexUnLock();
+
+ sha256->buffLen = 0;
+ sha256->loLen = 0;
+ sha256->hiLen = 0;
+ #ifdef WOLFSSL_SMALL_STACK_CACHE
+ sha256->W = NULL;
+ #endif
+
+ return ret;
+ }
+
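Every MMCAU call in this section sits between wolfSSL_CryptHwMutexLock() and wolfSSL_CryptHwMutexUnLock(), because the crypto accelerator is a single shared peripheral. The pattern in isolation, with do_accelerator_work() as a hypothetical stand-in for the hardware call:

    static int guarded_hw_op(wc_Sha256* sha256)
    {
        int ret = wolfSSL_CryptHwMutexLock();  /* serialize HW access */
        if (ret == 0) {
            do_accelerator_work(sha256);       /* hypothetical HW call */
            wolfSSL_CryptHwMutexUnLock();      /* release before return */
        }
        return ret;  /* nonzero: lock failed and HW was never touched */
    }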
+ static int Transform_Sha256(wc_Sha256* sha256, const byte* data)
+ {
+ int ret = wolfSSL_CryptHwMutexLock();
+ if (ret == 0) {
+ #ifdef FREESCALE_MMCAU_CLASSIC_SHA
+ cau_sha256_hash_n((byte*)data, 1, sha256->digest);
+ #else
+ MMCAU_SHA256_HashN((byte*)data, 1, sha256->digest);
+ #endif
+ wolfSSL_CryptHwMutexUnLock();
+ }
+ return ret;
+ }
+
+ static int Transform_Sha256_Len(wc_Sha256* sha256, const byte* data,
+ word32 len)
+ {
+ int ret = wolfSSL_CryptHwMutexLock();
+ if (ret == 0) {
+ #if defined(WC_HASH_DATA_ALIGNMENT) && WC_HASH_DATA_ALIGNMENT > 0
+ if ((size_t)data % WC_HASH_DATA_ALIGNMENT) {
+ /* data pointer is NOT aligned,
+ * so copy and perform one block at a time */
+ byte* local = (byte*)sha256->buffer;
+ while (len >= WC_SHA256_BLOCK_SIZE) {
+ XMEMCPY(local, data, WC_SHA256_BLOCK_SIZE);
+ #ifdef FREESCALE_MMCAU_CLASSIC_SHA
+ cau_sha256_hash_n(local, 1, sha256->digest);
+ #else
+ MMCAU_SHA256_HashN(local, 1, sha256->digest);
+ #endif
+ data += WC_SHA256_BLOCK_SIZE;
+ len -= WC_SHA256_BLOCK_SIZE;
+ }
+ }
+ else
+ #endif
+ {
+ #ifdef FREESCALE_MMCAU_CLASSIC_SHA
+ cau_sha256_hash_n((byte*)data, len/WC_SHA256_BLOCK_SIZE,
+ sha256->digest);
+ #else
+ MMCAU_SHA256_HashN((byte*)data, len/WC_SHA256_BLOCK_SIZE,
+ sha256->digest);
+ #endif
+ }
+ wolfSSL_CryptHwMutexUnLock();
+ }
+ return ret;
+ }
+
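The unaligned branch above is a bounce-buffer fallback: when the caller's pointer fails the 4-byte alignment the MMCAU API needs, each 64-byte block is first copied into the context's aligned buffer and hashed from there. The alignment test reduced to its essentials, as a standalone sketch:

    #include <stddef.h>
    #include <string.h>

    #define ALIGN_REQ 4   /* WC_HASH_DATA_ALIGNMENT in the code above */

    /* returns the pointer to hash from: the caller's buffer when it is
     * already aligned, otherwise an aligned copy in 'scratch' */
    static const void* aligned_block(const void* p, void* scratch,
                                     size_t blockLen)
    {
        if (((size_t)p % ALIGN_REQ) == 0)
            return p;                       /* fast path, no copy */
        memcpy(scratch, p, blockLen);       /* slow path, bounce copy */
        return scratch;
    }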
+#elif defined(WOLFSSL_PIC32MZ_HASH)
+ #include <wolfssl/wolfcrypt/port/pic32/pic32mz-crypt.h>
+
+#elif defined(STM32_HASH_SHA2)
+
+ /* Supports CubeMX HAL or Standard Peripheral Library */
+
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
+ {
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
+
+ (void)devId;
+ (void)heap;
+
+ wc_Stm32_Hash_Init(&sha256->stmCtx);
+ return 0;
+ }
+
+ int wc_Sha256Update(wc_Sha256* sha256, const byte* data, word32 len)
+ {
+ int ret = 0;
+
+ if (sha256 == NULL || (data == NULL && len > 0)) {
+ return BAD_FUNC_ARG;
+ }
+
+ ret = wolfSSL_CryptHwMutexLock();
+ if (ret == 0) {
+ ret = wc_Stm32_Hash_Update(&sha256->stmCtx,
+ HASH_AlgoSelection_SHA256, data, len);
+ wolfSSL_CryptHwMutexUnLock();
+ }
+ return ret;
+ }
+
+ int wc_Sha256Final(wc_Sha256* sha256, byte* hash)
+ {
+ int ret = 0;
+
+ if (sha256 == NULL || hash == NULL) {
+ return BAD_FUNC_ARG;
+ }
+
+ ret = wolfSSL_CryptHwMutexLock();
+ if (ret == 0) {
+ ret = wc_Stm32_Hash_Final(&sha256->stmCtx,
+ HASH_AlgoSelection_SHA256, hash, WC_SHA256_DIGEST_SIZE);
+ wolfSSL_CryptHwMutexUnLock();
+ }
+
+ (void)wc_InitSha256(sha256); /* reset state */
+
+ return ret;
+ }
+
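Note the wc_InitSha256() call at the end of wc_Sha256Final() above: in this port, finalizing also re-initializes, so the same context can be reused for another message. A usage sketch relying on that reset (portable code may still prefer an explicit re-init between messages):

    static int two_digests(byte d1[WC_SHA256_DIGEST_SIZE],
                           byte d2[WC_SHA256_DIGEST_SIZE])
    {
        wc_Sha256 sha;
        int ret = wc_InitSha256(&sha);
        if (ret == 0) ret = wc_Sha256Update(&sha, (const byte*)"abc", 3);
        if (ret == 0) ret = wc_Sha256Final(&sha, d1);  /* resets state */
        if (ret == 0) ret = wc_Sha256Update(&sha, (const byte*)"def", 3);
        if (ret == 0) ret = wc_Sha256Final(&sha, d2);  /* no re-init needed */
        return ret;
    }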
+#elif defined(WOLFSSL_IMX6_CAAM) && !defined(NO_IMX6_CAAM_HASH)
+ /* functions defined in wolfcrypt/src/port/caam/caam_sha256.c */
+
+#elif defined(WOLFSSL_AFALG_HASH)
+ /* implemented in wolfcrypt/src/port/af_alg/afalg_hash.c */
+
+#elif defined(WOLFSSL_DEVCRYPTO_HASH)
+ /* implemented in wolfcrypt/src/port/devcrypto/devcrypt_hash.c */
+
+#elif defined(WOLFSSL_SCE) && !defined(WOLFSSL_SCE_NO_HASH)
+ #include "hal_data.h"
+
+ #ifndef WOLFSSL_SCE_SHA256_HANDLE
+ #define WOLFSSL_SCE_SHA256_HANDLE g_sce_hash_0
+ #endif
+
+ #define WC_SHA256_DIGEST_WORD_SIZE 16
+ #define XTRANSFORM(S, D) wc_Sha256SCE_XTRANSFORM((S), (D))
+ static int wc_Sha256SCE_XTRANSFORM(wc_Sha256* sha256, const byte* data)
+ {
+ if (WOLFSSL_SCE_GSCE_HANDLE.p_cfg->endian_flag ==
+ CRYPTO_WORD_ENDIAN_LITTLE)
+ {
+ ByteReverseWords((word32*)data, (word32*)data,
+ WC_SHA256_BLOCK_SIZE);
+ ByteReverseWords(sha256->digest, sha256->digest,
+ WC_SHA256_DIGEST_SIZE);
+ }
+
+ if (WOLFSSL_SCE_SHA256_HANDLE.p_api->hashUpdate(
+ WOLFSSL_SCE_SHA256_HANDLE.p_ctrl, (word32*)data,
+ WC_SHA256_DIGEST_WORD_SIZE, sha256->digest) != SSP_SUCCESS){
+ WOLFSSL_MSG("Unexpected hardware return value");
+ return WC_HW_E;
+ }
+
+ if (WOLFSSL_SCE_GSCE_HANDLE.p_cfg->endian_flag ==
+ CRYPTO_WORD_ENDIAN_LITTLE)
+ {
+ ByteReverseWords((word32*)data, (word32*)data,
+ WC_SHA256_BLOCK_SIZE);
+ ByteReverseWords(sha256->digest, sha256->digest,
+ WC_SHA256_DIGEST_SIZE);
+ }
+
+ return 0;
+ }
+
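The paired ByteReverseWords() calls above convert the block and digest to the word order the SCE engine expects and then restore them afterwards (note the temporary write through the const data pointer). ByteReverseWords is wolfSSL's bulk 32-bit byte swap; its effect, sketched standalone:

    #include <stdint.h>

    static uint32_t byterev32(uint32_t w)  /* one word32, byte-swapped */
    {
        return (w << 24) | ((w << 8) & 0x00FF0000u) |
               ((w >> 8) & 0x0000FF00u) | (w >> 24);
    }

    /* swap 'byteCount' bytes worth of words, e.g. a 64-byte SHA block */
    static void byterev_words(uint32_t* out, const uint32_t* in,
                              unsigned byteCount)
    {
        for (unsigned i = 0; i < byteCount / 4; i++)
            out[i] = byterev32(in[i]);
    }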
+
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
+ {
+ int ret = 0;
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
+
+ sha256->heap = heap;
+
+ ret = InitSha256(sha256);
+ if (ret != 0)
+ return ret;
+
+ (void)devId;
+
+ return ret;
+ }
+
+#elif defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+
+ #define NEED_SOFT_SHA256
+
+ static int InitSha256(wc_Sha256* sha256)
+ {
+ int ret = 0;
+
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
+
+ XMEMSET(sha256->digest, 0, sizeof(sha256->digest));
sha256->digest[0] = 0x6A09E667L;
sha256->digest[1] = 0xBB67AE85L;
sha256->digest[2] = 0x3C6EF372L;
@@ -317,1450 +622,1023 @@ int wc_InitSha256(Sha256* sha256)
sha256->digest[5] = 0x9B05688CL;
sha256->digest[6] = 0x1F83D9ABL;
sha256->digest[7] = 0x5BE0CD19L;
- #endif
- sha256->buffLen = 0;
- sha256->loLen = 0;
- sha256->hiLen = 0;
-
-#if defined(HAVE_INTEL_AVX1)|| defined(HAVE_INTEL_AVX2)
- set_Transform() ; /* choose best Transform function under this runtime environment */
-#endif
-
- return 0;
-}
+ sha256->buffLen = 0;
+ sha256->loLen = 0;
+ sha256->hiLen = 0;
+
+ /* always start firstblock = 1 when using hw engine */
+ sha256->ctx.isfirstblock = 1;
+ sha256->ctx.sha_type = SHA2_256;
+ if(sha256->ctx.mode == ESP32_SHA_HW) {
+ /* release hw */
+ esp_sha_hw_unlock();
+ }
+ /* always set mode as INIT
+ * whether using HW or SW is determined at first call of update()
+ */
+ sha256->ctx.mode = ESP32_SHA_INIT;
+ return ret;
+ }
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
+ {
+ int ret = 0;
-#if !defined(FREESCALE_MMCAU)
-static const ALIGN32 word32 K[64] = {
- 0x428A2F98L, 0x71374491L, 0xB5C0FBCFL, 0xE9B5DBA5L, 0x3956C25BL,
- 0x59F111F1L, 0x923F82A4L, 0xAB1C5ED5L, 0xD807AA98L, 0x12835B01L,
- 0x243185BEL, 0x550C7DC3L, 0x72BE5D74L, 0x80DEB1FEL, 0x9BDC06A7L,
- 0xC19BF174L, 0xE49B69C1L, 0xEFBE4786L, 0x0FC19DC6L, 0x240CA1CCL,
- 0x2DE92C6FL, 0x4A7484AAL, 0x5CB0A9DCL, 0x76F988DAL, 0x983E5152L,
- 0xA831C66DL, 0xB00327C8L, 0xBF597FC7L, 0xC6E00BF3L, 0xD5A79147L,
- 0x06CA6351L, 0x14292967L, 0x27B70A85L, 0x2E1B2138L, 0x4D2C6DFCL,
- 0x53380D13L, 0x650A7354L, 0x766A0ABBL, 0x81C2C92EL, 0x92722C85L,
- 0xA2BFE8A1L, 0xA81A664BL, 0xC24B8B70L, 0xC76C51A3L, 0xD192E819L,
- 0xD6990624L, 0xF40E3585L, 0x106AA070L, 0x19A4C116L, 0x1E376C08L,
- 0x2748774CL, 0x34B0BCB5L, 0x391C0CB3L, 0x4ED8AA4AL, 0x5B9CCA4FL,
- 0x682E6FF3L, 0x748F82EEL, 0x78A5636FL, 0x84C87814L, 0x8CC70208L,
- 0x90BEFFFAL, 0xA4506CEBL, 0xBEF9A3F7L, 0xC67178F2L
-};
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
-#endif
+ XMEMSET(sha256, 0, sizeof(wc_Sha256));
+ sha256->ctx.mode = ESP32_SHA_INIT;
+ sha256->ctx.isfirstblock = 1;
+ (void)devId;
-#if defined(FREESCALE_MMCAU)
+ ret = InitSha256(sha256);
-static int Transform(Sha256* sha256, byte* buf)
-{
- cau_sha256_hash_n(buf, 1, sha256->digest);
+ return ret;
+ }
- return 0;
-}
+#elif defined(WOLFSSL_RENESAS_TSIP_CRYPT) && \
+ !defined(NO_WOLFSSL_RENESAS_TSIP_CRYPT_HASH)
-#endif /* FREESCALE_MMCAU */
+ /* implemented in wolfcrypt/src/port/Renesas/renesas_tsip_sha.c */
-#define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
-#define Maj(x,y,z) ((((x) | (y)) & (z)) | ((x) & (y)))
-#define R(x, n) (((x)&0xFFFFFFFFU)>>(n))
+#else
+ #define NEED_SOFT_SHA256
-#define S(x, n) rotrFixed(x, n)
-#define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22))
-#define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25))
-#define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3))
-#define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10))
+ int wc_InitSha256_ex(wc_Sha256* sha256, void* heap, int devId)
+ {
+ int ret = 0;
+ if (sha256 == NULL)
+ return BAD_FUNC_ARG;
+
+ sha256->heap = heap;
+ #ifdef WOLF_CRYPTO_CB
+ sha256->devId = devId;
+ sha256->devCtx = NULL;
+ #endif
-#define RND(a,b,c,d,e,f,g,h,i) \
- t0 = (h) + Sigma1((e)) + Ch((e), (f), (g)) + K[(i)] + W[(i)]; \
- t1 = Sigma0((a)) + Maj((a), (b), (c)); \
- (d) += t0; \
- (h) = t0 + t1;
+ ret = InitSha256(sha256);
+ if (ret != 0)
+ return ret;
-#if !defined(FREESCALE_MMCAU)
-static int Transform(Sha256* sha256)
-{
- word32 S[8], t0, t1;
- int i;
+ #ifdef WOLFSSL_SMALL_STACK_CACHE
+ sha256->W = NULL;
+ #endif
-#ifdef WOLFSSL_SMALL_STACK
- word32* W;
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA256)
+ ret = wolfAsync_DevCtxInit(&sha256->asyncDev,
+ WOLFSSL_ASYNC_MARKER_SHA256, sha256->heap, devId);
+ #else
+ (void)devId;
+ #endif /* WOLFSSL_ASYNC_CRYPT */
- W = (word32*) XMALLOC(sizeof(word32) * 64, NULL, DYNAMIC_TYPE_TMP_BUFFER);
- if (W == NULL)
- return MEMORY_E;
+ return ret;
+ }
+#endif /* End Hardware Acceleration */
+
+#ifdef NEED_SOFT_SHA256
+
+ static const ALIGN32 word32 K[64] = {
+ 0x428A2F98L, 0x71374491L, 0xB5C0FBCFL, 0xE9B5DBA5L, 0x3956C25BL,
+ 0x59F111F1L, 0x923F82A4L, 0xAB1C5ED5L, 0xD807AA98L, 0x12835B01L,
+ 0x243185BEL, 0x550C7DC3L, 0x72BE5D74L, 0x80DEB1FEL, 0x9BDC06A7L,
+ 0xC19BF174L, 0xE49B69C1L, 0xEFBE4786L, 0x0FC19DC6L, 0x240CA1CCL,
+ 0x2DE92C6FL, 0x4A7484AAL, 0x5CB0A9DCL, 0x76F988DAL, 0x983E5152L,
+ 0xA831C66DL, 0xB00327C8L, 0xBF597FC7L, 0xC6E00BF3L, 0xD5A79147L,
+ 0x06CA6351L, 0x14292967L, 0x27B70A85L, 0x2E1B2138L, 0x4D2C6DFCL,
+ 0x53380D13L, 0x650A7354L, 0x766A0ABBL, 0x81C2C92EL, 0x92722C85L,
+ 0xA2BFE8A1L, 0xA81A664BL, 0xC24B8B70L, 0xC76C51A3L, 0xD192E819L,
+ 0xD6990624L, 0xF40E3585L, 0x106AA070L, 0x19A4C116L, 0x1E376C08L,
+ 0x2748774CL, 0x34B0BCB5L, 0x391C0CB3L, 0x4ED8AA4AL, 0x5B9CCA4FL,
+ 0x682E6FF3L, 0x748F82EEL, 0x78A5636FL, 0x84C87814L, 0x8CC70208L,
+ 0x90BEFFFAL, 0xA4506CEBL, 0xBEF9A3F7L, 0xC67178F2L
+ };
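/* Reference sketch (illustration only, not from the wolfSSL sources): per
 * FIPS 180-4, K[i] is the first 32 bits of the fractional part of the cube
 * root of the i-th prime. A hosted-C spot check of the first few entries: */
#include <math.h>
#include <stdint.h>
#include <stdio.h>
int main(void)
{
    const unsigned primes[4] = { 2, 3, 5, 7 };
    for (int i = 0; i < 4; i++) {
        double cr = cbrt((double)primes[i]);
        uint32_t k = (uint32_t)((cr - floor(cr)) * 4294967296.0); /* * 2^32 */
        printf("K[%d] = 0x%08X\n", i, k); /* 0x428A2F98, 0x71374491, ... */
    }
    return 0;
}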
+
+/* Both versions of Ch and Maj are logically equivalent, but the second set
+ is easier for compilers to recognize and optimize */
+#ifdef WOLFSSL_SHA256_BY_SPEC
+ /* SHA256 math based on specification */
+ #define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
+ #define Maj(x,y,z) ((((x) | (y)) & (z)) | ((x) & (y)))
#else
- word32 W[64];
+ /* SHA256 math reworked for easier compiler optimization */
+ #define Ch(x,y,z) ((((y) ^ (z)) & (x)) ^ (z))
+ #define Maj(x,y,z) ((((x) ^ (y)) & ((y) ^ (z))) ^ (y))
#endif
+ #define R(x, n) (((x) & 0xFFFFFFFFU) >> (n))
+
+ #define S(x, n) rotrFixed(x, n)
+ #define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22))
+ #define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25))
+ #define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3))
+ #define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10))
+
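/* Sketch (illustration only): the two Ch/Maj forms above are equivalent; an
 * exhaustive check over single-bit inputs demonstrates it, and the bitwise
 * operators extend the result to every bit of a word32 independently. */
#include <assert.h>
int main(void)
{
    for (unsigned x = 0; x <= 1; x++)
        for (unsigned y = 0; y <= 1; y++)
            for (unsigned z = 0; z <= 1; z++) {
                unsigned chSpec  = z ^ (x & (y ^ z));       /* spec form */
                unsigned chOpt   = ((y ^ z) & x) ^ z;       /* reworked form */
                unsigned majSpec = ((x | y) & z) | (x & y);
                unsigned majOpt  = ((x ^ y) & (y ^ z)) ^ y;
                assert(chSpec == chOpt && majSpec == majOpt);
            }
    return 0;
}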
+ #define a(i) S[(0-i) & 7]
+ #define b(i) S[(1-i) & 7]
+ #define c(i) S[(2-i) & 7]
+ #define d(i) S[(3-i) & 7]
+ #define e(i) S[(4-i) & 7]
+ #define f(i) S[(5-i) & 7]
+ #define g(i) S[(6-i) & 7]
+ #define h(i) S[(7-i) & 7]
+
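+ /* Note: the a(i)..h(i) macros implement the per-round rotation of the
+ * eight working variables by re-indexing S[] instead of copying: the
+ * slot written as h in round j is read back as a in round j+1, so no
+ * data actually moves. */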
+ #ifndef XTRANSFORM
+ #define XTRANSFORM(S, D) Transform_Sha256((S),(D))
+ #endif
- /* Copy context->state[] to working vars */
- for (i = 0; i < 8; i++)
- S[i] = sha256->digest[i];
-
- for (i = 0; i < 16; i++)
- W[i] = sha256->buffer[i];
-
- for (i = 16; i < 64; i++)
- W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16];
-
- for (i = 0; i < 64; i += 8) {
- RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],i+0);
- RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],i+1);
- RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],i+2);
- RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],i+3);
- RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],i+4);
- RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],i+5);
- RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],i+6);
- RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],i+7);
- }
+#ifndef SHA256_MANY_REGISTERS
+ #define RND(j) \
+ t0 = h(j) + Sigma1(e(j)) + Ch(e(j), f(j), g(j)) + K[i+j] + W[i+j]; \
+ t1 = Sigma0(a(j)) + Maj(a(j), b(j), c(j)); \
+ d(j) += t0; \
+ h(j) = t0 + t1
+
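+ /* For reference: RND() is the FIPS 180-4 compression round,
+ * T1 = h + Sigma1(e) + Ch(e,f,g) + K[t] + W[t]
+ * T2 = Sigma0(a) + Maj(a,b,c)
+ * followed by d += T1 and h = T1 + T2; the variable rotation is
+ * handled by the index macros above. */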
+ static int Transform_Sha256(wc_Sha256* sha256, const byte* data)
+ {
+ word32 S[8], t0, t1;
+ int i;
+
+ #ifdef WOLFSSL_SMALL_STACK_CACHE
+ word32* W = sha256->W;
+ if (W == NULL) {
+ W = (word32*)XMALLOC(sizeof(word32) * WC_SHA256_BLOCK_SIZE, NULL,
+ DYNAMIC_TYPE_DIGEST);
+ if (W == NULL)
+ return MEMORY_E;
+ sha256->W = W;
+ }
+ #elif defined(WOLFSSL_SMALL_STACK)
+ word32* W;
+ W = (word32*)XMALLOC(sizeof(word32) * WC_SHA256_BLOCK_SIZE, NULL,
+ DYNAMIC_TYPE_TMP_BUFFER);
+ if (W == NULL)
+ return MEMORY_E;
+ #else
+ word32 W[WC_SHA256_BLOCK_SIZE];
+ #endif
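+ /* note: WC_SHA256_BLOCK_SIZE (64) serves here as the number of 32-bit
+ * message-schedule words, which for SHA-256 happens to equal the
+ * block size in bytes */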
+
+ /* Copy context->state[] to working vars */
+ for (i = 0; i < 8; i++)
+ S[i] = sha256->digest[i];
+
+ for (i = 0; i < 16; i++)
+ W[i] = *((word32*)&data[i*sizeof(word32)]);
+
+ for (i = 16; i < WC_SHA256_BLOCK_SIZE; i++)
+ W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16];
+
+ #ifdef USE_SLOW_SHA256
+ /* not unrolled - ~2k smaller and ~25% slower */
+ for (i = 0; i < WC_SHA256_BLOCK_SIZE; i += 8) {
+ int j;
+ for (j = 0; j < 8; j++) { /* braces required: RND() expands to multiple statements */
+ RND(j);
+ }
+ }
+ #else
+ /* partially loop unrolled */
+ for (i = 0; i < WC_SHA256_BLOCK_SIZE; i += 8) {
+ RND(0); RND(1); RND(2); RND(3);
+ RND(4); RND(5); RND(6); RND(7);
+ }
+ #endif /* USE_SLOW_SHA256 */
- /* Add the working vars back into digest state[] */
- for (i = 0; i < 8; i++) {
- sha256->digest[i] += S[i];
+ /* Add the working vars back into digest state[] */
+ for (i = 0; i < 8; i++) {
+ sha256->digest[i] += S[i];
+ }
+
+ #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_SMALL_STACK_CACHE)
+ XFREE(W, NULL, DYNAMIC_TYPE_TMP_BUFFER);
+ #endif
+ return 0;
}
+#else
+ /* SHA256 version that keeps all data in registers */
+ #define SCHED1(j) (W[j] = *((word32*)&data[j*sizeof(word32)]))
+ #define SCHED(j) ( \
+ W[ j & 15] += \
+ Gamma1(W[(j-2) & 15])+ \
+ W[(j-7) & 15] + \
+ Gamma0(W[(j-15) & 15]) \
+ )
+
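+ /* Note: SCHED()/RNDN() keep the message schedule in a rolling
+ * 16-word window (indices taken mod 16), so the full 64-word W[]
+ * array is never materialized in this variant. */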
+ #define RND1(j) \
+ t0 = h(j) + Sigma1(e(j)) + Ch(e(j), f(j), g(j)) + K[i+j] + SCHED1(j); \
+ t1 = Sigma0(a(j)) + Maj(a(j), b(j), c(j)); \
+ d(j) += t0; \
+ h(j) = t0 + t1
+ #define RNDN(j) \
+ t0 = h(j) + Sigma1(e(j)) + Ch(e(j), f(j), g(j)) + K[i+j] + SCHED(j); \
+ t1 = Sigma0(a(j)) + Maj(a(j), b(j), c(j)); \
+ d(j) += t0; \
+ h(j) = t0 + t1
+
+ static int Transform_Sha256(wc_Sha256* sha256, const byte* data)
+ {
+ word32 S[8], t0, t1;
+ int i;
+ word32 W[WC_SHA256_BLOCK_SIZE/sizeof(word32)];
+
+ /* Copy digest to working vars */
+ S[0] = sha256->digest[0];
+ S[1] = sha256->digest[1];
+ S[2] = sha256->digest[2];
+ S[3] = sha256->digest[3];
+ S[4] = sha256->digest[4];
+ S[5] = sha256->digest[5];
+ S[6] = sha256->digest[6];
+ S[7] = sha256->digest[7];
+
+ i = 0;
+ RND1( 0); RND1( 1); RND1( 2); RND1( 3);
+ RND1( 4); RND1( 5); RND1( 6); RND1( 7);
+ RND1( 8); RND1( 9); RND1(10); RND1(11);
+ RND1(12); RND1(13); RND1(14); RND1(15);
+ /* 64 operations, partially loop unrolled */
+ for (i = 16; i < 64; i += 16) {
+ RNDN( 0); RNDN( 1); RNDN( 2); RNDN( 3);
+ RNDN( 4); RNDN( 5); RNDN( 6); RNDN( 7);
+ RNDN( 8); RNDN( 9); RNDN(10); RNDN(11);
+ RNDN(12); RNDN(13); RNDN(14); RNDN(15);
+ }
-#ifdef WOLFSSL_SMALL_STACK
- XFREE(W, NULL, DYNAMIC_TYPE_TMP_BUFFER);
+ /* Add the working vars back into digest */
+ sha256->digest[0] += S[0];
+ sha256->digest[1] += S[1];
+ sha256->digest[2] += S[2];
+ sha256->digest[3] += S[3];
+ sha256->digest[4] += S[4];
+ sha256->digest[5] += S[5];
+ sha256->digest[6] += S[6];
+ sha256->digest[7] += S[7];
+
+ return 0;
+ }
+#endif /* SHA256_MANY_REGISTERS */
#endif
+/* End of the wc_ (wolfCrypt) software implementation */
- return 0;
-}
-#endif /* #if !defined(FREESCALE_MMCAU) */
+#ifdef XTRANSFORM
-static INLINE void AddLength(Sha256* sha256, word32 len)
-{
- word32 tmp = sha256->loLen;
- if ( (sha256->loLen += len) < tmp)
- sha256->hiLen++; /* carry low to high */
-}
+ static WC_INLINE void AddLength(wc_Sha256* sha256, word32 len)
+ {
+ word32 tmp = sha256->loLen;
+ if ((sha256->loLen += len) < tmp) {
+ sha256->hiLen++; /* carry low to high */
+ }
+ }
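/* Sketch (illustration only): AddLength() maintains a 64-bit byte count in
 * two word32 halves; unsigned wrap-around is detected by the sum ending up
 * below the old value. A standalone demonstration: */
#include <assert.h>
#include <stdint.h>
int main(void)
{
    uint32_t lo = 0xFFFFFFF0u, hi = 0;
    uint32_t tmp = lo;
    lo += 0x20u;      /* wraps past 2^32 */
    if (lo < tmp)
        hi++;         /* carry low into high */
    assert(hi == 1 && lo == 0x10u);
    return 0;
}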
-int wc_Sha256Update(Sha256* sha256, const byte* data, word32 len)
-{
+ /* process input in block-size increments */
+ static WC_INLINE int Sha256Update(wc_Sha256* sha256, const byte* data, word32 len)
+ {
+ int ret = 0;
+ word32 blocksLen;
+ byte* local;
- /* do block size increments */
- byte* local = (byte*)sha256->buffer;
+ if (sha256 == NULL || (data == NULL && len > 0)) {
+ return BAD_FUNC_ARG;
+ }
+
+ if (data == NULL && len == 0) {
+ /* valid, but do nothing */
+ return 0;
+ }
+
+ /* check that internal buffLen is valid */
+ if (sha256->buffLen >= WC_SHA256_BLOCK_SIZE) {
+ return BUFFER_E;
+ }
- SAVE_XMM_YMM ; /* for Intel AVX */
+ /* add length for final */
+ AddLength(sha256, len);
- while (len) {
- word32 add = min(len, SHA256_BLOCK_SIZE - sha256->buffLen);
- XMEMCPY(&local[sha256->buffLen], data, add);
+ local = (byte*)sha256->buffer;
- sha256->buffLen += add;
- data += add;
- len -= add;
+ /* process any remainder from previous operation */
+ if (sha256->buffLen > 0) {
+ blocksLen = min(len, WC_SHA256_BLOCK_SIZE - sha256->buffLen);
+ XMEMCPY(&local[sha256->buffLen], data, blocksLen);
- if (sha256->buffLen == SHA256_BLOCK_SIZE) {
- int ret;
+ sha256->buffLen += blocksLen;
+ data += blocksLen;
+ len -= blocksLen;
- #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU)
+ if (sha256->buffLen == WC_SHA256_BLOCK_SIZE) {
+ #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU_SHA)
#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
- if(!IS_INTEL_AVX1 && !IS_INTEL_AVX2)
+ if (!IS_INTEL_AVX1(intel_flags) && !IS_INTEL_AVX2(intel_flags))
#endif
- ByteReverseWords(sha256->buffer, sha256->buffer,
- SHA256_BLOCK_SIZE);
+ {
+ ByteReverseWords(sha256->buffer, sha256->buffer,
+ WC_SHA256_BLOCK_SIZE);
+ }
#endif
- ret = XTRANSFORM(sha256, local);
- if (ret != 0)
- return ret;
- AddLength(sha256, SHA256_BLOCK_SIZE);
- sha256->buffLen = 0;
+ #if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+ if (sha256->ctx.mode == ESP32_SHA_INIT){
+ esp_sha_try_hw_lock(&sha256->ctx);
+ }
+ if (sha256->ctx.mode == ESP32_SHA_SW){
+ ret = XTRANSFORM(sha256, (const byte*)local);
+ } else {
+ esp_sha256_process(sha256, (const byte*)local);
+ }
+ #else
+ ret = XTRANSFORM(sha256, (const byte*)local);
+ #endif
+
+ if (ret == 0)
+ sha256->buffLen = 0;
+ else
+ len = 0; /* error */
+ }
+ }
+
+ /* process blocks */
+ #ifdef XTRANSFORM_LEN
+ #if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ if (Transform_Sha256_Len_p != NULL)
+ #endif
+ {
+ /* get number of whole blocks */
+ /* WC_SHA256_BLOCK_SIZE - 1 = 0x3F; inverted mask = 0xFFFFFFC0 */
+ /* masking len with 0xFFFFFFC0 yields the block-aligned length */
+ blocksLen = len & ~(WC_SHA256_BLOCK_SIZE-1);
+ if (blocksLen > 0) {
+ /* Byte reversal and alignment handled in function if required */
+ XTRANSFORM_LEN(sha256, data, blocksLen);
+ data += blocksLen;
+ len -= blocksLen;
+ }
}
+ #if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ else
+ #endif
+ #endif /* XTRANSFORM_LEN */
+ #if !defined(XTRANSFORM_LEN) || defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ {
+ while (len >= WC_SHA256_BLOCK_SIZE) {
+ word32* local32 = sha256->buffer;
+ /* optimization to avoid memcpy if data pointer is properly aligned */
+ /* Intel transform function requires use of sha256->buffer */
+ /* Little Endian requires byte swap, so can't use data directly */
+ #if defined(WC_HASH_DATA_ALIGNMENT) && !defined(LITTLE_ENDIAN_ORDER) && \
+ !defined(HAVE_INTEL_AVX1) && !defined(HAVE_INTEL_AVX2)
+ if (((size_t)data % WC_HASH_DATA_ALIGNMENT) == 0) {
+ local32 = (word32*)data;
+ }
+ else
+ #endif
+ {
+ XMEMCPY(local32, data, WC_SHA256_BLOCK_SIZE);
+ }
+
+ data += WC_SHA256_BLOCK_SIZE;
+ len -= WC_SHA256_BLOCK_SIZE;
+
+ #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU_SHA)
+ #if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ if (!IS_INTEL_AVX1(intel_flags) && !IS_INTEL_AVX2(intel_flags))
+ #endif
+ {
+ ByteReverseWords(local32, local32, WC_SHA256_BLOCK_SIZE);
+ }
+ #endif
+
+ #if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+ if (sha256->ctx.mode == ESP32_SHA_INIT){
+ esp_sha_try_hw_lock(&sha256->ctx);
+ }
+ if (sha256->ctx.mode == ESP32_SHA_SW){
+ ret = XTRANSFORM(sha256, (const byte*)local32);
+ } else {
+ esp_sha256_process(sha256, (const byte*)local32);
+ }
+ #else
+ ret = XTRANSFORM(sha256, (const byte*)local32);
+ #endif
+
+ if (ret != 0)
+ break;
+ }
+ }
+ #endif
+
+ /* save remainder */
+ if (len > 0) {
+ XMEMCPY(local, data, len);
+ sha256->buffLen = len;
+ }
+
+ return ret;
}
- return 0;
-}
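/* Sketch (illustration only): the block-aligned length used in the
 * XTRANSFORM_LEN path above comes from masking off the low bits: */
#include <assert.h>
int main(void)
{
    unsigned len = 200;
    unsigned blocksLen = len & ~(64u - 1u); /* mask 0xFFFFFFC0 */
    assert(blocksLen == 192);               /* three whole 64-byte blocks */
    assert(len - blocksLen == 8);           /* remainder kept for later */
    return 0;
}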
+ int wc_Sha256Update(wc_Sha256* sha256, const byte* data, word32 len)
+ {
+ if (sha256 == NULL || (data == NULL && len > 0)) {
+ return BAD_FUNC_ARG;
+ }
-int wc_Sha256Final(Sha256* sha256, byte* hash)
-{
- byte* local = (byte*)sha256->buffer;
- int ret;
-
- SAVE_XMM_YMM ; /* for Intel AVX */
+ if (data == NULL && len == 0) {
+ /* valid, but do nothing */
+ return 0;
+ }
+
+ #ifdef WOLF_CRYPTO_CB
+ if (sha256->devId != INVALID_DEVID) {
+ int ret = wc_CryptoCb_Sha256Hash(sha256, data, len, NULL);
+ if (ret != CRYPTOCB_UNAVAILABLE)
+ return ret;
+ /* fall-through when unavailable */
+ }
+ #endif
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA256)
+ if (sha256->asyncDev.marker == WOLFSSL_ASYNC_MARKER_SHA256) {
+ #if defined(HAVE_INTEL_QA)
+ return IntelQaSymSha256(&sha256->asyncDev, NULL, data, len);
+ #endif
+ }
+ #endif /* WOLFSSL_ASYNC_CRYPT */
+
+ return Sha256Update(sha256, data, len);
+ }
+
+ static WC_INLINE int Sha256Final(wc_Sha256* sha256)
+ {
+ int ret;
+ byte* local;
- AddLength(sha256, sha256->buffLen); /* before adding pads */
+ if (sha256 == NULL) {
+ return BAD_FUNC_ARG;
+ }
- local[sha256->buffLen++] = 0x80; /* add 1 */
+ local = (byte*)sha256->buffer;
+ local[sha256->buffLen++] = 0x80; /* add 1 */
- /* pad with zeros */
- if (sha256->buffLen > SHA256_PAD_SIZE) {
- XMEMSET(&local[sha256->buffLen], 0, SHA256_BLOCK_SIZE - sha256->buffLen);
- sha256->buffLen += SHA256_BLOCK_SIZE - sha256->buffLen;
+ /* pad with zeros */
+ if (sha256->buffLen > WC_SHA256_PAD_SIZE) {
+ XMEMSET(&local[sha256->buffLen], 0,
+ WC_SHA256_BLOCK_SIZE - sha256->buffLen);
+ sha256->buffLen += WC_SHA256_BLOCK_SIZE - sha256->buffLen;
- #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU)
+ #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU_SHA)
#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
- if(!IS_INTEL_AVX1 && !IS_INTEL_AVX2)
+ if (!IS_INTEL_AVX1(intel_flags) && !IS_INTEL_AVX2(intel_flags))
#endif
- ByteReverseWords(sha256->buffer, sha256->buffer, SHA256_BLOCK_SIZE);
+ {
+ ByteReverseWords(sha256->buffer, sha256->buffer,
+ WC_SHA256_BLOCK_SIZE);
+ }
#endif
- ret = XTRANSFORM(sha256, local);
- if (ret != 0)
- return ret;
+ #if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+ if (sha256->ctx.mode == ESP32_SHA_INIT) {
+ esp_sha_try_hw_lock(&sha256->ctx);
+ }
+ if (sha256->ctx.mode == ESP32_SHA_SW) {
+ ret = XTRANSFORM(sha256, (const byte*)local);
+ } else {
+ ret = esp_sha256_process(sha256, (const byte*)local);
+ }
+ #else
+ ret = XTRANSFORM(sha256, (const byte*)local);
+ #endif
+ if (ret != 0)
+ return ret;
- sha256->buffLen = 0;
- }
- XMEMSET(&local[sha256->buffLen], 0, SHA256_PAD_SIZE - sha256->buffLen);
+ sha256->buffLen = 0;
+ }
+ XMEMSET(&local[sha256->buffLen], 0,
+ WC_SHA256_PAD_SIZE - sha256->buffLen);
- /* put lengths in bits */
- sha256->hiLen = (sha256->loLen >> (8*sizeof(sha256->loLen) - 3)) +
- (sha256->hiLen << 3);
- sha256->loLen = sha256->loLen << 3;
+ /* put lengths in bits */
+ sha256->hiLen = (sha256->loLen >> (8 * sizeof(sha256->loLen) - 3)) +
+ (sha256->hiLen << 3);
+ sha256->loLen = sha256->loLen << 3;
- /* store lengths */
- #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU)
+ /* store lengths */
+ #if defined(LITTLE_ENDIAN_ORDER) && !defined(FREESCALE_MMCAU_SHA)
#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
- if(!IS_INTEL_AVX1 && !IS_INTEL_AVX2)
+ if (!IS_INTEL_AVX1(intel_flags) && !IS_INTEL_AVX2(intel_flags))
#endif
- ByteReverseWords(sha256->buffer, sha256->buffer, SHA256_BLOCK_SIZE);
+ {
+ ByteReverseWords(sha256->buffer, sha256->buffer,
+ WC_SHA256_BLOCK_SIZE);
+ }
#endif
- /* ! length ordering dependent on digest endian type ! */
- XMEMCPY(&local[SHA256_PAD_SIZE], &sha256->hiLen, sizeof(word32));
- XMEMCPY(&local[SHA256_PAD_SIZE + sizeof(word32)], &sha256->loLen,
- sizeof(word32));
+ /* ! length ordering depends on digest endian type ! */
+ XMEMCPY(&local[WC_SHA256_PAD_SIZE], &sha256->hiLen, sizeof(word32));
+ XMEMCPY(&local[WC_SHA256_PAD_SIZE + sizeof(word32)], &sha256->loLen,
+ sizeof(word32));
- #if defined(FREESCALE_MMCAU) || defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ #if defined(FREESCALE_MMCAU_SHA) || defined(HAVE_INTEL_AVX1) || \
+ defined(HAVE_INTEL_AVX2)
/* Kinetis requires only these bytes reversed */
#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
- if(IS_INTEL_AVX1 || IS_INTEL_AVX2)
+ if (IS_INTEL_AVX1(intel_flags) || IS_INTEL_AVX2(intel_flags))
#endif
- ByteReverseWords(&sha256->buffer[SHA256_PAD_SIZE/sizeof(word32)],
- &sha256->buffer[SHA256_PAD_SIZE/sizeof(word32)],
- 2 * sizeof(word32));
+ {
+ ByteReverseWords(
+ &sha256->buffer[WC_SHA256_PAD_SIZE / sizeof(word32)],
+ &sha256->buffer[WC_SHA256_PAD_SIZE / sizeof(word32)],
+ 2 * sizeof(word32));
+ }
+ #endif
+
+ #if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+ if (sha256->ctx.mode == ESP32_SHA_INIT) {
+ esp_sha_try_hw_lock(&sha256->ctx);
+ }
+ if (sha256->ctx.mode == ESP32_SHA_SW) {
+ ret = XTRANSFORM(sha256, (const byte*)local);
+ } else {
+ ret = esp_sha256_digest_process(sha256, 1);
+ }
+ #else
+ ret = XTRANSFORM(sha256, (const byte*)local);
#endif
- ret = XTRANSFORM(sha256, local);
- if (ret != 0)
return ret;
+ }
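/* Sketch (illustration only; padded_len() is a hypothetical helper): the
 * padding above appends 0x80, a zero fill, and the 64-bit bit-length, so
 * the padded size is the next 64-byte multiple with at least 9 spare
 * bytes; 55 data bytes still fit one block, 56 force a second. */
#include <assert.h>
static unsigned padded_len(unsigned msgLen)
{
    return ((msgLen + 8) / 64 + 1) * 64;
}
int main(void)
{
    assert(padded_len(55) == 64);
    assert(padded_len(56) == 128);
    return 0;
}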
- #if defined(LITTLE_ENDIAN_ORDER)
- ByteReverseWords(sha256->digest, sha256->digest, SHA256_DIGEST_SIZE);
+ int wc_Sha256FinalRaw(wc_Sha256* sha256, byte* hash)
+ {
+ #ifdef LITTLE_ENDIAN_ORDER
+ word32 digest[WC_SHA256_DIGEST_SIZE / sizeof(word32)];
#endif
- XMEMCPY(hash, sha256->digest, SHA256_DIGEST_SIZE);
- return wc_InitSha256(sha256); /* reset state */
-}
+ if (sha256 == NULL || hash == NULL) {
+ return BAD_FUNC_ARG;
+ }
+ #ifdef LITTLE_ENDIAN_ORDER
+ ByteReverseWords((word32*)digest, (word32*)sha256->digest,
+ WC_SHA256_DIGEST_SIZE);
+ XMEMCPY(hash, digest, WC_SHA256_DIGEST_SIZE);
+ #else
+ XMEMCPY(hash, sha256->digest, WC_SHA256_DIGEST_SIZE);
+ #endif
+ return 0;
+ }
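+ /* Note: unlike wc_Sha256Final(), wc_Sha256FinalRaw() copies out the
+ * current internal state without padding, length encoding, or a
+ * context reset, so hashing may continue afterwards. */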
-int wc_Sha256Hash(const byte* data, word32 len, byte* hash)
-{
- int ret = 0;
-#ifdef WOLFSSL_SMALL_STACK
- Sha256* sha256;
-#else
- Sha256 sha256[1];
-#endif
+ int wc_Sha256Final(wc_Sha256* sha256, byte* hash)
+ {
+ int ret;
-#ifdef WOLFSSL_SMALL_STACK
- sha256 = (Sha256*)XMALLOC(sizeof(Sha256), NULL, DYNAMIC_TYPE_TMP_BUFFER);
- if (sha256 == NULL)
- return MEMORY_E;
-#endif
+ if (sha256 == NULL || hash == NULL) {
+ return BAD_FUNC_ARG;
+ }
- if ((ret = wc_InitSha256(sha256)) != 0) {
- WOLFSSL_MSG("InitSha256 failed");
+ #ifdef WOLF_CRYPTO_CB
+ if (sha256->devId != INVALID_DEVID) {
+ ret = wc_CryptoCb_Sha256Hash(sha256, NULL, 0, hash);
+ if (ret != CRYPTOCB_UNAVAILABLE)
+ return ret;
+ /* fall-through when unavailable */
+ }
+ #endif
+
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA256)
+ if (sha256->asyncDev.marker == WOLFSSL_ASYNC_MARKER_SHA256) {
+ #if defined(HAVE_INTEL_QA)
+ return IntelQaSymSha256(&sha256->asyncDev, hash, NULL,
+ WC_SHA256_DIGEST_SIZE);
+ #endif
+ }
+ #endif /* WOLFSSL_ASYNC_CRYPT */
+
+ ret = Sha256Final(sha256);
+ if (ret != 0)
+ return ret;
+
+ #if defined(LITTLE_ENDIAN_ORDER)
+ ByteReverseWords(sha256->digest, sha256->digest, WC_SHA256_DIGEST_SIZE);
+ #endif
+ XMEMCPY(hash, sha256->digest, WC_SHA256_DIGEST_SIZE);
+
+ return InitSha256(sha256); /* reset state */
}
- else if ((ret = wc_Sha256Update(sha256, data, len)) != 0) {
- WOLFSSL_MSG("Sha256Update failed");
+
+#endif /* XTRANSFORM */
+
+#ifdef WOLFSSL_SHA224
+
+#ifdef STM32_HASH_SHA2
+
+ /* Supports CubeMX HAL or Standard Peripheral Library */
+
+ int wc_InitSha224_ex(wc_Sha224* sha224, void* heap, int devId)
+ {
+ if (sha224 == NULL)
+ return BAD_FUNC_ARG;
+
+ (void)devId;
+ (void)heap;
+
+ wc_Stm32_Hash_Init(&sha224->stmCtx);
+ return 0;
}
- else if ((ret = wc_Sha256Final(sha256, hash)) != 0) {
- WOLFSSL_MSG("Sha256Final failed");
+
+ int wc_Sha224Update(wc_Sha224* sha224, const byte* data, word32 len)
+ {
+ int ret = 0;
+
+ if (sha224 == NULL || (data == NULL && len > 0)) {
+ return BAD_FUNC_ARG;
+ }
+
+ ret = wolfSSL_CryptHwMutexLock();
+ if (ret == 0) {
+ ret = wc_Stm32_Hash_Update(&sha224->stmCtx,
+ HASH_AlgoSelection_SHA224, data, len);
+ wolfSSL_CryptHwMutexUnLock();
+ }
+ return ret;
}
-#ifdef WOLFSSL_SMALL_STACK
- XFREE(sha256, NULL, DYNAMIC_TYPE_TMP_BUFFER);
-#endif
+ int wc_Sha224Final(wc_Sha224* sha224, byte* hash)
+ {
+ int ret = 0;
- return ret;
-}
+ if (sha224 == NULL || hash == NULL) {
+ return BAD_FUNC_ARG;
+ }
-#if defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)
+ ret = wolfSSL_CryptHwMutexLock();
+ if (ret == 0) {
+ ret = wc_Stm32_Hash_Final(&sha224->stmCtx,
+ HASH_AlgoSelection_SHA224, hash, WC_SHA224_DIGEST_SIZE);
+ wolfSSL_CryptHwMutexUnLock();
+ }
-#define _DigestToReg(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- { word32 d ;\
- d = sha256->digest[0]; __asm__ volatile("movl %0, %"#S_0::"r"(d):SSE_REGs) ;\
- d = sha256->digest[1]; __asm__ volatile("movl %0, %"#S_1::"r"(d):SSE_REGs) ;\
- d = sha256->digest[2]; __asm__ volatile("movl %0, %"#S_2::"r"(d):SSE_REGs) ;\
- d = sha256->digest[3]; __asm__ volatile("movl %0, %"#S_3::"r"(d):SSE_REGs) ;\
- d = sha256->digest[4]; __asm__ volatile("movl %0, %"#S_4::"r"(d):SSE_REGs) ;\
- d = sha256->digest[5]; __asm__ volatile("movl %0, %"#S_5::"r"(d):SSE_REGs) ;\
- d = sha256->digest[6]; __asm__ volatile("movl %0, %"#S_6::"r"(d):SSE_REGs) ;\
- d = sha256->digest[7]; __asm__ volatile("movl %0, %"#S_7::"r"(d):SSE_REGs) ;\
-}
+ (void)wc_InitSha224(sha224); /* reset state */
-#define _RegToDigest(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- { word32 d ; \
- __asm__ volatile("movl %"#S_0", %0":"=r"(d)::SSE_REGs) ; sha256->digest[0] += d;\
- __asm__ volatile("movl %"#S_1", %0":"=r"(d)::SSE_REGs) ; sha256->digest[1] += d;\
- __asm__ volatile("movl %"#S_2", %0":"=r"(d)::SSE_REGs) ; sha256->digest[2] += d;\
- __asm__ volatile("movl %"#S_3", %0":"=r"(d)::SSE_REGs) ; sha256->digest[3] += d;\
- __asm__ volatile("movl %"#S_4", %0":"=r"(d)::SSE_REGs) ; sha256->digest[4] += d;\
- __asm__ volatile("movl %"#S_5", %0":"=r"(d)::SSE_REGs) ; sha256->digest[5] += d;\
- __asm__ volatile("movl %"#S_6", %0":"=r"(d)::SSE_REGs) ; sha256->digest[6] += d;\
- __asm__ volatile("movl %"#S_7", %0":"=r"(d)::SSE_REGs) ; sha256->digest[7] += d;\
-}
+ return ret;
+ }
+#elif defined(WOLFSSL_IMX6_CAAM) && !defined(NO_IMX6_CAAM_HASH)
+ /* functions defined in wolfcrypt/src/port/caam/caam_sha256.c */
-#define DigestToReg(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- _DigestToReg(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )
+#elif defined(WOLFSSL_AFALG_HASH)
+ #error SHA224 currently not supported with AF_ALG enabled
-#define RegToDigest(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- _RegToDigest(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )
+#elif defined(WOLFSSL_DEVCRYPTO_HASH)
+ /* implemented in wolfcrypt/src/port/devcrypto/devcrypt_hash.c */
+#else
+ #define NEED_SOFT_SHA224
-#define S_0 %r15d
-#define S_1 %r10d
-#define S_2 %r11d
-#define S_3 %r12d
-#define S_4 %r13d
-#define S_5 %r14d
-#define S_6 %ebx
-#define S_7 %r9d
+ static int InitSha224(wc_Sha224* sha224)
+ {
+ int ret = 0;
-#define SSE_REGs "%edi", "%ecx", "%esi", "%edx", "%ebx","%r8","%r9","%r10","%r11","%r12","%r13","%r14","%r15"
+ if (sha224 == NULL) {
+ return BAD_FUNC_ARG;
+ }
-#if defined(HAVE_INTEL_RORX)
-#define RND_STEP_RORX_1(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("rorx $6, %"#e", %%edx\n\t":::"%edx",SSE_REGs); /* edx = e>>6 */\
+ sha224->digest[0] = 0xc1059ed8;
+ sha224->digest[1] = 0x367cd507;
+ sha224->digest[2] = 0x3070dd17;
+ sha224->digest[3] = 0xf70e5939;
+ sha224->digest[4] = 0xffc00b31;
+ sha224->digest[5] = 0x68581511;
+ sha224->digest[6] = 0x64f98fa7;
+ sha224->digest[7] = 0xbefa4fa4;
+
+ sha224->buffLen = 0;
+ sha224->loLen = 0;
+ sha224->hiLen = 0;
+
+ #if defined(HAVE_INTEL_AVX1)|| defined(HAVE_INTEL_AVX2)
+ /* choose the best Transform function for this runtime environment */
+ Sha256_SetTransform();
+ #endif
+ #if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
+ sha224->flags = 0;
+ #endif
-#define RND_STEP_RORX_2(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("rorx $11, %"#e",%%edi\n\t":::"%edi",SSE_REGs); /* edi = e>>11 */\
-__asm__ volatile("xorl %%edx, %%edi\n\t":::"%edx","%edi",SSE_REGs); /* edi = (e>>11) ^ (e>>6) */\
-__asm__ volatile("rorx $25, %"#e", %%edx\n\t":::"%edx",SSE_REGs); /* edx = e>>25 */\
+ return ret;
+ }
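+ /* Note: per FIPS 180-4, the SHA-224 initial values above are the
+ * second 32 bits of the fractional parts of the square roots of
+ * the 9th through 16th primes (23, 29, 31, 37, 41, 43, 47, 53). */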
-#define RND_STEP_RORX_3(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("movl %"#f", %%esi\n\t":::"%esi",SSE_REGs); /* esi = f */\
-__asm__ volatile("xorl %"#g", %%esi\n\t":::"%esi",SSE_REGs); /* esi = f ^ g */\
-__asm__ volatile("xorl %%edi, %%edx\n\t":::"%edi","%edx",SSE_REGs); /* edx = Sigma1(e) */\
-__asm__ volatile("andl %"#e", %%esi\n\t":::"%esi",SSE_REGs); /* esi = (f ^ g) & e */\
-__asm__ volatile("xorl %"#g", %%esi\n\t":::"%esi",SSE_REGs); /* esi = Ch(e,f,g) */\
+#endif
-#define RND_STEP_RORX_4(a,b,c,d,e,f,g,h,i)\
-/*__asm__ volatile("movl %0, %%edx\n\t"::"m"(w_k):"%edx");*/\
-__asm__ volatile("addl %0, %"#h"\n\t"::"r"(W_K[i]):SSE_REGs); /* h += w_k */\
-__asm__ volatile("addl %%edx, %"#h"\n\t":::"%edx",SSE_REGs); /* h = h + w_k + Sigma1(e) */\
-__asm__ volatile("rorx $2, %"#a", %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = a>>2 */\
-__asm__ volatile("rorx $13, %"#a", %%edi\n\t":::"%edi",SSE_REGs);/* edi = a>>13 */\
+#ifdef NEED_SOFT_SHA224
+ int wc_InitSha224_ex(wc_Sha224* sha224, void* heap, int devId)
+ {
+ int ret = 0;
-#define RND_STEP_RORX_5(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("rorx $22, %"#a", %%edx\n\t":::"%edx",SSE_REGs); /* edx = a>>22 */\
-__asm__ volatile("xorl %%r8d, %%edi\n\t":::"%edi","%r8",SSE_REGs);/* edi = (a>>2) ^ (a>>13) */\
-__asm__ volatile("xorl %%edi, %%edx\n\t":::"%edi","%edx",SSE_REGs); /* edx = Sigma0(a) */\
-
-#define RND_STEP_RORX_6(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("movl %"#b", %%edi\n\t":::"%edi",SSE_REGs); /* edi = b */\
-__asm__ volatile("orl %"#a", %%edi\n\t":::"%edi",SSE_REGs); /* edi = a | b */\
-__asm__ volatile("andl %"#c", %%edi\n\t":::"%edi",SSE_REGs); /* edi = (a | b) & c*/\
-__asm__ volatile("movl %"#b", %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = b */\
+ if (sha224 == NULL)
+ return BAD_FUNC_ARG;
-#define RND_STEP_RORX_7(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("addl %%esi, %"#h"\n\t":::"%esi",SSE_REGs); /* h += Ch(e,f,g) */\
-__asm__ volatile("andl %"#a", %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = b & a */\
-__asm__ volatile("orl %%edi, %%r8d\n\t":::"%edi","%r8",SSE_REGs); /* r8d = Maj(a,b,c) */\
+ sha224->heap = heap;
-#define RND_STEP_RORX_8(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("addl "#h", "#d"\n\t"); /* d += h + w_k + Sigma1(e) + Ch(e,f,g) */\
-__asm__ volatile("addl %"#h", %%r8d\n\t":::"%r8",SSE_REGs); \
-__asm__ volatile("addl %%edx, %%r8d\n\t":::"%edx","%r8",SSE_REGs); \
-__asm__ volatile("movl %r8d, "#h"\n\t");
+ ret = InitSha224(sha224);
+ if (ret != 0)
+ return ret;
-#endif
+ #ifdef WOLFSSL_SMALL_STACK_CACHE
+ sha224->W = NULL;
+ #endif
-#define RND_STEP_1(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("movl %"#e", %%edx\n\t":::"%edx",SSE_REGs);\
-__asm__ volatile("roll $26, %%edx\n\t":::"%edx",SSE_REGs); /* edx = e>>6 */\
-__asm__ volatile("movl %"#e", %%edi\n\t":::"%edi",SSE_REGs);\
-
-#define RND_STEP_2(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("roll $21, %%edi\n\t":::"%edi",SSE_REGs); /* edi = e>>11 */\
-__asm__ volatile("xorl %%edx, %%edi\n\t":::"%edx","%edi",SSE_REGs); /* edi = (e>>11) ^ (e>>6) */\
-__asm__ volatile("movl %"#e", %%edx\n\t":::"%edx",SSE_REGs); /* edx = e */\
-__asm__ volatile("roll $7, %%edx\n\t":::"%edx",SSE_REGs); /* edx = e>>25 */\
-
-#define RND_STEP_3(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("movl %"#f", %%esi\n\t":::"%esi",SSE_REGs); /* esi = f */\
-__asm__ volatile("xorl %"#g", %%esi\n\t":::"%esi",SSE_REGs); /* esi = f ^ g */\
-__asm__ volatile("xorl %%edi, %%edx\n\t":::"%edi","%edx",SSE_REGs); /* edx = Sigma1(e) */\
-__asm__ volatile("andl %"#e", %%esi\n\t":::"%esi",SSE_REGs); /* esi = (f ^ g) & e */\
-__asm__ volatile("xorl %"#g", %%esi\n\t":::"%esi",SSE_REGs); /* esi = Ch(e,f,g) */\
-
-#define RND_STEP_4(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("addl %0, %"#h"\n\t"::"r"(W_K[i]):SSE_REGs); /* h += w_k */\
-__asm__ volatile("addl %%edx, %"#h"\n\t":::"%edx",SSE_REGs); /* h = h + w_k + Sigma1(e) */\
-__asm__ volatile("movl %"#a", %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = a */\
-__asm__ volatile("roll $30, %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = a>>2 */\
-__asm__ volatile("movl %"#a", %%edi\n\t":::"%edi",SSE_REGs); /* edi = a */\
-__asm__ volatile("roll $19, %%edi\n\t":::"%edi",SSE_REGs); /* edi = a>>13 */\
-__asm__ volatile("movl %"#a", %%edx\n\t":::"%edx",SSE_REGs); /* edx = a */\
-
-#define RND_STEP_5(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("roll $10, %%edx\n\t":::"%edx",SSE_REGs); /* edx = a>>22 */\
-__asm__ volatile("xorl %%r8d, %%edi\n\t":::"%edi","%r8",SSE_REGs); /* edi = (a>>2) ^ (a>>13) */\
-__asm__ volatile("xorl %%edi, %%edx\n\t":::"%edi","%edx",SSE_REGs);/* edx = Sigma0(a) */\
-
-#define RND_STEP_6(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("movl %"#b", %%edi\n\t":::"%edi",SSE_REGs); /* edi = b */\
-__asm__ volatile("orl %"#a", %%edi\n\t":::"%edi",SSE_REGs); /* edi = a | b */\
-__asm__ volatile("andl %"#c", %%edi\n\t":::"%edi",SSE_REGs); /* edi = (a | b) & c */\
-__asm__ volatile("movl %"#b", %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = b */\
-
-#define RND_STEP_7(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("addl %%esi, %"#h"\n\t":::"%esi",SSE_REGs); /* h += Ch(e,f,g) */\
-__asm__ volatile("andl %"#a", %%r8d\n\t":::"%r8",SSE_REGs); /* r8d = b & a */\
-__asm__ volatile("orl %%edi, %%r8d\n\t":::"%edi","%r8",SSE_REGs); /* r8d = Maj(a,b,c) */\
-
-#define RND_STEP_8(a,b,c,d,e,f,g,h,i)\
-__asm__ volatile("addl "#h", "#d"\n\t"); /* d += h + w_k + Sigma1(e) + Ch(e,f,g) */\
-__asm__ volatile("addl %"#h", %%r8d\n\t":::"%r8",SSE_REGs); \
- /* r8b = h + w_k + Sigma1(e) + Ch(e,f,g) + Maj(a,b,c) */\
-__asm__ volatile("addl %%edx, %%r8d\n\t":::"%edx","%r8",SSE_REGs);\
- /* r8b = h + w_k + Sigma1(e) Sigma0(a) + Ch(e,f,g) + Maj(a,b,c) */\
-__asm__ volatile("movl %%r8d, %"#h"\n\t":::"%r8", SSE_REGs); \
- /* h = h + w_k + Sigma1(e) + Sigma0(a) + Ch(e,f,g) + Maj(a,b,c) */ \
-
-#define RND_X(a,b,c,d,e,f,g,h,i) \
- RND_STEP_1(a,b,c,d,e,f,g,h,i); \
- RND_STEP_2(a,b,c,d,e,f,g,h,i); \
- RND_STEP_3(a,b,c,d,e,f,g,h,i); \
- RND_STEP_4(a,b,c,d,e,f,g,h,i); \
- RND_STEP_5(a,b,c,d,e,f,g,h,i); \
- RND_STEP_6(a,b,c,d,e,f,g,h,i); \
- RND_STEP_7(a,b,c,d,e,f,g,h,i); \
- RND_STEP_8(a,b,c,d,e,f,g,h,i);
-
-#define RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i);
-#define RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_7,S_0,S_1,S_2,S_3,S_4,S_5,S_6,_i);
-#define RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_6,S_7,S_0,S_1,S_2,S_3,S_4,S_5,_i);
-#define RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_5,S_6,S_7,S_0,S_1,S_2,S_3,S_4,_i);
-#define RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,_i);
-#define RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_3,S_4,S_5,S_6,S_7,S_0,S_1,S_2,_i);
-#define RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_2,S_3,S_4,S_5,S_6,S_7,S_0,S_1,_i);
-#define RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_1,S_2,S_3,S_4,S_5,S_6,S_7,S_0,_i);
-
-
-#define RND_1_3(a,b,c,d,e,f,g,h,i) {\
- RND_STEP_1(a,b,c,d,e,f,g,h,i); \
- RND_STEP_2(a,b,c,d,e,f,g,h,i); \
- RND_STEP_3(a,b,c,d,e,f,g,h,i); \
-}
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA224)
+ ret = wolfAsync_DevCtxInit(&sha224->asyncDev,
+ WOLFSSL_ASYNC_MARKER_SHA224, sha224->heap, devId);
+ #else
+ (void)devId;
+ #endif /* WOLFSSL_ASYNC_CRYPT */
-#define RND_4_6(a,b,c,d,e,f,g,h,i) {\
- RND_STEP_4(a,b,c,d,e,f,g,h,i); \
- RND_STEP_5(a,b,c,d,e,f,g,h,i); \
- RND_STEP_6(a,b,c,d,e,f,g,h,i); \
-}
+ return ret;
+ }
-#define RND_7_8(a,b,c,d,e,f,g,h,i) {\
- RND_STEP_7(a,b,c,d,e,f,g,h,i); \
- RND_STEP_8(a,b,c,d,e,f,g,h,i); \
-}
+ int wc_Sha224Update(wc_Sha224* sha224, const byte* data, word32 len)
+ {
+ int ret;
-#define RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i);
-#define RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_7,S_0,S_1,S_2,S_3,S_4,S_5,S_6,_i);
-#define RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_6,S_7,S_0,S_1,S_2,S_3,S_4,S_5,_i);
-#define RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_5,S_6,S_7,S_0,S_1,S_2,S_3,S_4,_i);
-#define RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,_i);
-#define RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_3,S_4,S_5,S_6,S_7,S_0,S_1,S_2,_i);
-#define RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_2,S_3,S_4,S_5,S_6,S_7,S_0,S_1,_i);
-#define RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_X(S_1,S_2,S_3,S_4,S_5,S_6,S_7,S_0,_i);
-
-
-#define RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i);
-#define RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_7,S_0,S_1,S_2,S_3,S_4,S_5,S_6,_i);
-#define RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_6,S_7,S_0,S_1,S_2,S_3,S_4,S_5,_i);
-#define RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_5,S_6,S_7,S_0,S_1,S_2,S_3,S_4,_i);
-#define RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,_i);
-#define RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_3,S_4,S_5,S_6,S_7,S_0,S_1,S_2,_i);
-#define RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_2,S_3,S_4,S_5,S_6,S_7,S_0,S_1,_i);
-#define RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_1_3(S_1,S_2,S_3,S_4,S_5,S_6,S_7,S_0,_i);
-
-#define RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i);
-#define RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_7,S_0,S_1,S_2,S_3,S_4,S_5,S_6,_i);
-#define RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_6,S_7,S_0,S_1,S_2,S_3,S_4,S_5,_i);
-#define RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_5,S_6,S_7,S_0,S_1,S_2,S_3,S_4,_i);
-#define RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,_i);
-#define RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_3,S_4,S_5,S_6,S_7,S_0,S_1,S_2,_i);
-#define RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_2,S_3,S_4,S_5,S_6,S_7,S_0,S_1,_i);
-#define RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_4_6(S_1,S_2,S_3,S_4,S_5,S_6,S_7,S_0,_i);
-
-#define RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i);
-#define RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_7,S_0,S_1,S_2,S_3,S_4,S_5,S_6,_i);
-#define RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_6,S_7,S_0,S_1,S_2,S_3,S_4,S_5,_i);
-#define RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_5,S_6,S_7,S_0,S_1,S_2,S_3,S_4,_i);
-#define RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,_i);
-#define RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_3,S_4,S_5,S_6,S_7,S_0,S_1,S_2,_i);
-#define RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_2,S_3,S_4,S_5,S_6,S_7,S_0,S_1,_i);
-#define RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,_i) RND_7_8(S_1,S_2,S_3,S_4,S_5,S_6,S_7,S_0,_i);
-
-#define FOR(cnt, init, max, inc, loop) \
- __asm__ volatile("movl $"#init", %0\n\t"#loop":"::"m"(cnt):)
-#define END(cnt, init, max, inc, loop) \
- __asm__ volatile("addl $"#inc", %0\n\tcmpl $"#max", %0\n\tjle "#loop"\n\t":"=m"(cnt)::) ;
-
-#endif /* defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2) */
-
-#if defined(HAVE_INTEL_AVX1) /* inline Assember for Intel AVX1 instructions */
-
-#define VPALIGNR(op1,op2,op3,op4) __asm__ volatile("vpalignr $"#op4", %"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPADDD(op1,op2,op3) __asm__ volatile("vpaddd %"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPSRLD(op1,op2,op3) __asm__ volatile("vpsrld $"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPSRLQ(op1,op2,op3) __asm__ volatile("vpsrlq $"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPSLLD(op1,op2,op3) __asm__ volatile("vpslld $"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPOR(op1,op2,op3) __asm__ volatile("vpor %"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPXOR(op1,op2,op3) __asm__ volatile("vpxor %"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPSHUFD(op1,op2,op3) __asm__ volatile("vpshufd $"#op3", %"#op2", %"#op1:::XMM_REGs)
-#define VPSHUFB(op1,op2,op3) __asm__ volatile("vpshufb %"#op3", %"#op2", %"#op1:::XMM_REGs)
-
-#define MessageSched(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER, SHUF_00BA, SHUF_DC00,\
- a,b,c,d,e,f,g,h,_i)\
- RND_STEP_1(a,b,c,d,e,f,g,h,_i);\
- VPALIGNR (XTMP0, X3, X2, 4) ;\
- RND_STEP_2(a,b,c,d,e,f,g,h,_i);\
- VPADDD (XTMP0, XTMP0, X0) ;\
- RND_STEP_3(a,b,c,d,e,f,g,h,_i);\
- VPALIGNR (XTMP1, X1, X0, 4) ; /* XTMP1 = W[-15] */\
- RND_STEP_4(a,b,c,d,e,f,g,h,_i);\
- VPSRLD (XTMP2, XTMP1, 7) ;\
- RND_STEP_5(a,b,c,d,e,f,g,h,_i);\
- VPSLLD (XTMP3, XTMP1, 25) ; /* VPSLLD (XTMP3, XTMP1, (32-7)) */\
- RND_STEP_6(a,b,c,d,e,f,g,h,_i);\
- VPOR (XTMP3, XTMP3, XTMP2) ; /* XTMP1 = W[-15] MY_ROR 7 */\
- RND_STEP_7(a,b,c,d,e,f,g,h,_i);\
- VPSRLD (XTMP2, XTMP1,18) ;\
- RND_STEP_8(a,b,c,d,e,f,g,h,_i);\
-\
- RND_STEP_1(h,a,b,c,d,e,f,g,_i+1);\
- VPSRLD (XTMP4, XTMP1, 3) ; /* XTMP4 = W[-15] >> 3 */\
- RND_STEP_2(h,a,b,c,d,e,f,g,_i+1);\
- VPSLLD (XTMP1, XTMP1, 14) ; /* VPSLLD (XTMP1, XTMP1, (32-18)) */\
- RND_STEP_3(h,a,b,c,d,e,f,g,_i+1);\
- VPXOR (XTMP3, XTMP3, XTMP1) ;\
- RND_STEP_4(h,a,b,c,d,e,f,g,_i+1);\
- VPXOR (XTMP3, XTMP3, XTMP2) ; /* XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR 18 */\
- RND_STEP_5(h,a,b,c,d,e,f,g,_i+1);\
- VPXOR (XTMP1, XTMP3, XTMP4) ; /* XTMP1 = s0 */\
- RND_STEP_6(h,a,b,c,d,e,f,g,_i+1);\
- VPSHUFD(XTMP2, X3, 0b11111010) ; /* XTMP2 = W[-2] {BBAA}*/\
- RND_STEP_7(h,a,b,c,d,e,f,g,_i+1);\
- VPADDD (XTMP0, XTMP0, XTMP1) ; /* XTMP0 = W[-16] + W[-7] + s0 */\
- RND_STEP_8(h,a,b,c,d,e,f,g,_i+1);\
-\
- RND_STEP_1(g,h,a,b,c,d,e,f,_i+2);\
- VPSRLD (XTMP4, XTMP2, 10) ; /* XTMP4 = W[-2] >> 10 {BBAA} */\
- RND_STEP_2(g,h,a,b,c,d,e,f,_i+2);\
- VPSRLQ (XTMP3, XTMP2, 19) ; /* XTMP3 = W[-2] MY_ROR 19 {xBxA} */\
- RND_STEP_3(g,h,a,b,c,d,e,f,_i+2);\
- VPSRLQ (XTMP2, XTMP2, 17) ; /* XTMP2 = W[-2] MY_ROR 17 {xBxA} */\
- RND_STEP_4(g,h,a,b,c,d,e,f,_i+2);\
- VPXOR (XTMP2, XTMP2, XTMP3) ;\
- RND_STEP_5(g,h,a,b,c,d,e,f,_i+2);\
- VPXOR (XTMP4, XTMP4, XTMP2) ; /* XTMP4 = s1 {xBxA} */\
- RND_STEP_6(g,h,a,b,c,d,e,f,_i+2);\
- VPSHUFB (XTMP4, XTMP4, SHUF_00BA) ; /* XTMP4 = s1 {00BA} */\
- RND_STEP_7(g,h,a,b,c,d,e,f,_i+2);\
- VPADDD (XTMP0, XTMP0, XTMP4) ; /* XTMP0 = {..., ..., W[1], W[0]} */\
- RND_STEP_8(g,h,a,b,c,d,e,f,_i+2);\
-\
- RND_STEP_1(f,g,h,a,b,c,d,e,_i+3);\
- VPSHUFD (XTMP2, XTMP0, 0b01010000) ; /* XTMP2 = W[-2] {DDCC} */\
- RND_STEP_2(f,g,h,a,b,c,d,e,_i+3);\
- VPSRLD (XTMP5, XTMP2, 10); /* XTMP5 = W[-2] >> 10 {DDCC} */\
- RND_STEP_3(f,g,h,a,b,c,d,e,_i+3);\
- VPSRLQ (XTMP3, XTMP2, 19); /* XTMP3 = W[-2] MY_ROR 19 {xDxC} */\
- RND_STEP_4(f,g,h,a,b,c,d,e,_i+3);\
- VPSRLQ (XTMP2, XTMP2, 17) ; /* XTMP2 = W[-2] MY_ROR 17 {xDxC} */\
- RND_STEP_5(f,g,h,a,b,c,d,e,_i+3);\
- VPXOR (XTMP2, XTMP2, XTMP3) ;\
- RND_STEP_6(f,g,h,a,b,c,d,e,_i+3);\
- VPXOR (XTMP5, XTMP5, XTMP2) ; /* XTMP5 = s1 {xDxC} */\
- RND_STEP_7(f,g,h,a,b,c,d,e,_i+3);\
- VPSHUFB (XTMP5, XTMP5, SHUF_DC00) ; /* XTMP5 = s1 {DC00} */\
- RND_STEP_8(f,g,h,a,b,c,d,e,_i+3);\
- VPADDD (X0, XTMP5, XTMP0) ; /* X0 = {W[3], W[2], W[1], W[0]} */\
-
-#if defined(HAVE_INTEL_RORX)
-
-#define MessageSched_RORX(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, \
- XFER, SHUF_00BA, SHUF_DC00,a,b,c,d,e,f,g,h,_i)\
- RND_STEP_RORX_1(a,b,c,d,e,f,g,h,_i);\
- VPALIGNR (XTMP0, X3, X2, 4) ;\
- RND_STEP_RORX_2(a,b,c,d,e,f,g,h,_i);\
- VPADDD (XTMP0, XTMP0, X0) ;\
- RND_STEP_RORX_3(a,b,c,d,e,f,g,h,_i);\
- VPALIGNR (XTMP1, X1, X0, 4) ; /* XTMP1 = W[-15] */\
- RND_STEP_RORX_4(a,b,c,d,e,f,g,h,_i);\
- VPSRLD (XTMP2, XTMP1, 7) ;\
- RND_STEP_RORX_5(a,b,c,d,e,f,g,h,_i);\
- VPSLLD (XTMP3, XTMP1, 25) ; /* VPSLLD (XTMP3, XTMP1, (32-7)) */\
- RND_STEP_RORX_6(a,b,c,d,e,f,g,h,_i);\
- VPOR (XTMP3, XTMP3, XTMP2) ; /* XTMP1 = W[-15] MY_ROR 7 */\
- RND_STEP_RORX_7(a,b,c,d,e,f,g,h,_i);\
- VPSRLD (XTMP2, XTMP1,18) ;\
- RND_STEP_RORX_8(a,b,c,d,e,f,g,h,_i);\
-\
- RND_STEP_RORX_1(h,a,b,c,d,e,f,g,_i+1);\
- VPSRLD (XTMP4, XTMP1, 3) ; /* XTMP4 = W[-15] >> 3 */\
- RND_STEP_RORX_2(h,a,b,c,d,e,f,g,_i+1);\
- VPSLLD (XTMP1, XTMP1, 14) ; /* VPSLLD (XTMP1, XTMP1, (32-18)) */\
- RND_STEP_RORX_3(h,a,b,c,d,e,f,g,_i+1);\
- VPXOR (XTMP3, XTMP3, XTMP1) ;\
- RND_STEP_RORX_4(h,a,b,c,d,e,f,g,_i+1);\
- VPXOR (XTMP3, XTMP3, XTMP2) ; /* XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR 18 */\
- RND_STEP_RORX_5(h,a,b,c,d,e,f,g,_i+1);\
- VPXOR (XTMP1, XTMP3, XTMP4) ; /* XTMP1 = s0 */\
- RND_STEP_RORX_6(h,a,b,c,d,e,f,g,_i+1);\
- VPSHUFD(XTMP2, X3, 0b11111010) ; /* XTMP2 = W[-2] {BBAA}*/\
- RND_STEP_RORX_7(h,a,b,c,d,e,f,g,_i+1);\
- VPADDD (XTMP0, XTMP0, XTMP1) ; /* XTMP0 = W[-16] + W[-7] + s0 */\
- RND_STEP_RORX_8(h,a,b,c,d,e,f,g,_i+1);\
-\
- RND_STEP_RORX_1(g,h,a,b,c,d,e,f,_i+2);\
- VPSRLD (XTMP4, XTMP2, 10) ; /* XTMP4 = W[-2] >> 10 {BBAA} */\
- RND_STEP_RORX_2(g,h,a,b,c,d,e,f,_i+2);\
- VPSRLQ (XTMP3, XTMP2, 19) ; /* XTMP3 = W[-2] MY_ROR 19 {xBxA} */\
- RND_STEP_RORX_3(g,h,a,b,c,d,e,f,_i+2);\
- VPSRLQ (XTMP2, XTMP2, 17) ; /* XTMP2 = W[-2] MY_ROR 17 {xBxA} */\
- RND_STEP_RORX_4(g,h,a,b,c,d,e,f,_i+2);\
- VPXOR (XTMP2, XTMP2, XTMP3) ;\
- RND_STEP_RORX_5(g,h,a,b,c,d,e,f,_i+2);\
- VPXOR (XTMP4, XTMP4, XTMP2) ; /* XTMP4 = s1 {xBxA} */\
- RND_STEP_RORX_6(g,h,a,b,c,d,e,f,_i+2);\
- VPSHUFB (XTMP4, XTMP4, SHUF_00BA) ; /* XTMP4 = s1 {00BA} */\
- RND_STEP_RORX_7(g,h,a,b,c,d,e,f,_i+2);\
- VPADDD (XTMP0, XTMP0, XTMP4) ; /* XTMP0 = {..., ..., W[1], W[0]} */\
- RND_STEP_RORX_8(g,h,a,b,c,d,e,f,_i+2);\
-\
- RND_STEP_RORX_1(f,g,h,a,b,c,d,e,_i+3);\
- VPSHUFD (XTMP2, XTMP0, 0b01010000) ; /* XTMP2 = W[-2] {DDCC} */\
- RND_STEP_RORX_2(f,g,h,a,b,c,d,e,_i+3);\
- VPSRLD (XTMP5, XTMP2, 10); /* XTMP5 = W[-2] >> 10 {DDCC} */\
- RND_STEP_RORX_3(f,g,h,a,b,c,d,e,_i+3);\
- VPSRLQ (XTMP3, XTMP2, 19); /* XTMP3 = W[-2] MY_ROR 19 {xDxC} */\
- RND_STEP_RORX_4(f,g,h,a,b,c,d,e,_i+3);\
- VPSRLQ (XTMP2, XTMP2, 17) ; /* XTMP2 = W[-2] MY_ROR 17 {xDxC} */\
- RND_STEP_RORX_5(f,g,h,a,b,c,d,e,_i+3);\
- VPXOR (XTMP2, XTMP2, XTMP3) ;\
- RND_STEP_RORX_6(f,g,h,a,b,c,d,e,_i+3);\
- VPXOR (XTMP5, XTMP5, XTMP2) ; /* XTMP5 = s1 {xDxC} */\
- RND_STEP_RORX_7(f,g,h,a,b,c,d,e,_i+3);\
- VPSHUFB (XTMP5, XTMP5, SHUF_DC00) ; /* XTMP5 = s1 {DC00} */\
- RND_STEP_RORX_8(f,g,h,a,b,c,d,e,_i+3);\
- VPADDD (X0, XTMP5, XTMP0) ; /* X0 = {W[3], W[2], W[1], W[0]} */\
+ if (sha224 == NULL || (data == NULL && len > 0)) {
+ return BAD_FUNC_ARG;
+ }
-#endif
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA224)
+ if (sha224->asyncDev.marker == WOLFSSL_ASYNC_MARKER_SHA224) {
+ #if defined(HAVE_INTEL_QA)
+ return IntelQaSymSha224(&sha224->asyncDev, NULL, data, len);
+ #endif
+ }
+ #endif /* WOLFSSL_ASYNC_CRYPT */
+ ret = Sha256Update((wc_Sha256*)sha224, data, len);
-#define W_K_from_buff\
- __asm__ volatile("vmovdqu %0, %%xmm4\n\t"\
- "vpshufb %%xmm13, %%xmm4, %%xmm4\n\t"\
- :: "m"(sha256->buffer[0]):"%xmm4") ;\
- __asm__ volatile("vmovdqu %0, %%xmm5\n\t"\
- "vpshufb %%xmm13, %%xmm5, %%xmm5\n\t"\
- ::"m"(sha256->buffer[4]):"%xmm5") ;\
- __asm__ volatile("vmovdqu %0, %%xmm6\n\t"\
- "vpshufb %%xmm13, %%xmm6, %%xmm6\n\t"\
- ::"m"(sha256->buffer[8]):"%xmm6") ;\
- __asm__ volatile("vmovdqu %0, %%xmm7\n\t"\
- "vpshufb %%xmm13, %%xmm7, %%xmm7\n\t"\
- ::"m"(sha256->buffer[12]):"%xmm7") ;\
-
-#define _SET_W_K_XFER(reg, i)\
- __asm__ volatile("vpaddd %0, %"#reg", %%xmm9"::"m"(K[i]):XMM_REGs) ;\
- __asm__ volatile("vmovdqa %%xmm9, %0":"=m"(W_K[i])::XMM_REGs) ;
-
-#define SET_W_K_XFER(reg, i) _SET_W_K_XFER(reg, i)
-
-static const ALIGN32 word64 mSHUF_00BA[] = { 0x0b0a090803020100, 0xFFFFFFFFFFFFFFFF } ; /* shuffle xBxA -> 00BA */
-static const ALIGN32 word64 mSHUF_DC00[] = { 0xFFFFFFFFFFFFFFFF, 0x0b0a090803020100 } ; /* shuffle xDxC -> DC00 */
-static const ALIGN32 word64 mBYTE_FLIP_MASK[] = { 0x0405060700010203, 0x0c0d0e0f08090a0b } ;
-
-
-#define _Init_Masks(mask1, mask2, mask3)\
-__asm__ volatile("vmovdqu %0, %"#mask1 ::"m"(mBYTE_FLIP_MASK[0])) ;\
-__asm__ volatile("vmovdqu %0, %"#mask2 ::"m"(mSHUF_00BA[0])) ;\
-__asm__ volatile("vmovdqu %0, %"#mask3 ::"m"(mSHUF_DC00[0])) ;
-
-#define Init_Masks(BYTE_FLIP_MASK, SHUF_00BA, SHUF_DC00)\
- _Init_Masks(BYTE_FLIP_MASK, SHUF_00BA, SHUF_DC00)
-
-#define X0 %xmm4
-#define X1 %xmm5
-#define X2 %xmm6
-#define X3 %xmm7
-#define X_ X0
-
-#define XTMP0 %xmm0
-#define XTMP1 %xmm1
-#define XTMP2 %xmm2
-#define XTMP3 %xmm3
-#define XTMP4 %xmm8
-#define XTMP5 %xmm9
-#define XFER %xmm10
-
-#define SHUF_00BA %xmm11 /* shuffle xBxA -> 00BA */
-#define SHUF_DC00 %xmm12 /* shuffle xDxC -> DC00 */
-#define BYTE_FLIP_MASK %xmm13
-
-#define XMM_REGs /* Registers are saved in Sha256Update/Finel */
- /*"xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13" */
-
-static int Transform_AVX1(Sha256* sha256)
-{
+ return ret;
+ }
- word32 W_K[64] ; /* temp for W+K */
+ int wc_Sha224Final(wc_Sha224* sha224, byte* hash)
+ {
+ int ret;
- #if defined(DEBUG_XMM)
- int i, j ;
- word32 xmm[29][4*15] ;
+ if (sha224 == NULL || hash == NULL) {
+ return BAD_FUNC_ARG;
+ }
+
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA224)
+ if (sha224->asyncDev.marker == WOLFSSL_ASYNC_MARKER_SHA224) {
+ #if defined(HAVE_INTEL_QA)
+ return IntelQaSymSha224(&sha224->asyncDev, hash, NULL,
+ WC_SHA224_DIGEST_SIZE);
+ #endif
+ }
+ #endif /* WOLFSSL_ASYNC_CRYPT */
+
+ ret = Sha256Final((wc_Sha256*)sha224);
+ if (ret != 0)
+ return ret;
+
+ #if defined(LITTLE_ENDIAN_ORDER)
+ ByteReverseWords(sha224->digest, sha224->digest, WC_SHA224_DIGEST_SIZE);
#endif
+ XMEMCPY(hash, sha224->digest, WC_SHA224_DIGEST_SIZE);
- Init_Masks(BYTE_FLIP_MASK, SHUF_00BA, SHUF_DC00) ;
- W_K_from_buff ; /* X0, X1, X2, X3 = W[0..15] ; */
-
- DigestToReg(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7) ;
-
- SET_W_K_XFER(X0, 0) ;
- MessageSched(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,0) ;
- SET_W_K_XFER(X1, 4) ;
- MessageSched(X1, X2, X3, X0, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,4) ;
- SET_W_K_XFER(X2, 8) ;
- MessageSched(X2, X3, X0, X1, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,8) ;
- SET_W_K_XFER(X3, 12) ;
- MessageSched(X3, X0, X1, X2, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,12) ;
- SET_W_K_XFER(X0, 16) ;
- MessageSched(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,16) ;
- SET_W_K_XFER(X1, 20) ;
- MessageSched(X1, X2, X3, X0, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,20) ;
- SET_W_K_XFER(X2, 24) ;
- MessageSched(X2, X3, X0, X1, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,24) ;
- SET_W_K_XFER(X3, 28) ;
- MessageSched(X3, X0, X1, X2, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,28) ;
- SET_W_K_XFER(X0, 32) ;
- MessageSched(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,32) ;
- SET_W_K_XFER(X1, 36) ;
- MessageSched(X1, X2, X3, X0, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,36) ;
- SET_W_K_XFER(X2, 40) ;
- MessageSched(X2, X3, X0, X1, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,40) ;
- SET_W_K_XFER(X3, 44) ;
- MessageSched(X3, X0, X1, X2, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5, XFER,
- SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,44) ;
-
- SET_W_K_XFER(X0, 48) ;
- SET_W_K_XFER(X1, 52) ;
- SET_W_K_XFER(X2, 56) ;
- SET_W_K_XFER(X3, 60) ;
-
- RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,48) ;
- RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,49) ;
- RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,50) ;
- RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,51) ;
-
- RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,52) ;
- RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,53) ;
- RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,54) ;
- RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,55) ;
-
- RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,56) ;
- RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,57) ;
- RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,58) ;
- RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,59) ;
-
- RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,60) ;
- RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,61) ;
- RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,62) ;
- RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,63) ;
-
- RegToDigest(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7) ;
-
- #if defined(DEBUG_XMM)
- for(i=0; i<29; i++) {
- for(j=0; j<4*14; j+=4)
- printf("xmm%d[%d]=%08x,%08x,%08x,%08x\n", j/4, i,
- xmm[i][j],xmm[i][j+1],xmm[i][j+2],xmm[i][j+3]) ;
- printf("\n") ;
+ return InitSha224(sha224); /* reset state */
}
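+ /* Note: SHA-224 reuses the SHA-256 machinery (hence the wc_Sha256
+ * casts in the update/final paths above) with different initial
+ * values; the digest is the first WC_SHA224_DIGEST_SIZE (28) bytes
+ * of the 32-byte internal state. */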
-
- for(i=0; i<64; i++)printf("W_K[%d]%08x\n", i, W_K[i]) ;
+#endif /* end of SHA224 software implementation */
+
+ int wc_InitSha224(wc_Sha224* sha224)
+ {
+ return wc_InitSha224_ex(sha224, NULL, INVALID_DEVID);
+ }
+
+ void wc_Sha224Free(wc_Sha224* sha224)
+ {
+ if (sha224 == NULL)
+ return;
+
+#ifdef WOLFSSL_SMALL_STACK_CACHE
+ if (sha224->W != NULL) {
+ XFREE(sha224->W, NULL, DYNAMIC_TYPE_DIGEST);
+ sha224->W = NULL;
+ }
+#endif
+
+ #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA224)
+ wolfAsync_DevCtxFree(&sha224->asyncDev, WOLFSSL_ASYNC_MARKER_SHA224);
+ #endif /* WOLFSSL_ASYNC_CRYPT */
+
+ #ifdef WOLFSSL_PIC32MZ_HASH
+ wc_Sha256Pic32Free(sha224);
#endif
+ }
+#endif /* WOLFSSL_SHA224 */
- return 0;
+
+int wc_InitSha256(wc_Sha256* sha256)
+{
+ return wc_InitSha256_ex(sha256, NULL, INVALID_DEVID);
}
-#if defined(HAVE_INTEL_RORX)
-static int Transform_AVX1_RORX(Sha256* sha256)
+void wc_Sha256Free(wc_Sha256* sha256)
{
+ if (sha256 == NULL)
+ return;
- word32 W_K[64] ; /* temp for W+K */
+#ifdef WOLFSSL_SMALL_STACK_CACHE
+ if (sha256->W != NULL) {
+ XFREE(sha256->W, NULL, DYNAMIC_TYPE_DIGEST);
+ sha256->W = NULL;
+ }
+#endif
- #if defined(DEBUG_XMM)
- int i, j ;
- word32 xmm[29][4*15] ;
- #endif
+#if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_SHA256)
+ wolfAsync_DevCtxFree(&sha256->asyncDev, WOLFSSL_ASYNC_MARKER_SHA256);
+#endif /* WOLFSSL_ASYNC_CRYPT */
+#ifdef WOLFSSL_PIC32MZ_HASH
+ wc_Sha256Pic32Free(sha256);
+#endif
+#if defined(WOLFSSL_AFALG_HASH)
+ if (sha256->alFd > 0) {
+ close(sha256->alFd);
+ sha256->alFd = -1; /* avoid possible double close on socket */
+ }
+ if (sha256->rdFd > 0) {
+ close(sha256->rdFd);
+ sha256->rdFd = -1; /* avoid possible double close on socket */
+ }
+#endif /* WOLFSSL_AFALG_HASH */
+#ifdef WOLFSSL_DEVCRYPTO_HASH
+ wc_DevCryptoFree(&sha256->ctx);
+#endif /* WOLFSSL_DEVCRYPTO_HASH */
+#if (defined(WOLFSSL_AFALG_HASH) && defined(WOLFSSL_AFALG_HASH_KEEP)) || \
+ (defined(WOLFSSL_DEVCRYPTO_HASH) && defined(WOLFSSL_DEVCRYPTO_HASH_KEEP)) || \
+ (defined(WOLFSSL_RENESAS_TSIP_CRYPT) && \
+ !defined(NO_WOLFSSL_RENESAS_TSIP_CRYPT_HASH))
+ if (sha256->msg != NULL) {
+ XFREE(sha256->msg, sha256->heap, DYNAMIC_TYPE_TMP_BUFFER);
+ sha256->msg = NULL;
+ }
+#endif
+}
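/* Usage sketch (illustration only; hash_message() is a hypothetical
 * wrapper): the streaming API defined in this file is init -> update ->
 * final, with final also resetting the context for reuse. */
#include <wolfssl/wolfcrypt/sha256.h>
int hash_message(const byte* msg, word32 msgLen,
                 byte out[WC_SHA256_DIGEST_SIZE])
{
    wc_Sha256 sha;
    int ret = wc_InitSha256(&sha);
    if (ret == 0)
        ret = wc_Sha256Update(&sha, msg, msgLen);
    if (ret == 0)
        ret = wc_Sha256Final(&sha, out); /* writes digest, resets state */
    wc_Sha256Free(&sha);
    return ret;
}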
+
+#endif /* !WOLFSSL_TI_HASH */
+#endif /* HAVE_FIPS */
+
+
+#ifndef WOLFSSL_TI_HASH
+#ifdef WOLFSSL_SHA224
+ int wc_Sha224GetHash(wc_Sha224* sha224, byte* hash)
+ {
+ int ret;
+ wc_Sha224 tmpSha224;
- Init_Masks(BYTE_FLIP_MASK, SHUF_00BA, SHUF_DC00) ;
- W_K_from_buff ; /* X0, X1, X2, X3 = W[0..15] ; */
-
- DigestToReg(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7) ;
- SET_W_K_XFER(X0, 0) ;
- MessageSched_RORX(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,0) ;
- SET_W_K_XFER(X1, 4) ;
- MessageSched_RORX(X1, X2, X3, X0, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,4) ;
- SET_W_K_XFER(X2, 8) ;
- MessageSched_RORX(X2, X3, X0, X1, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,8) ;
- SET_W_K_XFER(X3, 12) ;
- MessageSched_RORX(X3, X0, X1, X2, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,12) ;
- SET_W_K_XFER(X0, 16) ;
- MessageSched_RORX(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,16) ;
- SET_W_K_XFER(X1, 20) ;
- MessageSched_RORX(X1, X2, X3, X0, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,20) ;
- SET_W_K_XFER(X2, 24) ;
- MessageSched_RORX(X2, X3, X0, X1, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,24) ;
- SET_W_K_XFER(X3, 28) ;
- MessageSched_RORX(X3, X0, X1, X2, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,28) ;
- SET_W_K_XFER(X0, 32) ;
- MessageSched_RORX(X0, X1, X2, X3, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,32) ;
- SET_W_K_XFER(X1, 36) ;
- MessageSched_RORX(X1, X2, X3, X0, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,36) ;
- SET_W_K_XFER(X2, 40) ;
- MessageSched_RORX(X2, X3, X0, X1, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,40) ;
- SET_W_K_XFER(X3, 44) ;
- MessageSched_RORX(X3, X0, X1, X2, XTMP0, XTMP1, XTMP2, XTMP3, XTMP4, XTMP5,
- XFER, SHUF_00BA, SHUF_DC00, S_4,S_5,S_6,S_7,S_0,S_1,S_2,S_3,44) ;
-
- SET_W_K_XFER(X0, 48) ;
- SET_W_K_XFER(X1, 52) ;
- SET_W_K_XFER(X2, 56) ;
- SET_W_K_XFER(X3, 60) ;
-
- RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,48) ;
- RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,49) ;
- RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,50) ;
- RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,51) ;
-
- RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,52) ;
- RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,53) ;
- RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,54) ;
- RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,55) ;
-
- RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,56) ;
- RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,57) ;
- RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,58) ;
- RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,59) ;
-
- RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,60) ;
- RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,61) ;
- RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,62) ;
- RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,63) ;
-
- RegToDigest(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7) ;
-
- #if defined(DEBUG_XMM)
- for(i=0; i<29; i++) {
- for(j=0; j<4*14; j+=4)
- printf("xmm%d[%d]=%08x,%08x,%08x,%08x\n", j/4, i,
- xmm[i][j],xmm[i][j+1],xmm[i][j+2],xmm[i][j+3]) ;
- printf("\n") ;
+ if (sha224 == NULL || hash == NULL)
+ return BAD_FUNC_ARG;
+
+ ret = wc_Sha224Copy(sha224, &tmpSha224);
+ if (ret == 0) {
+ ret = wc_Sha224Final(&tmpSha224, hash);
+ wc_Sha224Free(&tmpSha224);
+ }
+ return ret;
}
-
- for(i=0; i<64; i++)printf("W_K[%d]%08x\n", i, W_K[i]) ;
+ int wc_Sha224Copy(wc_Sha224* src, wc_Sha224* dst)
+ {
+ int ret = 0;
+
+ if (src == NULL || dst == NULL)
+ return BAD_FUNC_ARG;
+
+ XMEMCPY(dst, src, sizeof(wc_Sha224));
+ #ifdef WOLFSSL_SMALL_STACK_CACHE
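+        /* don't alias src's heap-cached W scratch buffer; clearing it
+           makes dst allocate its own on the next transform */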
+ dst->W = NULL;
#endif
- return 0;
-}
-#endif /* HAVE_INTEL_RORX */
+ #ifdef WOLFSSL_ASYNC_CRYPT
+ ret = wolfAsync_DevCopy(&src->asyncDev, &dst->asyncDev);
+ #endif
+ #if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
+ dst->flags |= WC_HASH_FLAG_ISCOPY;
+ #endif
+
+ return ret;
+ }
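For context on how the new wc_Sha224GetHash()/wc_Sha224Copy() pair is meant to be used: GetHash clones the running state into a temporary, finalizes the clone, and frees it, so a caller can read an intermediate digest without disturbing the stream. A minimal sketch, with illustrative buffer names and abbreviated error handling:

    #include <wolfssl/wolfcrypt/sha256.h>

    int sha224_snapshot(const byte* p1, word32 len1,
                        const byte* p2, word32 len2)
    {
        wc_Sha224 sha;
        byte mid[WC_SHA224_DIGEST_SIZE];  /* digest of p1 alone */
        byte end[WC_SHA224_DIGEST_SIZE];  /* digest of p1 || p2 */
        int ret = wc_InitSha224(&sha);

        if (ret == 0)
            ret = wc_Sha224Update(&sha, p1, len1);
        if (ret == 0)
            ret = wc_Sha224GetHash(&sha, mid); /* sha stays usable */
        if (ret == 0)
            ret = wc_Sha224Update(&sha, p2, len2);
        if (ret == 0)
            ret = wc_Sha224Final(&sha, end);
        wc_Sha224Free(&sha);
        return ret;
    }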
-#endif /* HAVE_INTEL_AVX1 */
+#if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
+ int wc_Sha224SetFlags(wc_Sha224* sha224, word32 flags)
+ {
+ if (sha224) {
+ sha224->flags = flags;
+ }
+ return 0;
+ }
+ int wc_Sha224GetFlags(wc_Sha224* sha224, word32* flags)
+ {
+ if (sha224 && flags) {
+ *flags = sha224->flags;
+ }
+ return 0;
+ }
+#endif
+#endif /* WOLFSSL_SHA224 */
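The flag accessors are trivial on their own, but they interact with the copy path above: wc_Sha224Copy() ORs WC_HASH_FLAG_ISCOPY into the destination, so a caller can distinguish a clone from the original. A short illustrative check, assuming WOLFSSL_HASH_FLAGS is enabled:

    wc_Sha224 orig, clone;
    word32 flags = 0;

    wc_InitSha224(&orig);
    wc_Sha224Copy(&orig, &clone);
    wc_Sha224GetFlags(&clone, &flags);
    if (flags & WC_HASH_FLAG_ISCOPY) {
        /* clone came from wc_Sha224Copy(); finalize it independently */
    }
    wc_Sha224Free(&clone);
    wc_Sha224Free(&orig);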
-#if defined(HAVE_INTEL_AVX2)
+#ifdef WOLFSSL_AFALG_HASH
+ /* implemented in wolfcrypt/src/port/af_alg/afalg_hash.c */
-#define _MOVE_to_REG(ymm, mem) __asm__ volatile("vmovdqu %0, %%"#ymm" ":: "m"(mem):YMM_REGs) ;
-#define _MOVE_to_MEM(mem, ymm) __asm__ volatile("vmovdqu %%"#ymm", %0" : "=m"(mem)::YMM_REGs) ;
-#define _BYTE_SWAP(ymm, map) __asm__ volatile("vpshufb %0, %%"#ymm", %%"#ymm"\n\t"\
- :: "m"(map):YMM_REGs) ;
-#define _MOVE_128(ymm0, ymm1, ymm2, map) __asm__ volatile("vperm2i128 $"#map", %%"\
- #ymm2", %%"#ymm1", %%"#ymm0" ":::YMM_REGs) ;
-#define _MOVE_BYTE(ymm0, ymm1, map) __asm__ volatile("vpshufb %0, %%"#ymm1", %%"\
- #ymm0"\n\t":: "m"(map):YMM_REGs) ;
-#define _S_TEMP(dest, src, bits, temp) __asm__ volatile("vpsrld $"#bits", %%"\
- #src", %%"#dest"\n\tvpslld $32-"#bits", %%"#src", %%"#temp"\n\tvpor %%"\
- #temp",%%"#dest", %%"#dest" ":::YMM_REGs) ;
-#define _AVX2_R(dest, src, bits) __asm__ volatile("vpsrld $"#bits", %%"\
- #src", %%"#dest" ":::YMM_REGs) ;
-#define _XOR(dest, src1, src2) __asm__ volatile("vpxor %%"#src1", %%"\
- #src2", %%"#dest" ":::YMM_REGs) ;
-#define _OR(dest, src1, src2) __asm__ volatile("vpor %%"#src1", %%"\
- #src2", %%"#dest" ":::YMM_REGs) ;
-#define _ADD(dest, src1, src2) __asm__ volatile("vpaddd %%"#src1", %%"\
- #src2", %%"#dest" ":::YMM_REGs) ;
-#define _ADD_MEM(dest, src1, mem) __asm__ volatile("vpaddd %0, %%"#src1", %%"\
- #dest" "::"m"(mem):YMM_REGs) ;
-#define _BLEND(map, dest, src1, src2) __asm__ volatile("vpblendd $"#map", %%"\
- #src1", %%"#src2", %%"#dest" ":::YMM_REGs) ;
-
-#define _EXTRACT_XMM_0(xmm, mem) __asm__ volatile("vpextrd $0, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_1(xmm, mem) __asm__ volatile("vpextrd $1, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_2(xmm, mem) __asm__ volatile("vpextrd $2, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_3(xmm, mem) __asm__ volatile("vpextrd $3, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_4(ymm, xmm, mem)\
- __asm__ volatile("vperm2i128 $0x1, %%"#ymm", %%"#ymm", %%"#ymm" ":::YMM_REGs) ;\
- __asm__ volatile("vpextrd $0, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_5(xmm, mem) __asm__ volatile("vpextrd $1, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_6(xmm, mem) __asm__ volatile("vpextrd $2, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-#define _EXTRACT_XMM_7(xmm, mem) __asm__ volatile("vpextrd $3, %%"#xmm", %0 ":"=r"(mem)::YMM_REGs) ;
-
-#define _SWAP_YMM_HL(ymm) __asm__ volatile("vperm2i128 $0x1, %%"#ymm", %%"#ymm", %%"#ymm" ":::YMM_REGs) ;
-#define SWAP_YMM_HL(ymm) _SWAP_YMM_HL(ymm)
-
-#define MOVE_to_REG(ymm, mem) _MOVE_to_REG(ymm, mem)
-#define MOVE_to_MEM(mem, ymm) _MOVE_to_MEM(mem, ymm)
-#define BYTE_SWAP(ymm, map) _BYTE_SWAP(ymm, map)
-#define MOVE_128(ymm0, ymm1, ymm2, map) _MOVE_128(ymm0, ymm1, ymm2, map)
-#define MOVE_BYTE(ymm0, ymm1, map) _MOVE_BYTE(ymm0, ymm1, map)
-#define XOR(dest, src1, src2) _XOR(dest, src1, src2)
-#define OR(dest, src1, src2) _OR(dest, src1, src2)
-#define ADD(dest, src1, src2) _ADD(dest, src1, src2)
-#define ADD_MEM(dest, src1, mem) _ADD_MEM(dest, src1, mem)
-#define BLEND(map, dest, src1, src2) _BLEND(map, dest, src1, src2)
-
-#define S_TMP(dest, src, bits, temp) _S_TEMP(dest, src, bits, temp);
-#define AVX2_S(dest, src, bits) S_TMP(dest, src, bits, S_TEMP)
-#define AVX2_R(dest, src, bits) _AVX2_R(dest, src, bits)
-
-#define GAMMA0(dest, src) AVX2_S(dest, src, 7); AVX2_S(G_TEMP, src, 18); \
- XOR(dest, G_TEMP, dest) ; AVX2_R(G_TEMP, src, 3); XOR(dest, G_TEMP, dest) ;
-#define GAMMA0_1(dest, src) AVX2_S(dest, src, 7); AVX2_S(G_TEMP, src, 18);
-#define GAMMA0_2(dest, src) XOR(dest, G_TEMP, dest) ; AVX2_R(G_TEMP, src, 3); \
- XOR(dest, G_TEMP, dest) ;
-
-#define GAMMA1(dest, src) AVX2_S(dest, src, 17); AVX2_S(G_TEMP, src, 19); \
- XOR(dest, G_TEMP, dest) ; AVX2_R(G_TEMP, src, 10); XOR(dest, G_TEMP, dest) ;
-#define GAMMA1_1(dest, src) AVX2_S(dest, src, 17); AVX2_S(G_TEMP, src, 19);
-#define GAMMA1_2(dest, src) XOR(dest, G_TEMP, dest) ; AVX2_R(G_TEMP, src, 10); \
- XOR(dest, G_TEMP, dest) ;
-
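The GAMMA0/GAMMA1 macros removed here are 8-lane vectorizations of the standard SHA-256 small-sigma functions (rotation counts 7/18/3 and 17/19/10, per FIPS 180-4). For reference, a scalar C sketch of what each 32-bit lane computes, plus the message-schedule recurrence the unrolled transform implements:

    #define ROTR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

    static word32 Gamma0(word32 x)  /* sigma0 = rotr7 ^ rotr18 ^ shr3 */
    {
        return ROTR32(x, 7) ^ ROTR32(x, 18) ^ (x >> 3);
    }

    static word32 Gamma1(word32 x)  /* sigma1 = rotr17 ^ rotr19 ^ shr10 */
    {
        return ROTR32(x, 17) ^ ROTR32(x, 19) ^ (x >> 10);
    }

    /* scalar form of the expansion done eight lanes at a time below */
    static void ExpandW(word32 W[64])
    {
        int i;
        for (i = 16; i < 64; i++)
            W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16];
    }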
-#define FEEDBACK1_to_W_I_2 MOVE_BYTE(YMM_TEMP0, W_I, mMAP1toW_I_2[0]) ; \
- BLEND(0x0c, W_I_2, YMM_TEMP0, W_I_2) ;
-#define FEEDBACK2_to_W_I_2 MOVE_128(YMM_TEMP0, W_I, W_I, 0x08) ; \
- MOVE_BYTE(YMM_TEMP0, YMM_TEMP0, mMAP2toW_I_2[0]) ; BLEND(0x30, W_I_2, YMM_TEMP0, W_I_2) ;
-#define FEEDBACK3_to_W_I_2 MOVE_BYTE(YMM_TEMP0, W_I, mMAP3toW_I_2[0]) ; \
- BLEND(0xc0, W_I_2, YMM_TEMP0, W_I_2) ;
-
-#define FEEDBACK_to_W_I_7 MOVE_128(YMM_TEMP0, W_I, W_I, 0x08) ;\
- MOVE_BYTE(YMM_TEMP0, YMM_TEMP0, mMAPtoW_I_7[0]) ; BLEND(0x80, W_I_7, YMM_TEMP0, W_I_7) ;
-
-#undef volatile
-
-#define W_I_16 ymm8
-#define W_I_15 ymm9
-#define W_I_7 ymm10
-#define W_I_2 ymm11
-#define W_I ymm12
-#define G_TEMP ymm13
-#define S_TEMP ymm14
-#define YMM_TEMP0 ymm15
-#define YMM_TEMP0x xmm15
-#define W_I_TEMP ymm7
-#define W_K_TEMP ymm15
-#define W_K_TEMPx xmm15
-
-#define YMM_REGs /* Registers are saved in Sha256Update/Final */
- /* "%ymm7","%ymm8","%ymm9","%ymm10","%ymm11","%ymm12","%ymm13","%ymm14","%ymm15"*/
-
-
-#define MOVE_15_to_16(w_i_16, w_i_15, w_i_7)\
- __asm__ volatile("vperm2i128 $0x01, %%"#w_i_15", %%"#w_i_15", %%"#w_i_15" ":::YMM_REGs) ;\
- __asm__ volatile("vpblendd $0x08, %%"#w_i_15", %%"#w_i_7", %%"#w_i_16" ":::YMM_REGs) ;\
- __asm__ volatile("vperm2i128 $0x01, %%"#w_i_7", %%"#w_i_7", %%"#w_i_15" ":::YMM_REGs) ;\
- __asm__ volatile("vpblendd $0x80, %%"#w_i_15", %%"#w_i_16", %%"#w_i_16" ":::YMM_REGs) ;\
- __asm__ volatile("vpshufd $0x93, %%"#w_i_16", %%"#w_i_16" ":::YMM_REGs) ;\
-
-#define MOVE_7_to_15(w_i_15, w_i_7)\
- __asm__ volatile("vmovdqu %%"#w_i_7", %%"#w_i_15" ":::YMM_REGs) ;\
-
-#define MOVE_I_to_7(w_i_7, w_i)\
- __asm__ volatile("vperm2i128 $0x01, %%"#w_i", %%"#w_i", %%"#w_i_7" ":::YMM_REGs) ;\
- __asm__ volatile("vpblendd $0x01, %%"#w_i_7", %%"#w_i", %%"#w_i_7" ":::YMM_REGs) ;\
- __asm__ volatile("vpshufd $0x39, %%"#w_i_7", %%"#w_i_7" ":::YMM_REGs) ;\
-
-#define MOVE_I_to_2(w_i_2, w_i)\
- __asm__ volatile("vperm2i128 $0x01, %%"#w_i", %%"#w_i", %%"#w_i_2" ":::YMM_REGs) ;\
- __asm__ volatile("vpshufd $0x0e, %%"#w_i_2", %%"#w_i_2" ":::YMM_REGs) ;\
-
-#define ROTATE_W(w_i_16, w_i_15, w_i_7, w_i_2, w_i)\
- MOVE_15_to_16(w_i_16, w_i_15, w_i_7) ; \
- MOVE_7_to_15(w_i_15, w_i_7) ; \
- MOVE_I_to_7(w_i_7, w_i) ; \
- MOVE_I_to_2(w_i_2, w_i) ;\
-
-#define _RegToDigest(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- { word32 d ;\
- __asm__ volatile("movl %"#S_0", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[0] += d;\
- __asm__ volatile("movl %"#S_1", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[1] += d;\
- __asm__ volatile("movl %"#S_2", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[2] += d;\
- __asm__ volatile("movl %"#S_3", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[3] += d;\
- __asm__ volatile("movl %"#S_4", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[4] += d;\
- __asm__ volatile("movl %"#S_5", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[5] += d;\
- __asm__ volatile("movl %"#S_6", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[6] += d;\
- __asm__ volatile("movl %"#S_7", %0":"=r"(d)::SSE_REGs) ;\
- sha256->digest[7] += d;\
-}
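_RegToDigest is the block feed-forward: the eight working variables are spilled from registers and accumulated into the chaining value. The scalar equivalent, assuming s[0..7] holds the post-round state:

    static void FeedForward(word32 digest[8], const word32 s[8])
    {
        int i;
        for (i = 0; i < 8; i++)
            digest[i] += s[i];  /* H_i = H_(i-1) + compressed block */
    }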
+#elif defined(WOLFSSL_DEVCRYPTO_HASH)
+ /* implemented in wolfcrypt/src/port/devcrypto/devcrypto_hash.c */
-#define _DumpS(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- { word32 d[8] ;\
- __asm__ volatile("movl %"#S_0", %0":"=r"(d[0])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_1", %0":"=r"(d[1])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_2", %0":"=r"(d[2])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_3", %0":"=r"(d[3])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_4", %0":"=r"(d[4])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_5", %0":"=r"(d[5])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_6", %0":"=r"(d[6])::SSE_REGs) ;\
- __asm__ volatile("movl %"#S_7", %0":"=r"(d[7])::SSE_REGs) ;\
- printf("S[0..7]=%08x,%08x,%08x,%08x,%08x,%08x,%08x,%08x\n", d[0],d[1],d[2],d[3],d[4],d[5],d[6],d[7]);\
- __asm__ volatile("movl %0, %"#S_0::"r"(d[0]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_1::"r"(d[1]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_2::"r"(d[2]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_3::"r"(d[3]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_4::"r"(d[4]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_5::"r"(d[5]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_6::"r"(d[6]):SSE_REGs) ;\
- __asm__ volatile("movl %0, %"#S_7::"r"(d[7]):SSE_REGs) ;\
-}
+#elif defined(WOLFSSL_RENESAS_TSIP_CRYPT) && \
+ !defined(NO_WOLFSSL_RENESAS_TSIP_CRYPT_HASH)
+ /* implemented in wolfcrypt/src/port/Renesas/renesas_tsip_sha.c */
+#else
-#define DigestToReg(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- _DigestToReg(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )
-
-#define RegToDigest(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- _RegToDigest(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )
-
-#define DumpS(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )\
- _DumpS(S_0, S_1, S_2, S_3, S_4, S_5, S_6, S_7 )
-
-
- /* Byte-swap masks; 0x80 selector bytes fill the unused word lanes with zeros. */
- static const unsigned long mBYTE_FLIP_MASK_16[] =
- { 0x0405060700010203, 0x0c0d0e0f08090a0b, 0x0405060700010203, 0x0c0d0e0f08090a0b } ;
- static const unsigned long mBYTE_FLIP_MASK_15[] =
- { 0x0405060700010203, 0x0c0d0e0f08090a0b, 0x0405060700010203, 0x0c0d0e0f08090a0b } ;
- static const unsigned long mBYTE_FLIP_MASK_7 [] =
- { 0x0405060700010203, 0x0c0d0e0f08090a0b, 0x0405060700010203, 0x8080808008090a0b } ;
- static const unsigned long mBYTE_FLIP_MASK_2 [] =
- { 0x0405060700010203, 0x8080808080808080, 0x8080808080808080, 0x8080808080808080 } ;
-
- static const unsigned long mMAPtoW_I_7[] =
- { 0x8080808080808080, 0x8080808080808080, 0x8080808080808080, 0x0302010080808080 } ;
- static const unsigned long mMAP1toW_I_2[] =
- { 0x8080808080808080, 0x0706050403020100, 0x8080808080808080, 0x8080808080808080 } ;
- static const unsigned long mMAP2toW_I_2[] =
- { 0x8080808080808080, 0x8080808080808080, 0x0f0e0d0c0b0a0908, 0x8080808080808080 } ;
- static const unsigned long mMAP3toW_I_2[] =
- { 0x8080808080808080, 0x8080808080808080, 0x8080808080808080, 0x0706050403020100 } ;
-
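The tables above are vpshufb selector masks: each mask byte indexes a source byte within its 16-byte lane, and a byte with the high bit set (0x80) zeroes that destination byte, which is how the unused word positions are cleared. A scalar model of the per-lane semantics:

    /* models PSHUFB/VPSHUFB within one 16-byte lane */
    static void shuffle_lane(byte dst[16], const byte src[16],
                             const byte mask[16])
    {
        int i;
        for (i = 0; i < 16; i++)
            dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0x0F];
    }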
-static int Transform_AVX2(Sha256* sha256)
+int wc_Sha256GetHash(wc_Sha256* sha256, byte* hash)
{
+ int ret;
+ wc_Sha256 tmpSha256;
- #ifdef WOLFSSL_SMALL_STACK
- word32* W_K;
- W_K = (word32*) XMALLOC(sizeof(word32) * 64, NULL, DYNAMIC_TYPE_TMP_BUFFER);
- if (W_K == NULL)
- return MEMORY_E;
- #else
- word32 W_K[64] ;
- #endif
+ if (sha256 == NULL || hash == NULL)
+ return BAD_FUNC_ARG;
- MOVE_to_REG(W_I_16, sha256->buffer[0]); BYTE_SWAP(W_I_16, mBYTE_FLIP_MASK_16[0]) ;
- MOVE_to_REG(W_I_15, sha256->buffer[1]); BYTE_SWAP(W_I_15, mBYTE_FLIP_MASK_15[0]) ;
- MOVE_to_REG(W_I, sha256->buffer[8]) ; BYTE_SWAP(W_I, mBYTE_FLIP_MASK_16[0]) ;
- MOVE_to_REG(W_I_7, sha256->buffer[16-7]) ; BYTE_SWAP(W_I_7, mBYTE_FLIP_MASK_7[0]) ;
- MOVE_to_REG(W_I_2, sha256->buffer[16-2]) ; BYTE_SWAP(W_I_2, mBYTE_FLIP_MASK_2[0]) ;
-
- DigestToReg(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7) ;
-
- ADD_MEM(W_K_TEMP, W_I_16, K[0]) ;
- MOVE_to_MEM(W_K[0], W_K_TEMP) ;
-
- RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,0) ;
- RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,1) ;
- RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,2) ;
- RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,3) ;
- RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,4) ;
- RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,5) ;
- RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,6) ;
- RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,7) ;
-
- ADD_MEM(YMM_TEMP0, W_I, K[8]) ;
- MOVE_to_MEM(W_K[8], YMM_TEMP0) ;
-
- /* W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] */
- RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,8) ;
- GAMMA0_1(W_I_TEMP, W_I_15) ;
- RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,8) ;
- GAMMA0_2(W_I_TEMP, W_I_15) ;
- RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,8) ;
- ADD(W_I_TEMP, W_I_16, W_I_TEMP) ;/* keep the partial sum in W_I_TEMP; W_I_7 is still incomplete */
- RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,9) ;
- ADD(W_I, W_I_7, W_I_TEMP);
- RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,9) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,9) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,10) ;
- ADD(W_I, W_I, YMM_TEMP0) ;/* now W[16..17] are completed */
- RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,10) ;
- FEEDBACK1_to_W_I_2 ;
- RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,10) ;
- FEEDBACK_to_W_I_7 ;
- RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,11) ;
- ADD(W_I_TEMP, W_I_7, W_I_TEMP);
- RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,11) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,11) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,12) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ;/* now W[16..19] are completed */
- RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,12) ;
- FEEDBACK2_to_W_I_2 ;
- RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,12) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,13) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,13) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[16..21] are completed */
- RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,13) ;
- FEEDBACK3_to_W_I_2 ;
- RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,14) ;
- GAMMA1(YMM_TEMP0, W_I_2) ;
- RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,14) ;
- RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,14) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[16..23] are completed */
- RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,15) ;
-
- MOVE_to_REG(YMM_TEMP0, K[16]) ;
- RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,15) ;
- ROTATE_W(W_I_16, W_I_15, W_I_7, W_I_2, W_I) ;
- RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,15) ;
- ADD(YMM_TEMP0, YMM_TEMP0, W_I) ;
- MOVE_to_MEM(W_K[16], YMM_TEMP0) ;
-
- /* W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] */
- RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,16) ;
- GAMMA0_1(W_I_TEMP, W_I_15) ;
- RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,16) ;
- GAMMA0_2(W_I_TEMP, W_I_15) ;
- RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,16) ;
- ADD(W_I_TEMP, W_I_16, W_I_TEMP) ;/* keep the partial sum in W_I_TEMP; W_I_7 is still incomplete */
- RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,17) ;
- ADD(W_I, W_I_7, W_I_TEMP);
- RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,17) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,17) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,18) ;
- ADD(W_I, W_I, YMM_TEMP0) ;/* now W[24..25] are completed */
- RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,18) ;
- FEEDBACK1_to_W_I_2 ;
- RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,18) ;
- FEEDBACK_to_W_I_7 ;
- RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,19) ;
- ADD(W_I_TEMP, W_I_7, W_I_TEMP);
- RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,19) ;
- GAMMA1(YMM_TEMP0, W_I_2) ;
- RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,19) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,20) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ;/* now W[24..27] are completed */
- RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,20) ;
- FEEDBACK2_to_W_I_2 ;
- RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,20) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,21) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,21) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[24..29] are completed */
- RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,21) ;
- FEEDBACK3_to_W_I_2 ;
- RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,22) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,22) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,22) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[24..31] are completed */
- RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,23) ;
-
- MOVE_to_REG(YMM_TEMP0, K[24]) ;
- RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,23) ;
- ROTATE_W(W_I_16, W_I_15, W_I_7, W_I_2, W_I) ;
- RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,23) ;
- ADD(YMM_TEMP0, YMM_TEMP0, W_I) ;
- MOVE_to_MEM(W_K[24], YMM_TEMP0) ;
-
- /* W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] */
- RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,24) ;
- GAMMA0_1(W_I_TEMP, W_I_15) ;
- RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,24) ;
- GAMMA0_2(W_I_TEMP, W_I_15) ;
- RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,24) ;
- ADD(W_I_TEMP, W_I_16, W_I_TEMP) ;/* keep the partial sum in W_I_TEMP; W_I_7 is still incomplete */
- RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,25) ;
- ADD(W_I, W_I_7, W_I_TEMP);
- RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,25) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,25) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,26) ;
- ADD(W_I, W_I, YMM_TEMP0) ;/* now W[32..33] are completed */
- RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,26) ;
- FEEDBACK1_to_W_I_2 ;
- RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,26) ;
- FEEDBACK_to_W_I_7 ;
- RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,27) ;
- ADD(W_I_TEMP, W_I_7, W_I_TEMP);
- RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,27) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,27) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,28) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ;/* now W[32..35] are completed */
- RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,28) ;
- FEEDBACK2_to_W_I_2 ;
- RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,28) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,29) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,29) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[32..37] are completed */
- RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,29) ;
- FEEDBACK3_to_W_I_2 ;
- RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,30) ;
- GAMMA1(YMM_TEMP0, W_I_2) ;
- RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,30) ;
- RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,30) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[32..39] are completed */
- RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,31) ;
-
- MOVE_to_REG(YMM_TEMP0, K[32]) ;
- RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,31) ;
- ROTATE_W(W_I_16, W_I_15, W_I_7, W_I_2, W_I) ;
- RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,31) ;
- ADD(YMM_TEMP0, YMM_TEMP0, W_I) ;
- MOVE_to_MEM(W_K[32], YMM_TEMP0) ;
-
-
- /* W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] */
- RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,32) ;
- GAMMA0_1(W_I_TEMP, W_I_15) ;
- RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,32) ;
- GAMMA0_2(W_I_TEMP, W_I_15) ;
- RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,32) ;
- ADD(W_I_TEMP, W_I_16, W_I_TEMP) ;/* keep the partial sum in W_I_TEMP; W_I_7 is still incomplete */
- RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,33) ;
- ADD(W_I, W_I_7, W_I_TEMP);
- RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,33) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,33) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,34) ;
- ADD(W_I, W_I, YMM_TEMP0) ;/* now W[40..41] are completed */
- RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,34) ;
- FEEDBACK1_to_W_I_2 ;
- RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,34) ;
- FEEDBACK_to_W_I_7 ;
- RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,35) ;
- ADD(W_I_TEMP, W_I_7, W_I_TEMP);
- RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,35) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,35) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,36) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ;/* now W[40..43] are completed */
- RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,36) ;
- FEEDBACK2_to_W_I_2 ;
- RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,36) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,37) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,37) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[40..45] are completed */
- RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,37) ;
- FEEDBACK3_to_W_I_2 ;
- RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,38) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,38) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,38) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[40..47] are completed */
- RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,39) ;
-
- MOVE_to_REG(YMM_TEMP0, K[40]) ;
- RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,39) ;
- ROTATE_W(W_I_16, W_I_15, W_I_7, W_I_2, W_I) ;
- RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,39) ;
- ADD(YMM_TEMP0, YMM_TEMP0, W_I) ;
- MOVE_to_MEM(W_K[40], YMM_TEMP0) ;
-
- /* W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] */
- RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,40) ;
- GAMMA0_1(W_I_TEMP, W_I_15) ;
- RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,40) ;
- GAMMA0_2(W_I_TEMP, W_I_15) ;
- RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,40) ;
- ADD(W_I_TEMP, W_I_16, W_I_TEMP) ;/* keep the partial sum in W_I_TEMP; W_I_7 is still incomplete */
- RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,41) ;
- ADD(W_I, W_I_7, W_I_TEMP);
- RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,41) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,41) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,42) ;
- ADD(W_I, W_I, YMM_TEMP0) ;/* now W[48..49] are completed */
- RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,42) ;
- FEEDBACK1_to_W_I_2 ;
- RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,42) ;
- FEEDBACK_to_W_I_7 ;
- RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,43) ;
- ADD(W_I_TEMP, W_I_7, W_I_TEMP);
- RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,43) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,43) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,44) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ;/* now W[48..51] are completed */
- RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,44) ;
- FEEDBACK2_to_W_I_2 ;
- RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,44) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,45) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,45) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[48..53] are completed */
- RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,45) ;
- FEEDBACK3_to_W_I_2 ;
- RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,46) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,46) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,46) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[48..55] are completed */
- RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,47) ;
-
- MOVE_to_REG(YMM_TEMP0, K[48]) ;
- RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,47) ;
- ROTATE_W(W_I_16, W_I_15, W_I_7, W_I_2, W_I) ;
- RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,47) ;
- ADD(YMM_TEMP0, YMM_TEMP0, W_I) ;
- MOVE_to_MEM(W_K[48], YMM_TEMP0) ;
-
- /* W[i] = Gamma1(W[i-2]) + W[i-7] + Gamma0(W[i-15]) + W[i-16] */
- RND_0_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,48) ;
- GAMMA0_1(W_I_TEMP, W_I_15) ;
- RND_0_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,48) ;
- GAMMA0_2(W_I_TEMP, W_I_15) ;
- RND_0_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,48) ;
- ADD(W_I_TEMP, W_I_16, W_I_TEMP) ;/* keep the partial sum in W_I_TEMP; W_I_7 is still incomplete */
- RND_7_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,49) ;
- ADD(W_I, W_I_7, W_I_TEMP);
- RND_7_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,49) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_7_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,49) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_6_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,50) ;
- ADD(W_I, W_I, YMM_TEMP0) ;/* now W[56..57] are completed */
- RND_6_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,50) ;
- FEEDBACK1_to_W_I_2 ;
- RND_6_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,50) ;
- FEEDBACK_to_W_I_7 ;
- RND_5_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,51) ;
- ADD(W_I_TEMP, W_I_7, W_I_TEMP);
- RND_5_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,51) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_5_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,51) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_4_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,52) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ;/* now W[56..59] are completed */
- RND_4_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,52) ;
- FEEDBACK2_to_W_I_2 ;
- RND_4_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,52) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_3_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,53) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_3_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,53) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[56..61] are completed */
- RND_3_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,53) ;
- FEEDBACK3_to_W_I_2 ;
- RND_2_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,54) ;
- GAMMA1_1(YMM_TEMP0, W_I_2) ;
- RND_2_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,54) ;
- GAMMA1_2(YMM_TEMP0, W_I_2) ;
- RND_2_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,54) ;
- ADD(W_I, W_I_TEMP, YMM_TEMP0) ; /* now W[56..63] are completed */
- RND_1_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,55) ;
-
- MOVE_to_REG(YMM_TEMP0, K[56]) ;
- RND_1_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,55) ;
- ROTATE_W(W_I_16, W_I_15, W_I_7, W_I_2, W_I) ;
- RND_1_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,55) ;
- ADD(YMM_TEMP0, YMM_TEMP0, W_I) ;
- MOVE_to_MEM(W_K[56], YMM_TEMP0) ;
-
- RND_0(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,56) ;
- RND_7(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,57) ;
- RND_6(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,58) ;
- RND_5(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,59) ;
-
- RND_4(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,60) ;
- RND_3(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,61) ;
- RND_2(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,62) ;
- RND_1(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7,63) ;
-
- RegToDigest(S_0,S_1,S_2,S_3,S_4,S_5,S_6,S_7) ;
-
- #ifdef WOLFSSL_SMALL_STACK
- XFREE(W_K, NULL, DYNAMIC_TYPE_TMP_BUFFER);
- #endif
-
- return 0;
+#if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+    if (sha256->ctx.mode == ESP32_SHA_INIT) {
+        esp_sha_try_hw_lock(&sha256->ctx);
+    }
+    if (sha256->ctx.mode == ESP32_SHA_HW) {
+        esp_sha256_digest_process(sha256, 0);
+    }
+#endif
+ ret = wc_Sha256Copy(sha256, &tmpSha256);
+ if (ret == 0) {
+ ret = wc_Sha256Final(&tmpSha256, hash);
+#if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+ sha256->ctx.mode = ESP32_SHA_SW;
+#endif
+
+ wc_Sha256Free(&tmpSha256);
+ }
+ return ret;
}
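As with the SHA-224 variant, wc_Sha256GetHash() is a non-destructive finalize (with the extra ESP32 step above pulling any in-progress hardware digest into the state before it is copied). A sketch of using it for a rolling digest over chunked input; the chunking scheme and names are illustrative only:

    int sha256_rolling(const byte* data, word32 dataSz, word32 chunkSz)
    {
        wc_Sha256 sha;
        byte digest[WC_SHA256_DIGEST_SIZE];
        word32 off = 0;
        int ret = wc_InitSha256(&sha);

        while (ret == 0 && off < dataSz) {
            word32 n = (dataSz - off < chunkSz) ? (dataSz - off) : chunkSz;
            ret = wc_Sha256Update(&sha, data + off, n);
            if (ret == 0)
                ret = wc_Sha256GetHash(&sha, digest); /* digest so far */
            off += n;
        }
        wc_Sha256Free(&sha);
        return ret;
    }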
+int wc_Sha256Copy(wc_Sha256* src, wc_Sha256* dst)
+{
+ int ret = 0;
-#endif /* HAVE_INTEL_AVX2 */
+ if (src == NULL || dst == NULL)
+ return BAD_FUNC_ARG;
-#endif /* HAVE_FIPS */
+ XMEMCPY(dst, src, sizeof(wc_Sha256));
+#ifdef WOLFSSL_SMALL_STACK_CACHE
+ dst->W = NULL;
+#endif
-#endif /* WOLFSSL_TI_HAHS */
+#ifdef WOLFSSL_ASYNC_CRYPT
+ ret = wolfAsync_DevCopy(&src->asyncDev, &dst->asyncDev);
+#endif
+#ifdef WOLFSSL_PIC32MZ_HASH
+ ret = wc_Pic32HashCopy(&src->cache, &dst->cache);
+#endif
+#if defined(WOLFSSL_ESP32WROOM32_CRYPT) && \
+ !defined(NO_WOLFSSL_ESP32WROOM32_CRYPT_HASH)
+ dst->ctx.mode = src->ctx.mode;
+ dst->ctx.isfirstblock = src->ctx.isfirstblock;
+ dst->ctx.sha_type = src->ctx.sha_type;
+#endif
+#if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
+ dst->flags |= WC_HASH_FLAG_ISCOPY;
+#endif
-#endif /* NO_SHA256 */
+ return ret;
+}
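wc_Sha256Copy() also stands alone for forking a hash over a shared prefix: the struct copy plus the per-port fixups above give the clone an independent life. A sketch with assumed prefix/suffix buffers and error checks elided:

    wc_Sha256 base, fork;
    byte hashA[WC_SHA256_DIGEST_SIZE];
    byte hashB[WC_SHA256_DIGEST_SIZE];

    wc_InitSha256(&base);
    wc_Sha256Update(&base, prefix, prefixSz);  /* shared prefix, hashed once */

    wc_Sha256Copy(&base, &fork);               /* clone the midstate */
    wc_Sha256Update(&fork, suffixB, suffixBSz);
    wc_Sha256Final(&fork, hashB);
    wc_Sha256Free(&fork);

    wc_Sha256Update(&base, suffixA, suffixASz);
    wc_Sha256Final(&base, hashA);
    wc_Sha256Free(&base);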
+#endif
+
+#if defined(WOLFSSL_HASH_FLAGS) || defined(WOLF_CRYPTO_CB)
+int wc_Sha256SetFlags(wc_Sha256* sha256, word32 flags)
+{
+ if (sha256) {
+ sha256->flags = flags;
+ }
+ return 0;
+}
+int wc_Sha256GetFlags(wc_Sha256* sha256, word32* flags)
+{
+ if (sha256 && flags) {
+ *flags = sha256->flags;
+ }
+ return 0;
+}
+#endif
+#endif /* !WOLFSSL_TI_HASH */
+#endif /* NO_SHA256 */