fix(newlib): fix esp32/esp32s3 atomics on PSRAM

Closes https://github.com/espressif/esp-idf/issues/4635
pull/13431/head
Alexey Lapshin 2024-01-01 14:13:02 +04:00
parent 174e9f582b
commit 4f09fba127
8 changed files with 656 additions and 595 deletions
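For context, a minimal sketch of the kind of code affected by this fix (illustrative only; the names and values below are made up, not taken from the change). On ESP32/ESP32-S3 the hardware atomic (S32C1I) path does not work for objects placed in external PSRAM, so C11 atomics on SPIRAM-allocated data need the software fallback added by this commit:

#include <stdatomic.h>
#include "esp_heap_caps.h"

void psram_atomic_example(void)
{
    // Object lives in external RAM (PSRAM)
    atomic_uint *counter = heap_caps_calloc(1, sizeof(*counter), MALLOC_CAP_SPIRAM);
    // Before this fix, the compiler could lower this to an S32C1I-based sequence,
    // which does not work for external-RAM addresses on ESP32/ESP32-S3.
    atomic_fetch_add(counter, 1);
    heap_caps_free(counter);
}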


@@ -31,6 +31,10 @@ set(srcs
)
set(include_dirs platform_include)
if(CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND)
list(APPEND srcs "port/xtensa/stdatomic_s32c1i.c")
endif()
if(CONFIG_SPIRAM_CACHE_WORKAROUND)
set(ldfragments "esp32-spiram-rom-functions-c.lf")
endif()
@@ -38,7 +42,7 @@ endif()
list(APPEND ldfragments "newlib.lf" "system_libs.lf")
idf_component_register(SRCS "${srcs}"
INCLUDE_DIRS "${include_dirs}"
INCLUDE_DIRS platform_include
PRIV_INCLUDE_DIRS priv_include
PRIV_REQUIRES soc spi_flash
LDFRAGMENTS "${ldfragments}")
@@ -49,6 +53,11 @@ target_link_libraries(${COMPONENT_LIB} INTERFACE c m ${CONFIG_COMPILER_RT_LIB_NA
set_source_files_properties(heap.c PROPERTIES COMPILE_FLAGS -fno-builtin)
if(CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND)
set_source_files_properties("port/xtensa/stdatomic_s32c1i.c"
PROPERTIES COMPILE_FLAGS "-mno-disable-hardware-atomics")
endif()
# Forces the linker to include heap, syscall, pthread, assert, and retargetable locks from this component,
# instead of the implementations provided by newlib.
list(APPEND EXTRA_LINK_FLAGS "-u newlib_include_heap_impl")


@@ -113,3 +113,7 @@ menu "Newlib"
endchoice
endmenu # Newlib
config STDATOMIC_S32C1I_SPIRAM_WORKAROUND
bool
default SPIRAM && (IDF_TARGET_ESP32 || IDF_TARGET_ESP32S3) && !IDF_TOOLCHAIN_CLANG # TODO IDF-9032


@@ -5,3 +5,5 @@ entries:
abort (noflash)
assert (noflash)
stdatomic (noflash)
if STDATOMIC_S32C1I_SPIRAM_WORKAROUND = y:
stdatomic_s32c1i (noflash)


@@ -0,0 +1,79 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdbool.h>
#include <stdint.h>
#include "esp_stdatomic.h"
#undef SYNC_OP_FUNCTIONS
#undef _ATOMIC_OP_FUNCTION
#undef ATOMIC_LOAD
#undef ATOMIC_CMP_EXCHANGE
#undef ATOMIC_STORE
#undef ATOMIC_EXCHANGE
#undef SYNC_BOOL_CMP_EXCHANGE
#undef SYNC_VAL_CMP_EXCHANGE
#undef SYNC_LOCK_TEST_AND_SET
#undef SYNC_LOCK_RELEASE
#define SYNC_OP_FUNCTIONS(n, type, name)
#define _ATOMIC_OP_FUNCTION(n, type, name_1, name_2, ret_var, operation, inverse) \
type __atomic_s32c1i_ ##name_1 ##_ ##name_2 ##_ ##n (volatile void* ptr, type value, int memorder) \
{ \
return __atomic_ ##name_1 ##_ ##name_2 ##_ ##n (ptr, value, memorder); \
}
#define ATOMIC_LOAD(n, type) \
type __atomic_s32c1i_load_ ## n (const volatile void* ptr, int memorder) \
{ \
return __atomic_load_ ## n (ptr, memorder); \
}
#define ATOMIC_CMP_EXCHANGE(n, type) \
bool __atomic_s32c1i_compare_exchange_ ## n (volatile void* ptr, void* expected, type desired, bool weak, int success, int failure) \
{ \
return __atomic_compare_exchange_ ## n (ptr, expected, desired, weak, success, failure); \
}
#define ATOMIC_STORE(n, type) \
void __atomic_s32c1i_store_ ## n (volatile void * ptr, type value, int memorder) \
{ \
__atomic_store_ ## n (ptr, value, memorder); \
}
#define ATOMIC_EXCHANGE(n, type) \
type __atomic_s32c1i_exchange_ ## n (volatile void* ptr, type value, int memorder) \
{ \
return __atomic_exchange_ ## n (ptr, value, memorder); \
}
#define SYNC_BOOL_CMP_EXCHANGE(n, type) \
bool __sync_s32c1i_bool_compare_and_swap_ ## n (volatile void* ptr, type expected, type desired) \
{ \
return __sync_bool_compare_and_swap_ ## n (ptr, expected, desired); \
}
#define SYNC_VAL_CMP_EXCHANGE(n, type) \
type __sync_s32c1i_val_compare_and_swap_ ## n (volatile void* ptr, type expected, type desired) \
{ \
return __sync_val_compare_and_swap_ ## n (ptr, expected, desired); \
}
#define SYNC_LOCK_TEST_AND_SET(n, type) \
type __sync_s32c1i_lock_test_and_set_ ## n (volatile void* ptr, type value) \
{ \
return __sync_lock_test_and_set_ ## n (ptr, value); \
}
#define SYNC_LOCK_RELEASE(n, type) \
void __sync_s32c1i_lock_release_ ## n (volatile void* ptr) \
{ \
__sync_lock_release_ ## n (ptr); \
}
ATOMIC_FUNCTIONS(1, unsigned char)
ATOMIC_FUNCTIONS(2, short unsigned int)
ATOMIC_FUNCTIONS(4, unsigned int)
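The wrappers above are effectively the only code in the build still compiled with hardware atomics enabled (via the -mno-disable-hardware-atomics flag added in CMakeLists.txt). As a rough illustration, not literal preprocessor output, ATOMIC_EXCHANGE(4, unsigned int) in this file expands to approximately the following, where the builtin is lowered to the native S32C1I-based sequence:

unsigned int __atomic_s32c1i_exchange_4 (volatile void* ptr, unsigned int value, int memorder)
{
    return __atomic_exchange_4 (ptr, value, memorder);
}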


@@ -0,0 +1,296 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "freertos/FreeRTOS.h"
#include "soc/soc_caps.h"
#include "sdkconfig.h"
#ifdef __XTENSA__
#include "xtensa/config/core-isa.h"
#ifndef XCHAL_HAVE_S32C1I
#error "XCHAL_HAVE_S32C1I not defined, include correct header!"
#endif // XCHAL_HAVE_S32C1I
#ifndef CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND
#define CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND 0
#endif
#define HAS_ATOMICS_32 ((XCHAL_HAVE_S32C1I == 1) && !CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND)
// no 64-bit atomics on Xtensa
#define HAS_ATOMICS_64 0
#else // RISCV
// The GCC toolchain defines this preprocessor macro if the "A" (atomic) extension is supported
#ifndef __riscv_atomic
#define __riscv_atomic 0
#endif
#define HAS_ATOMICS_32 (__riscv_atomic == 1)
#define HAS_ATOMICS_64 ((__riscv_atomic == 1) && (__riscv_xlen == 64))
#endif // (__XTENSA__, __riscv)
#if SOC_CPU_CORES_NUM == 1
// Single core SoC: atomics can be implemented using portSET_INTERRUPT_MASK_FROM_ISR
// and portCLEAR_INTERRUPT_MASK_FROM_ISR, which disable and re-enable interrupts.
#if CONFIG_FREERTOS_SMP
#define _ATOMIC_ENTER_CRITICAL() unsigned int state = portDISABLE_INTERRUPTS();
#define _ATOMIC_EXIT_CRITICAL() portRESTORE_INTERRUPTS(state)
#else // CONFIG_FREERTOS_SMP
#define _ATOMIC_ENTER_CRITICAL() unsigned int state = portSET_INTERRUPT_MASK_FROM_ISR()
#define _ATOMIC_EXIT_CRITICAL() portCLEAR_INTERRUPT_MASK_FROM_ISR(state)
#endif // CONFIG_FREERTOS_SMP
#else // SOC_CPU_CORES_NUM
#define _ATOMIC_ENTER_CRITICAL() portENTER_CRITICAL_SAFE(&s_atomic_lock);
#define _ATOMIC_EXIT_CRITICAL() portEXIT_CRITICAL_SAFE(&s_atomic_lock);
#endif // SOC_CPU_CORES_NUM
#if CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND
#define _ATOMIC_IF_NOT_EXT_RAM() \
if (!((uintptr_t)ptr >= SOC_EXTRAM_DATA_LOW && (uintptr_t) ptr < SOC_EXTRAM_DATA_HIGH))
#define _ATOMIC_HW_STUB_OP_FUNCTION(n, type, name_1, name_2) \
_ATOMIC_IF_NOT_EXT_RAM() { \
type __atomic_s32c1i_ ##name_1 ##_ ##name_2 ##_ ##n (volatile void* ptr, type value, int memorder); \
return __atomic_s32c1i_ ##name_1 ##_ ##name_2 ##_ ##n (ptr, value, memorder); \
}
#define _ATOMIC_HW_STUB_EXCHANGE(n, type) \
_ATOMIC_IF_NOT_EXT_RAM() { \
type __atomic_s32c1i_exchange_ ## n (volatile void* ptr, type value, int memorder); \
return __atomic_s32c1i_exchange_ ## n (ptr, value, memorder); \
}
#define _ATOMIC_HW_STUB_STORE(n, type) \
_ATOMIC_IF_NOT_EXT_RAM() { \
void __atomic_s32c1i_store_ ## n (volatile void * ptr, type value, int memorder); \
__atomic_s32c1i_store_ ## n (ptr, value, memorder); \
return; \
}
#define _ATOMIC_HW_STUB_CMP_EXCHANGE(n, type) \
_ATOMIC_IF_NOT_EXT_RAM() { \
bool __atomic_s32c1i_compare_exchange_ ## n (volatile void* ptr, void* expected, type desired, bool weak, int success, int failure); \
return __atomic_s32c1i_compare_exchange_ ## n (ptr, expected, desired, weak, success, failure); \
}
#define _ATOMIC_HW_STUB_LOAD(n, type) \
_ATOMIC_IF_NOT_EXT_RAM() { \
type __atomic_s32c1i_load_ ## n (const volatile void* ptr, int memorder); \
return __atomic_s32c1i_load_ ## n (ptr, memorder); \
}
#define _ATOMIC_HW_STUB_SYNC_BOOL_CMP_EXCHANGE(n, type) \
_ATOMIC_IF_NOT_EXT_RAM() { \
bool __sync_s32c1i_bool_compare_and_swap_ ## n (volatile void* ptr, type expected, type desired); \
return __sync_s32c1i_bool_compare_and_swap_ ## n (ptr, expected, desired); \
}
#define _ATOMIC_HW_STUB_SYNC_VAL_CMP_EXCHANGE(n, type) \
_ATOMIC_IF_NOT_EXT_RAM() { \
type __sync_s32c1i_val_compare_and_swap_ ## n (volatile void* ptr, type expected, type desired); \
return __sync_s32c1i_val_compare_and_swap_ ## n (ptr, expected, desired); \
}
#define _ATOMIC_HW_STUB_SYNC_LOCK_TEST_AND_SET(n, type) \
_ATOMIC_IF_NOT_EXT_RAM() { \
type __sync_s32c1i_lock_test_and_set_ ## n (volatile void* ptr, type value); \
return __sync_s32c1i_lock_test_and_set_ ## n (ptr, value); \
}
#define _ATOMIC_HW_STUB_SYNC_LOCK_RELEASE(n, type) \
_ATOMIC_IF_NOT_EXT_RAM() { \
void __sync_s32c1i_lock_release_ ## n (volatile void* ptr); \
__sync_s32c1i_lock_release_ ## n (ptr); \
return; \
}
#else // CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND
#define _ATOMIC_HW_STUB_OP_FUNCTION(n, type, name_1, name_2)
#define _ATOMIC_HW_STUB_EXCHANGE(n, type)
#define _ATOMIC_HW_STUB_STORE(n, type)
#define _ATOMIC_HW_STUB_CMP_EXCHANGE(n, type)
#define _ATOMIC_HW_STUB_LOAD(n, type)
#define _ATOMIC_HW_STUB_SYNC_BOOL_CMP_EXCHANGE(n, type)
#define _ATOMIC_HW_STUB_SYNC_VAL_CMP_EXCHANGE(n, type)
#define _ATOMIC_HW_STUB_SYNC_LOCK_TEST_AND_SET(n, type)
#define _ATOMIC_HW_STUB_SYNC_LOCK_RELEASE(n, type)
#endif // CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND
#ifdef __clang__
// Clang does not allow the "__sync_*" atomics to be defined directly. The workaround is to define a function
// named "__sync_*_builtin" that implements the "__sync_*" functionality, and then use an asm directive to bind
// the "__sync_*" symbol to that function.
#define CLANG_ATOMIC_SUFFIX(name_) name_ ## _builtin
#define CLANG_DECLARE_ALIAS(name_) \
__asm__(".type " # name_ ", @function\n" \
".global " #name_ "\n" \
".equ " #name_ ", " #name_ "_builtin");
#else // __clang__
#define CLANG_ATOMIC_SUFFIX(name_) name_
#define CLANG_DECLARE_ALIAS(name_)
#endif // __clang__
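/*
 * Illustrative expansion of the Clang workaround described above (hand-written
 * sketch, not literal preprocessor output). For SYNC_LOCK_RELEASE(4, unsigned int)
 * the function body is emitted under the name __sync_lock_release_4_builtin and the
 * asm directive then binds the __sync_lock_release_4 symbol to it:
 *
 *   void __sync_lock_release_4_builtin (volatile void* ptr) { ... }
 *   __asm__(".type __sync_lock_release_4, @function\n"
 *           ".global __sync_lock_release_4\n"
 *           ".equ __sync_lock_release_4, __sync_lock_release_4_builtin");
 */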
#define ATOMIC_OP_FUNCTIONS(n, type, name, operation, inverse) \
_ATOMIC_OP_FUNCTION(n, type, fetch, name, old, operation, inverse) \
_ATOMIC_OP_FUNCTION(n, type, name, fetch, new, operation, inverse)
#define _ATOMIC_OP_FUNCTION(n, type, name_1, name_2, ret_var, operation, inverse) \
type __atomic_ ##name_1 ##_ ##name_2 ##_ ##n (volatile void* ptr, type value, int memorder) \
{ \
type old, new; \
_ATOMIC_HW_STUB_OP_FUNCTION(n, type, name_1, name_2); \
_ATOMIC_ENTER_CRITICAL(); \
old = (*(volatile type*)ptr); \
new = inverse(old operation value); \
*(volatile type*)ptr = new; \
_ATOMIC_EXIT_CRITICAL(); \
return ret_var; \
}
#define ATOMIC_LOAD(n, type) \
type __atomic_load_ ## n (const volatile void* ptr, int memorder) \
{ \
type old; \
_ATOMIC_HW_STUB_LOAD(n, type); \
_ATOMIC_ENTER_CRITICAL(); \
old = *(const volatile type*)ptr; \
_ATOMIC_EXIT_CRITICAL(); \
return old; \
}
#define ATOMIC_CMP_EXCHANGE(n, type) \
bool __atomic_compare_exchange_ ## n (volatile void* ptr, void* expected, type desired, bool weak, int success, int failure) \
{ \
bool ret = false; \
_ATOMIC_HW_STUB_CMP_EXCHANGE(n, type); \
_ATOMIC_ENTER_CRITICAL(); \
if (*(volatile type*)ptr == *(type*)expected) { \
ret = true; \
*(volatile type*)ptr = desired; \
} else { \
*(type*)expected = *(volatile type*)ptr; \
} \
_ATOMIC_EXIT_CRITICAL(); \
return ret; \
}
#define ATOMIC_STORE(n, type) \
void __atomic_store_ ## n (volatile void * ptr, type value, int memorder) \
{ \
_ATOMIC_HW_STUB_STORE(n, type); \
_ATOMIC_ENTER_CRITICAL(); \
*(volatile type*)ptr = value; \
_ATOMIC_EXIT_CRITICAL(); \
}
#define ATOMIC_EXCHANGE(n, type) \
type __atomic_exchange_ ## n (volatile void* ptr, type value, int memorder) \
{ \
type old; \
_ATOMIC_HW_STUB_EXCHANGE(n, type); \
_ATOMIC_ENTER_CRITICAL(); \
old = *(volatile type*)ptr; \
*(volatile type*)ptr = value; \
_ATOMIC_EXIT_CRITICAL(); \
return old; \
}
#define SYNC_OP_FUNCTIONS(n, type, name) \
_SYNC_OP_FUNCTION(n, type, fetch, name) \
_SYNC_OP_FUNCTION(n, type, name, fetch)
#define _SYNC_OP_FUNCTION(n, type, name_1, name_2) \
type CLANG_ATOMIC_SUFFIX(__sync_ ##name_1 ##_and_ ##name_2 ##_ ##n) (volatile void* ptr, type value) \
{ \
return __atomic_ ##name_1 ##_ ##name_2 ##_ ##n (ptr, value, __ATOMIC_SEQ_CST); \
} \
CLANG_DECLARE_ALIAS( __sync_##name_1 ##_and_ ##name_2 ##_ ##n )
#define SYNC_BOOL_CMP_EXCHANGE(n, type) \
bool CLANG_ATOMIC_SUFFIX(__sync_bool_compare_and_swap_ ## n) (volatile void* ptr, type expected, type desired) \
{ \
bool ret = false; \
_ATOMIC_HW_STUB_SYNC_BOOL_CMP_EXCHANGE(n, type); \
_ATOMIC_ENTER_CRITICAL(); \
if (*(volatile type*)ptr == expected) { \
*(volatile type*)ptr = desired; \
ret = true; \
} \
_ATOMIC_EXIT_CRITICAL(); \
return ret; \
} \
CLANG_DECLARE_ALIAS( __sync_bool_compare_and_swap_ ## n )
#define SYNC_VAL_CMP_EXCHANGE(n, type) \
type CLANG_ATOMIC_SUFFIX(__sync_val_compare_and_swap_ ## n) (volatile void* ptr, type expected, type desired) \
{ \
type old; \
_ATOMIC_HW_STUB_SYNC_VAL_CMP_EXCHANGE(n, type); \
_ATOMIC_ENTER_CRITICAL(); \
old = *(volatile type*)ptr; \
if (old == expected) { \
*(volatile type*)ptr = desired; \
} \
_ATOMIC_EXIT_CRITICAL(); \
return old; \
} \
CLANG_DECLARE_ALIAS( __sync_val_compare_and_swap_ ## n )
#define SYNC_LOCK_TEST_AND_SET(n, type) \
type CLANG_ATOMIC_SUFFIX(__sync_lock_test_and_set_ ## n) (volatile void* ptr, type value) \
{ \
type old; \
_ATOMIC_HW_STUB_SYNC_LOCK_TEST_AND_SET(n, type); \
_ATOMIC_ENTER_CRITICAL(); \
old = *(volatile type*)ptr; \
*(volatile type*)ptr = value; \
_ATOMIC_EXIT_CRITICAL(); \
return old; \
} \
CLANG_DECLARE_ALIAS( __sync_lock_test_and_set_ ## n )
#define SYNC_LOCK_RELEASE(n, type) \
void CLANG_ATOMIC_SUFFIX(__sync_lock_release_ ## n) (volatile void* ptr) \
{ \
_ATOMIC_HW_STUB_SYNC_LOCK_RELEASE(n, type); \
_ATOMIC_ENTER_CRITICAL(); \
*(volatile type*)ptr = 0; \
_ATOMIC_EXIT_CRITICAL(); \
} \
CLANG_DECLARE_ALIAS( __sync_lock_release_ ## n )
#define ATOMIC_FUNCTIONS(n, type) \
ATOMIC_EXCHANGE(n, type) \
ATOMIC_CMP_EXCHANGE(n, type) \
ATOMIC_OP_FUNCTIONS(n, type, add, +, ) \
ATOMIC_OP_FUNCTIONS(n, type, sub, -, ) \
ATOMIC_OP_FUNCTIONS(n, type, and, &, ) \
ATOMIC_OP_FUNCTIONS(n, type, or, |, ) \
ATOMIC_OP_FUNCTIONS(n, type, xor, ^, ) \
ATOMIC_OP_FUNCTIONS(n, type, nand, &, ~) \
/* LLVM has not implemented native atomic loads/stores for RISC-V targets without the Atomic extension. LLVM thread: https://reviews.llvm.org/D47553. \
 * Even though GCC does transform them, these libcalls need to be available for the case where an LLVM-based project links against IDF. */ \
ATOMIC_LOAD(n, type) \
ATOMIC_STORE(n, type) \
SYNC_OP_FUNCTIONS(n, type, add) \
SYNC_OP_FUNCTIONS(n, type, sub) \
SYNC_OP_FUNCTIONS(n, type, and) \
SYNC_OP_FUNCTIONS(n, type, or) \
SYNC_OP_FUNCTIONS(n, type, xor) \
SYNC_OP_FUNCTIONS(n, type, nand) \
SYNC_BOOL_CMP_EXCHANGE(n, type) \
SYNC_VAL_CMP_EXCHANGE(n, type) \
SYNC_LOCK_TEST_AND_SET(n, type) \
SYNC_LOCK_RELEASE(n, type)
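To see how the pieces fit together when CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND is enabled, ATOMIC_LOAD(4, unsigned int) expands to roughly the following (a hand-expanded sketch for illustration, not literal preprocessor output). Pointers into internal RAM are forwarded to the hardware-atomic wrapper from stdatomic_s32c1i.c, while external-RAM (PSRAM) addresses fall back to the critical-section emulation:

unsigned int __atomic_load_4 (const volatile void* ptr, int memorder)
{
    unsigned int old;
    if (!((uintptr_t)ptr >= SOC_EXTRAM_DATA_LOW && (uintptr_t)ptr < SOC_EXTRAM_DATA_HIGH)) {
        unsigned int __atomic_s32c1i_load_4 (const volatile void* ptr, int memorder);
        return __atomic_s32c1i_load_4 (ptr, memorder);    // hardware S32C1I path (internal RAM)
    }
    _ATOMIC_ENTER_CRITICAL();                              // software path (object in PSRAM)
    old = *(const volatile unsigned int*)ptr;
    _ATOMIC_EXIT_CRITICAL();
    return old;
}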


@@ -0,0 +1,3 @@
if(CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND)
idf_build_set_property(COMPILE_OPTIONS "-mdisable-hardware-atomics" APPEND)
endif()


@@ -6,296 +6,21 @@
//replacement for gcc built-in functions
#include "sdkconfig.h"
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "soc/soc_caps.h"
#include "esp_stdatomic.h"
#include "freertos/FreeRTOS.h"
#include "sdkconfig.h"
#ifdef __XTENSA__
#include "xtensa/config/core-isa.h"
#ifndef XCHAL_HAVE_S32C1I
#error "XCHAL_HAVE_S32C1I not defined, include correct header!"
#endif
#define HAS_ATOMICS_32 (XCHAL_HAVE_S32C1I == 1)
// no 64-bit atomics on Xtensa
#define HAS_ATOMICS_64 0
#else // RISCV
// GCC toolchain will define this pre-processor if "A" extension is supported
#ifndef __riscv_atomic
#define __riscv_atomic 0
#endif
#define HAS_ATOMICS_32 (__riscv_atomic == 1)
#define HAS_ATOMICS_64 ((__riscv_atomic == 1) && (__riscv_xlen == 64))
#endif // (__XTENSA__, __riscv)
#if SOC_CPU_CORES_NUM == 1
// Single core SoC: atomics can be implemented using portSET_INTERRUPT_MASK_FROM_ISR
// and portCLEAR_INTERRUPT_MASK_FROM_ISR, which disables and enables interrupts.
#if CONFIG_FREERTOS_SMP
#define _ATOMIC_ENTER_CRITICAL() ({ \
unsigned state = portDISABLE_INTERRUPTS(); \
state; \
})
#define _ATOMIC_EXIT_CRITICAL(state) do { \
portRESTORE_INTERRUPTS(state); \
} while (0)
#else // CONFIG_FREERTOS_SMP
#define _ATOMIC_ENTER_CRITICAL() ({ \
unsigned state = portSET_INTERRUPT_MASK_FROM_ISR(); \
state; \
})
#define _ATOMIC_EXIT_CRITICAL(state) do { \
portCLEAR_INTERRUPT_MASK_FROM_ISR(state); \
} while (0)
#endif
#else // SOC_CPU_CORES_NUM
#if SOC_CPU_CORES_NUM > 1
#if !CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND
_Static_assert(HAS_ATOMICS_32, "32-bit atomics should be supported if SOC_CPU_CORES_NUM > 1");
#endif // CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND
// Only need to implement 64-bit atomics here. Use a single global portMUX_TYPE spinlock
// to emulate the atomics.
static portMUX_TYPE s_atomic_lock = portMUX_INITIALIZER_UNLOCKED;
// Return value is not used but kept for compatibility with the single-core version above.
#define _ATOMIC_ENTER_CRITICAL() ({ \
portENTER_CRITICAL_SAFE(&s_atomic_lock); \
0; \
})
#define _ATOMIC_EXIT_CRITICAL(state) do { \
(void) (state); \
portEXIT_CRITICAL_SAFE(&s_atomic_lock); \
} while(0)
#endif // SOC_CPU_CORES_NUM
#ifdef __clang__
// Clang doesn't allow to define "__sync_*" atomics. The workaround is to define function with name "__sync_*_builtin",
// which implements "__sync_*" atomic functionality and use asm directive to set the value of symbol "__sync_*" to the name
// of defined function.
#define CLANG_ATOMIC_SUFFIX(name_) name_ ## _builtin
#define CLANG_DECLARE_ALIAS(name_) \
__asm__(".type " # name_ ", @function\n" \
".global " #name_ "\n" \
".equ " #name_ ", " #name_ "_builtin");
#else // __clang__
#define CLANG_ATOMIC_SUFFIX(name_) name_
#define CLANG_DECLARE_ALIAS(name_)
#endif // __clang__
#define ATOMIC_LOAD(n, type) type __atomic_load_ ## n (const volatile void* mem, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(const volatile type*)mem; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define ATOMIC_STORE(n, type) void __atomic_store_ ## n (volatile void * mem, type val, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
*(volatile type *)mem = val; \
_ATOMIC_EXIT_CRITICAL(state); \
}
#define ATOMIC_EXCHANGE(n, type) type __atomic_exchange_ ## n (volatile void* mem, type val, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)mem; \
*(volatile type*)mem = val; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define CMP_EXCHANGE(n, type) bool __atomic_compare_exchange_ ## n (volatile void* mem, void* expect, type desired, bool weak, int success, int failure) \
{ \
bool ret = false; \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
if (*(volatile type*)mem == *(type*)expect) { \
ret = true; \
*(volatile type*)mem = desired; \
} else { \
*(type*)expect = *(volatile type*)mem; \
} \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define FETCH_ADD(n, type) type __atomic_fetch_add_ ## n (volatile void* ptr, type value, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr; \
*(volatile type*)ptr = *(volatile type*)ptr + value; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define ADD_FETCH(n, type) type __atomic_add_fetch_ ## n (volatile void* ptr, type value, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr + value; \
*(volatile type*)ptr = ret; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define FETCH_SUB(n, type) type __atomic_fetch_sub_ ## n (volatile void* ptr, type value, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr; \
*(volatile type*)ptr = *(volatile type*)ptr - value; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define SUB_FETCH(n, type) type __atomic_sub_fetch_ ## n (volatile void* ptr, type value, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr - value; \
*(volatile type*)ptr = ret; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define FETCH_AND(n, type) type __atomic_fetch_and_ ## n (volatile void* ptr, type value, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr; \
*(volatile type*)ptr = *(volatile type*)ptr & value; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define AND_FETCH(n, type) type __atomic_and_fetch_ ## n (volatile void* ptr, type value, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr & value; \
*(volatile type*)ptr = ret; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define FETCH_OR(n, type) type __atomic_fetch_or_ ## n (volatile void* ptr, type value, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr; \
*(volatile type*)ptr = *(volatile type*)ptr | value; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define OR_FETCH(n, type) type __atomic_or_fetch_ ## n (volatile void* ptr, type value, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr | value; \
*(volatile type*)ptr = ret; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define FETCH_XOR(n, type) type __atomic_fetch_xor_ ## n (volatile void* ptr, type value, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr; \
*(volatile type*)ptr = *(volatile type*)ptr ^ value; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define XOR_FETCH(n, type) type __atomic_xor_fetch_ ## n (volatile void* ptr, type value, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr ^ value; \
*(volatile type*)ptr = ret; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define FETCH_NAND(n, type) type __atomic_fetch_nand_ ## n (volatile void* ptr, type value, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr; \
*(volatile type*)ptr = ~(*(volatile type*)ptr & value); \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define NAND_FETCH(n, type) type __atomic_nand_fetch_ ## n (volatile void* ptr, type value, int memorder) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = ~(*(volatile type*)ptr & value); \
*(volatile type*)ptr = ret; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
}
#define SYNC_FETCH_OP(op, n, type) type CLANG_ATOMIC_SUFFIX(__sync_fetch_and_ ## op ##_ ## n) (volatile void* ptr, type value) \
{ \
return __atomic_fetch_ ## op ##_ ## n (ptr, value, __ATOMIC_SEQ_CST); \
} \
CLANG_DECLARE_ALIAS( __sync_fetch_and_ ## op ##_ ## n )
#define SYNC_OP_FETCH(op, n, type) type CLANG_ATOMIC_SUFFIX(__sync_ ## op ##_and_fetch_ ## n) (volatile void* ptr, type value) \
{ \
return __atomic_ ## op ##_fetch_ ## n (ptr, value, __ATOMIC_SEQ_CST); \
} \
CLANG_DECLARE_ALIAS( __sync_ ## op ##_and_fetch_ ## n )
#define SYNC_BOOL_CMP_EXCHANGE(n, type) bool CLANG_ATOMIC_SUFFIX(__sync_bool_compare_and_swap_ ## n) (volatile void* ptr, type oldval, type newval) \
{ \
bool ret = false; \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
if (*(volatile type*)ptr == oldval) { \
*(volatile type*)ptr = newval; \
ret = true; \
} \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
} \
CLANG_DECLARE_ALIAS( __sync_bool_compare_and_swap_ ## n )
#define SYNC_VAL_CMP_EXCHANGE(n, type) type CLANG_ATOMIC_SUFFIX(__sync_val_compare_and_swap_ ## n) (volatile void* ptr, type oldval, type newval) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr; \
if (*(volatile type*)ptr == oldval) { \
*(volatile type*)ptr = newval; \
} \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
} \
CLANG_DECLARE_ALIAS( __sync_val_compare_and_swap_ ## n )
#define SYNC_LOCK_TEST_AND_SET(n, type) type CLANG_ATOMIC_SUFFIX(__sync_lock_test_and_set_ ## n) (volatile void* ptr, type val) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
type ret = *(volatile type*)ptr; \
*(volatile type*)ptr = val; \
_ATOMIC_EXIT_CRITICAL(state); \
return ret; \
} \
CLANG_DECLARE_ALIAS( __sync_lock_test_and_set_ ## n )
#define SYNC_LOCK_RELEASE(n, type) void CLANG_ATOMIC_SUFFIX(__sync_lock_release_ ## n) (volatile void* ptr) \
{ \
unsigned state = _ATOMIC_ENTER_CRITICAL(); \
*(volatile type*)ptr = 0; \
_ATOMIC_EXIT_CRITICAL(state); \
} \
CLANG_DECLARE_ALIAS( __sync_lock_release_ ## n )
#endif
#if !HAS_ATOMICS_32
@@ -303,134 +28,9 @@ _Static_assert(sizeof(unsigned char) == 1, "atomics require a 1-byte type");
_Static_assert(sizeof(short unsigned int) == 2, "atomics require a 2-bytes type");
_Static_assert(sizeof(unsigned int) == 4, "atomics require a 4-bytes type");
ATOMIC_EXCHANGE(1, unsigned char)
ATOMIC_EXCHANGE(2, short unsigned int)
ATOMIC_EXCHANGE(4, unsigned int)
CMP_EXCHANGE(1, unsigned char)
CMP_EXCHANGE(2, short unsigned int)
CMP_EXCHANGE(4, unsigned int)
FETCH_ADD(1, unsigned char)
FETCH_ADD(2, short unsigned int)
FETCH_ADD(4, unsigned int)
ADD_FETCH(1, unsigned char)
ADD_FETCH(2, short unsigned int)
ADD_FETCH(4, unsigned int)
FETCH_SUB(1, unsigned char)
FETCH_SUB(2, short unsigned int)
FETCH_SUB(4, unsigned int)
SUB_FETCH(1, unsigned char)
SUB_FETCH(2, short unsigned int)
SUB_FETCH(4, unsigned int)
FETCH_AND(1, unsigned char)
FETCH_AND(2, short unsigned int)
FETCH_AND(4, unsigned int)
AND_FETCH(1, unsigned char)
AND_FETCH(2, short unsigned int)
AND_FETCH(4, unsigned int)
FETCH_OR(1, unsigned char)
FETCH_OR(2, short unsigned int)
FETCH_OR(4, unsigned int)
OR_FETCH(1, unsigned char)
OR_FETCH(2, short unsigned int)
OR_FETCH(4, unsigned int)
FETCH_XOR(1, unsigned char)
FETCH_XOR(2, short unsigned int)
FETCH_XOR(4, unsigned int)
XOR_FETCH(1, unsigned char)
XOR_FETCH(2, short unsigned int)
XOR_FETCH(4, unsigned int)
FETCH_NAND(1, unsigned char)
FETCH_NAND(2, short unsigned int)
FETCH_NAND(4, unsigned int)
NAND_FETCH(1, unsigned char)
NAND_FETCH(2, short unsigned int)
NAND_FETCH(4, unsigned int)
SYNC_FETCH_OP(add, 1, unsigned char)
SYNC_FETCH_OP(add, 2, short unsigned int)
SYNC_FETCH_OP(add, 4, unsigned int)
SYNC_OP_FETCH(add, 1, unsigned char)
SYNC_OP_FETCH(add, 2, short unsigned int)
SYNC_OP_FETCH(add, 4, unsigned int)
SYNC_FETCH_OP(sub, 1, unsigned char)
SYNC_FETCH_OP(sub, 2, short unsigned int)
SYNC_FETCH_OP(sub, 4, unsigned int)
SYNC_OP_FETCH(sub, 1, unsigned char)
SYNC_OP_FETCH(sub, 2, short unsigned int)
SYNC_OP_FETCH(sub, 4, unsigned int)
SYNC_FETCH_OP( and, 1, unsigned char)
SYNC_FETCH_OP( and, 2, short unsigned int)
SYNC_FETCH_OP( and, 4, unsigned int)
SYNC_OP_FETCH( and, 1, unsigned char)
SYNC_OP_FETCH( and, 2, short unsigned int)
SYNC_OP_FETCH( and, 4, unsigned int)
SYNC_FETCH_OP( or, 1, unsigned char)
SYNC_FETCH_OP( or, 2, short unsigned int)
SYNC_FETCH_OP( or, 4, unsigned int)
SYNC_OP_FETCH( or, 1, unsigned char)
SYNC_OP_FETCH( or, 2, short unsigned int)
SYNC_OP_FETCH( or, 4, unsigned int)
SYNC_FETCH_OP(xor, 1, unsigned char)
SYNC_FETCH_OP(xor, 2, short unsigned int)
SYNC_FETCH_OP(xor, 4, unsigned int)
SYNC_OP_FETCH(xor, 1, unsigned char)
SYNC_OP_FETCH(xor, 2, short unsigned int)
SYNC_OP_FETCH(xor, 4, unsigned int)
SYNC_FETCH_OP(nand, 1, unsigned char)
SYNC_FETCH_OP(nand, 2, short unsigned int)
SYNC_FETCH_OP(nand, 4, unsigned int)
SYNC_OP_FETCH(nand, 1, unsigned char)
SYNC_OP_FETCH(nand, 2, short unsigned int)
SYNC_OP_FETCH(nand, 4, unsigned int)
SYNC_BOOL_CMP_EXCHANGE(1, unsigned char)
SYNC_BOOL_CMP_EXCHANGE(2, short unsigned int)
SYNC_BOOL_CMP_EXCHANGE(4, unsigned int)
SYNC_VAL_CMP_EXCHANGE(1, unsigned char)
SYNC_VAL_CMP_EXCHANGE(2, short unsigned int)
SYNC_VAL_CMP_EXCHANGE(4, unsigned int)
SYNC_LOCK_TEST_AND_SET(1, unsigned char)
SYNC_LOCK_TEST_AND_SET(2, short unsigned int)
SYNC_LOCK_TEST_AND_SET(4, unsigned int)
SYNC_LOCK_RELEASE(1, unsigned char)
SYNC_LOCK_RELEASE(2, short unsigned int)
SYNC_LOCK_RELEASE(4, unsigned int)
// LLVM has not implemented native atomic load/stores for riscv targets without the Atomic extension. LLVM thread: https://reviews.llvm.org/D47553.
// Even though GCC does transform them, these libcalls need to be available for the case where a LLVM based project links against IDF.
ATOMIC_LOAD(1, unsigned char)
ATOMIC_LOAD(2, short unsigned int)
ATOMIC_LOAD(4, unsigned int)
ATOMIC_STORE(1, unsigned char)
ATOMIC_STORE(2, short unsigned int)
ATOMIC_STORE(4, unsigned int)
ATOMIC_FUNCTIONS(1, unsigned char)
ATOMIC_FUNCTIONS(2, short unsigned int)
ATOMIC_FUNCTIONS(4, unsigned int)
#elif __riscv_atomic == 1
@@ -450,102 +50,62 @@ CLANG_DECLARE_ALIAS(__atomic_is_lock_free)
#if !HAS_ATOMICS_64
#if CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND
#undef _ATOMIC_HW_STUB_OP_FUNCTION
#undef _ATOMIC_HW_STUB_EXCHANGE
#undef _ATOMIC_HW_STUB_STORE
#undef _ATOMIC_HW_STUB_CMP_EXCHANGE
#undef _ATOMIC_HW_STUB_LOAD
#undef _ATOMIC_HW_STUB_SYNC_BOOL_CMP_EXCHANGE
#undef _ATOMIC_HW_STUB_SYNC_VAL_CMP_EXCHANGE
#undef _ATOMIC_HW_STUB_SYNC_LOCK_TEST_AND_SET
#undef _ATOMIC_HW_STUB_SYNC_LOCK_RELEASE
#define _ATOMIC_HW_STUB_OP_FUNCTION(n, type, name_1, name_2)
#define _ATOMIC_HW_STUB_EXCHANGE(n, type)
#define _ATOMIC_HW_STUB_STORE(n, type)
#define _ATOMIC_HW_STUB_CMP_EXCHANGE(n, type)
#define _ATOMIC_HW_STUB_LOAD(n, type)
#define _ATOMIC_HW_STUB_SYNC_BOOL_CMP_EXCHANGE(n, type)
#define _ATOMIC_HW_STUB_SYNC_VAL_CMP_EXCHANGE(n, type)
#define _ATOMIC_HW_STUB_SYNC_LOCK_TEST_AND_SET(n, type)
#define _ATOMIC_HW_STUB_SYNC_LOCK_RELEASE(n, type)
#endif // CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND
_Static_assert(sizeof(long long unsigned int) == 8, "atomics require a 8-bytes type");
ATOMIC_EXCHANGE(8, long long unsigned int)
CMP_EXCHANGE(8, long long unsigned int)
FETCH_ADD(8, long long unsigned int)
FETCH_SUB(8, long long unsigned int)
FETCH_AND(8, long long unsigned int)
FETCH_OR(8, long long unsigned int)
FETCH_XOR(8, long long unsigned int)
FETCH_NAND(8, long long unsigned int)
ADD_FETCH(8, long long unsigned int)
SUB_FETCH(8, long long unsigned int)
AND_FETCH(8, long long unsigned int)
OR_FETCH(8, long long unsigned int)
XOR_FETCH(8, long long unsigned int)
NAND_FETCH(8, long long unsigned int)
SYNC_FETCH_OP(add, 8, long long unsigned int)
SYNC_FETCH_OP(sub, 8, long long unsigned int)
SYNC_FETCH_OP( and, 8, long long unsigned int)
SYNC_FETCH_OP( or, 8, long long unsigned int)
SYNC_FETCH_OP(xor, 8, long long unsigned int)
SYNC_FETCH_OP(nand, 8, long long unsigned int)
SYNC_OP_FETCH(add, 8, long long unsigned int)
SYNC_OP_FETCH(sub, 8, long long unsigned int)
SYNC_OP_FETCH( and, 8, long long unsigned int)
SYNC_OP_FETCH( or, 8, long long unsigned int)
SYNC_OP_FETCH(xor, 8, long long unsigned int)
SYNC_OP_FETCH(nand, 8, long long unsigned int)
SYNC_BOOL_CMP_EXCHANGE(8, long long unsigned int)
SYNC_VAL_CMP_EXCHANGE(8, long long unsigned int)
SYNC_LOCK_TEST_AND_SET(8, long long unsigned int)
SYNC_LOCK_RELEASE(8, long long unsigned int)
// LLVM has not implemented native atomic load/stores for riscv targets without the Atomic extension. LLVM thread: https://reviews.llvm.org/D47553.
// Even though GCC does transform them, these libcalls need to be available for the case where a LLVM based project links against IDF.
ATOMIC_LOAD(8, long long unsigned int)
ATOMIC_STORE(8, long long unsigned int)
ATOMIC_FUNCTIONS(8, long long unsigned int)
#endif // !HAS_ATOMICS_64
// Clang generates calls to the generic __atomic_load/__atomic_store functions for object sizes larger than 4 bytes
void CLANG_ATOMIC_SUFFIX(__atomic_load)(size_t size, const volatile void *src, void *dest, int model)
{
unsigned state = _ATOMIC_ENTER_CRITICAL();
_ATOMIC_ENTER_CRITICAL();
memcpy(dest, (const void *)src, size);
_ATOMIC_EXIT_CRITICAL(state);
_ATOMIC_EXIT_CRITICAL();
}
CLANG_DECLARE_ALIAS(__atomic_load)
void CLANG_ATOMIC_SUFFIX(__atomic_store)(size_t size, volatile void *dest, void *src, int model)
{
unsigned state = _ATOMIC_ENTER_CRITICAL();
_ATOMIC_ENTER_CRITICAL();
memcpy((void *)dest, (const void *)src, size);
_ATOMIC_EXIT_CRITICAL(state);
_ATOMIC_EXIT_CRITICAL();
}
CLANG_DECLARE_ALIAS(__atomic_store)
bool CLANG_ATOMIC_SUFFIX(__atomic_compare_exchange)(size_t size, volatile void *ptr, void *expected, void *desired, int success_memorder, int failure_memorder)
{
bool ret = false;
unsigned state = _ATOMIC_ENTER_CRITICAL();
_ATOMIC_ENTER_CRITICAL();
if (!memcmp((void *)ptr, expected, size)) {
memcpy((void *)ptr, (const void *)desired, size);
ret = true;
} else {
memcpy((void *)expected, (const void *)ptr, size);
}
_ATOMIC_EXIT_CRITICAL(state);
_ATOMIC_EXIT_CRITICAL();
return ret;
}
CLANG_DECLARE_ALIAS(__atomic_compare_exchange)
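As a hedged illustration of when the generic functions above are reached (the struct type below is made up for the example): an _Atomic object too large for the sized __atomic_*_N helpers makes Clang emit a library call that passes the object size explicitly:

#include <stdatomic.h>
#include <stdint.h>

typedef struct { uint32_t a, b, c, d, e; } blob_t;   // 20 bytes, no native atomic width
static _Atomic blob_t s_shared;

blob_t read_blob(void)
{
    // Clang lowers this to __atomic_load(sizeof(blob_t), &s_shared, &result, __ATOMIC_SEQ_CST)
    return atomic_load(&s_shared);
}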


@@ -12,219 +12,233 @@
#include <stdio.h>
#include <pthread.h>
#include "esp_pthread.h"
#include "esp_attr.h"
#include "freertos/portmacro.h"
#include "unity.h"
#include "esp_heap_caps.h"
#include "sdkconfig.h"
#define MALLOC_CAP_ATOMIC MALLOC_CAP_DEFAULT
/* non-static to prevent optimization */
atomic_ullong g_atomic64;
atomic_uint g_atomic32;
atomic_ushort g_atomic16;
atomic_uchar g_atomic8;
atomic_ullong *g_atomic64;
atomic_uint *g_atomic32;
atomic_ushort *g_atomic16;
atomic_uchar *g_atomic8;
TEST_CASE("stdatomic - test_64bit_atomics", "[newlib_stdatomic]")
{
unsigned long long x64 = 0;
g_atomic64 = 0; // calls atomic_store
g_atomic64 = heap_caps_calloc(sizeof(*g_atomic64), 1, MALLOC_CAP_DEFAULT);
x64 += atomic_fetch_or(&g_atomic64, 0x1111111111111111ULL);
x64 += atomic_fetch_xor(&g_atomic64, 0x3333333333333333ULL);
x64 += atomic_fetch_and(&g_atomic64, 0xf0f0f0f0f0f0f0f0ULL);
x64 += atomic_fetch_sub(&g_atomic64, 0x0f0f0f0f0f0f0f0fULL);
x64 += atomic_fetch_add(&g_atomic64, 0x2222222222222222ULL);
x64 += atomic_fetch_or(g_atomic64, 0x1111111111111111ULL);
x64 += atomic_fetch_xor(g_atomic64, 0x3333333333333333ULL);
x64 += atomic_fetch_and(g_atomic64, 0xf0f0f0f0f0f0f0f0ULL);
x64 += atomic_fetch_sub(g_atomic64, 0x0f0f0f0f0f0f0f0fULL);
x64 += atomic_fetch_add(g_atomic64, 0x2222222222222222ULL);
#ifndef __clang__
x64 += __atomic_fetch_nand_8(&g_atomic64, 0xAAAAAAAAAAAAAAAAULL, 0);
x64 += __atomic_fetch_nand_8(g_atomic64, 0xAAAAAAAAAAAAAAAAULL, 0);
TEST_ASSERT_EQUAL_HEX64(0x9797979797979797ULL, x64);
TEST_ASSERT_EQUAL_HEX64(0xDDDDDDDDDDDDDDDDULL, g_atomic64); // calls atomic_load
TEST_ASSERT_EQUAL_HEX64(0xDDDDDDDDDDDDDDDDULL, *g_atomic64); // calls atomic_load
#else
TEST_ASSERT_EQUAL_HEX64(0x6464646464646464ULL, x64);
TEST_ASSERT_EQUAL_HEX64(0x3333333333333333ULL, g_atomic64); // calls atomic_load
TEST_ASSERT_EQUAL_HEX64(0x3333333333333333ULL, *g_atomic64); // calls atomic_load
#endif
free(g_atomic64);
}
TEST_CASE("stdatomic - test_32bit_atomics", "[newlib_stdatomic]")
{
unsigned int x32 = 0;
g_atomic32 = 0;
g_atomic32 = heap_caps_calloc(sizeof(*g_atomic32), 1, MALLOC_CAP_DEFAULT);
x32 += atomic_fetch_or(&g_atomic32, 0x11111111U);
x32 += atomic_fetch_xor(&g_atomic32, 0x33333333U);
x32 += atomic_fetch_and(&g_atomic32, 0xf0f0f0f0U);
x32 += atomic_fetch_sub(&g_atomic32, 0x0f0f0f0fU);
x32 += atomic_fetch_add(&g_atomic32, 0x22222222U);
x32 += atomic_fetch_or(g_atomic32, 0x11111111U);
x32 += atomic_fetch_xor(g_atomic32, 0x33333333U);
x32 += atomic_fetch_and(g_atomic32, 0xf0f0f0f0U);
x32 += atomic_fetch_sub(g_atomic32, 0x0f0f0f0fU);
x32 += atomic_fetch_add(g_atomic32, 0x22222222U);
#ifndef __clang__
x32 += __atomic_fetch_nand_4(&g_atomic32, 0xAAAAAAAAU, 0);
x32 += __atomic_fetch_nand_4(g_atomic32, 0xAAAAAAAAU, 0);
TEST_ASSERT_EQUAL_HEX32(0x97979797U, x32);
TEST_ASSERT_EQUAL_HEX32(0xDDDDDDDDU, g_atomic32);
TEST_ASSERT_EQUAL_HEX32(0xDDDDDDDDU, *g_atomic32);
#else
TEST_ASSERT_EQUAL_HEX32(0x64646464U, x32);
TEST_ASSERT_EQUAL_HEX32(0x33333333U, g_atomic32); // calls atomic_load
TEST_ASSERT_EQUAL_HEX32(0x33333333U, *g_atomic32); // calls atomic_load
#endif
free(g_atomic32);
}
TEST_CASE("stdatomic - test_16bit_atomics", "[newlib_stdatomic]")
{
unsigned int x16 = 0;
g_atomic16 = 0;
g_atomic16 = heap_caps_calloc(sizeof(*g_atomic16), 1, MALLOC_CAP_DEFAULT);
x16 += atomic_fetch_or(&g_atomic16, 0x1111);
x16 += atomic_fetch_xor(&g_atomic16, 0x3333);
x16 += atomic_fetch_and(&g_atomic16, 0xf0f0);
x16 += atomic_fetch_sub(&g_atomic16, 0x0f0f);
x16 += atomic_fetch_add(&g_atomic16, 0x2222);
x16 += atomic_fetch_or(g_atomic16, 0x1111);
x16 += atomic_fetch_xor(g_atomic16, 0x3333);
x16 += atomic_fetch_and(g_atomic16, 0xf0f0);
x16 += atomic_fetch_sub(g_atomic16, 0x0f0f);
x16 += atomic_fetch_add(g_atomic16, 0x2222);
#ifndef __clang__
x16 += __atomic_fetch_nand_2(&g_atomic16, 0xAAAA, 0);
x16 += __atomic_fetch_nand_2(g_atomic16, 0xAAAA, 0);
TEST_ASSERT_EQUAL_HEX16(0x9797, x16);
TEST_ASSERT_EQUAL_HEX16(0xDDDD, g_atomic16);
TEST_ASSERT_EQUAL_HEX16(0xDDDD, *g_atomic16);
#else
TEST_ASSERT_EQUAL_HEX16(0x6464, x16);
TEST_ASSERT_EQUAL_HEX16(0x3333, g_atomic16); // calls atomic_load
TEST_ASSERT_EQUAL_HEX16(0x3333, *g_atomic16); // calls atomic_load
#endif
free(g_atomic16);
}
TEST_CASE("stdatomic - test_8bit_atomics", "[newlib_stdatomic]")
{
unsigned int x8 = 0;
g_atomic8 = 0;
g_atomic8 = heap_caps_calloc(sizeof(*g_atomic8), 1, MALLOC_CAP_DEFAULT);
x8 += atomic_fetch_or(&g_atomic8, 0x11);
x8 += atomic_fetch_xor(&g_atomic8, 0x33);
x8 += atomic_fetch_and(&g_atomic8, 0xf0);
x8 += atomic_fetch_sub(&g_atomic8, 0x0f);
x8 += atomic_fetch_add(&g_atomic8, 0x22);
x8 += atomic_fetch_or(g_atomic8, 0x11);
x8 += atomic_fetch_xor(g_atomic8, 0x33);
x8 += atomic_fetch_and(g_atomic8, 0xf0);
x8 += atomic_fetch_sub(g_atomic8, 0x0f);
x8 += atomic_fetch_add(g_atomic8, 0x22);
#ifndef __clang__
x8 += __atomic_fetch_nand_1(&g_atomic8, 0xAA, 0);
x8 += __atomic_fetch_nand_1(g_atomic8, 0xAA, 0);
TEST_ASSERT_EQUAL_HEX8(0x97, x8);
TEST_ASSERT_EQUAL_HEX8(0xDD, g_atomic8);
TEST_ASSERT_EQUAL_HEX8(0xDD, *g_atomic8);
#else
TEST_ASSERT_EQUAL_HEX8(0x64, x8);
TEST_ASSERT_EQUAL_HEX8(0x33, g_atomic8); // calls atomic_load
TEST_ASSERT_EQUAL_HEX8(0x33, *g_atomic8); // calls atomic_load
#endif
free(g_atomic8);
}
#ifndef __clang__
TEST_CASE("stdatomic - test_64bit_atomics", "[newlib_stdatomic]")
{
unsigned long long x64 = 0;
g_atomic64 = 0; // calls atomic_store
g_atomic64 = heap_caps_calloc(sizeof(*g_atomic64), 1, MALLOC_CAP_DEFAULT);
x64 += __atomic_or_fetch_8(&g_atomic64, 0x1111111111111111ULL, 0);
x64 += __atomic_xor_fetch_8(&g_atomic64, 0x3333333333333333ULL, 0);
x64 += __atomic_and_fetch_8(&g_atomic64, 0xf0f0f0f0f0f0f0f0ULL, 0);
x64 += __atomic_sub_fetch_8(&g_atomic64, 0x0f0f0f0f0f0f0f0fULL, 0);
x64 += __atomic_add_fetch_8(&g_atomic64, 0x2222222222222222ULL, 0);
x64 += __atomic_nand_fetch_8(&g_atomic64, 0xAAAAAAAAAAAAAAAAULL, 0);
x64 += __atomic_or_fetch_8(g_atomic64, 0x1111111111111111ULL, 0);
x64 += __atomic_xor_fetch_8(g_atomic64, 0x3333333333333333ULL, 0);
x64 += __atomic_and_fetch_8(g_atomic64, 0xf0f0f0f0f0f0f0f0ULL, 0);
x64 += __atomic_sub_fetch_8(g_atomic64, 0x0f0f0f0f0f0f0f0fULL, 0);
x64 += __atomic_add_fetch_8(g_atomic64, 0x2222222222222222ULL, 0);
x64 += __atomic_nand_fetch_8(g_atomic64, 0xAAAAAAAAAAAAAAAAULL, 0);
TEST_ASSERT_EQUAL_HEX64(0x7575757575757574ULL, x64);
TEST_ASSERT_EQUAL_HEX64(0xDDDDDDDDDDDDDDDDULL, g_atomic64); // calls atomic_load
TEST_ASSERT_EQUAL_HEX64(0xDDDDDDDDDDDDDDDDULL, *g_atomic64); // calls atomic_load
free(g_atomic64);
}
TEST_CASE("stdatomic - test_32bit_atomics", "[newlib_stdatomic]")
{
unsigned int x32 = 0;
g_atomic32 = 0;
g_atomic32 = heap_caps_calloc(sizeof(*g_atomic32), 1, MALLOC_CAP_DEFAULT);
x32 += __atomic_or_fetch_4(&g_atomic32, 0x11111111U, 0);
x32 += __atomic_xor_fetch_4(&g_atomic32, 0x33333333U, 0);
x32 += __atomic_and_fetch_4(&g_atomic32, 0xf0f0f0f0U, 0);
x32 += __atomic_sub_fetch_4(&g_atomic32, 0x0f0f0f0fU, 0);
x32 += __atomic_add_fetch_4(&g_atomic32, 0x22222222U, 0);
x32 += __atomic_nand_fetch_4(&g_atomic32, 0xAAAAAAAAU, 0);
x32 += __atomic_or_fetch_4(g_atomic32, 0x11111111U, 0);
x32 += __atomic_xor_fetch_4(g_atomic32, 0x33333333U, 0);
x32 += __atomic_and_fetch_4(g_atomic32, 0xf0f0f0f0U, 0);
x32 += __atomic_sub_fetch_4(g_atomic32, 0x0f0f0f0fU, 0);
x32 += __atomic_add_fetch_4(g_atomic32, 0x22222222U, 0);
x32 += __atomic_nand_fetch_4(g_atomic32, 0xAAAAAAAAU, 0);
TEST_ASSERT_EQUAL_HEX32(0x75757574U, x32);
TEST_ASSERT_EQUAL_HEX32(0xDDDDDDDDU, g_atomic32);
TEST_ASSERT_EQUAL_HEX32(0xDDDDDDDDU, *g_atomic32);
free(g_atomic32);
}
TEST_CASE("stdatomic - test_16bit_atomics", "[newlib_stdatomic]")
{
unsigned int x16 = 0;
g_atomic16 = 0;
g_atomic16 = heap_caps_calloc(sizeof(*g_atomic16), 1, MALLOC_CAP_DEFAULT);
x16 += __atomic_or_fetch_2(&g_atomic16, 0x1111, 0);
x16 += __atomic_xor_fetch_2(&g_atomic16, 0x3333, 0);
x16 += __atomic_and_fetch_2(&g_atomic16, 0xf0f0, 0);
x16 += __atomic_sub_fetch_2(&g_atomic16, 0x0f0f, 0);
x16 += __atomic_add_fetch_2(&g_atomic16, 0x2222, 0);
x16 += __atomic_nand_fetch_2(&g_atomic16, 0xAAAA, 0);
x16 += __atomic_or_fetch_2(g_atomic16, 0x1111, 0);
x16 += __atomic_xor_fetch_2(g_atomic16, 0x3333, 0);
x16 += __atomic_and_fetch_2(g_atomic16, 0xf0f0, 0);
x16 += __atomic_sub_fetch_2(g_atomic16, 0x0f0f, 0);
x16 += __atomic_add_fetch_2(g_atomic16, 0x2222, 0);
x16 += __atomic_nand_fetch_2(g_atomic16, 0xAAAA, 0);
TEST_ASSERT_EQUAL_HEX16(0x7574, x16);
TEST_ASSERT_EQUAL_HEX16(0xDDDD, g_atomic16);
TEST_ASSERT_EQUAL_HEX16(0xDDDD, *g_atomic16);
free(g_atomic16);
}
TEST_CASE("stdatomic - test_8bit_atomics", "[newlib_stdatomic]")
{
unsigned int x8 = 0;
g_atomic8 = 0;
g_atomic8 = heap_caps_calloc(sizeof(*g_atomic8), 1, MALLOC_CAP_DEFAULT);
x8 += __atomic_or_fetch_1(&g_atomic8, 0x11, 0);
x8 += __atomic_xor_fetch_1(&g_atomic8, 0x33, 0);
x8 += __atomic_and_fetch_1(&g_atomic8, 0xf0, 0);
x8 += __atomic_sub_fetch_1(&g_atomic8, 0x0f, 0);
x8 += __atomic_add_fetch_1(&g_atomic8, 0x22, 0);
x8 += __atomic_nand_fetch_1(&g_atomic8, 0xAA, 0);
x8 += __atomic_or_fetch_1(g_atomic8, 0x11, 0);
x8 += __atomic_xor_fetch_1(g_atomic8, 0x33, 0);
x8 += __atomic_and_fetch_1(g_atomic8, 0xf0, 0);
x8 += __atomic_sub_fetch_1(g_atomic8, 0x0f, 0);
x8 += __atomic_add_fetch_1(g_atomic8, 0x22, 0);
x8 += __atomic_nand_fetch_1(g_atomic8, 0xAA, 0);
TEST_ASSERT_EQUAL_HEX8(0x74, x8);
TEST_ASSERT_EQUAL_HEX8(0xDD, g_atomic8);
TEST_ASSERT_EQUAL_HEX8(0xDD, *g_atomic8);
free(g_atomic8);
}
#endif // #ifndef __clang__
#define TEST_EXCLUSION(n) TEST_CASE("stdatomic - test_" #n "bit_exclusion", "[newlib_stdatomic]") \
{ \
g_atomic ## n = 0; \
pthread_t thread1; \
pthread_t thread2; \
esp_pthread_cfg_t cfg = esp_pthread_get_default_config(); \
cfg.pin_to_core = (xPortGetCoreID() + 1) % CONFIG_FREERTOS_NUMBER_OF_CORES; \
esp_pthread_set_cfg(&cfg); \
pthread_create(&thread1, NULL, exclusion_task_ ## n, (void*) 1); \
cfg.pin_to_core = xPortGetCoreID(); \
esp_pthread_set_cfg(&cfg); \
pthread_create(&thread2, NULL, exclusion_task_ ## n, (void*) 0); \
pthread_join(thread1, NULL); \
pthread_join(thread2, NULL); \
TEST_ASSERT_EQUAL(0, g_atomic ## n); \
#define TEST_EXCLUSION(n, POSTFIX) TEST_CASE("stdatomic - test_" #n #POSTFIX "bit_exclusion", "[newlib_stdatomic]") \
{ \
g_atomic ## n = heap_caps_calloc(sizeof(*g_atomic ## n), 1, MALLOC_CAP_ATOMIC); \
pthread_t thread1; \
pthread_t thread2; \
esp_pthread_cfg_t cfg = esp_pthread_get_default_config(); \
cfg.pin_to_core = (xPortGetCoreID() + 1) % portNUM_PROCESSORS; \
esp_pthread_set_cfg(&cfg); \
pthread_create(&thread1, NULL, exclusion_task_ ##n ##POSTFIX, (void*) 1); \
cfg.pin_to_core = xPortGetCoreID(); \
esp_pthread_set_cfg(&cfg); \
pthread_create(&thread2, NULL, exclusion_task_ ##n ##POSTFIX, (void*) 0); \
pthread_join(thread1, NULL); \
pthread_join(thread2, NULL); \
TEST_ASSERT_EQUAL(0, (*g_atomic ## n)); \
free(g_atomic ## n); \
}
#define TEST_EXCLUSION_TASK(n) static void* exclusion_task_ ## n(void *varg) \
{ \
int arg = (int) varg; \
for (int i = 0; i < 1000000; ++i) { \
if (arg == 0) { \
atomic_fetch_add(&g_atomic ## n, 1ULL); \
} else { \
atomic_fetch_sub(&g_atomic ## n, 1ULL); \
} \
} \
return NULL; \
#define TEST_EXCLUSION_TASK(n, POSTFIX) static void* exclusion_task_ ##n ##POSTFIX(void *varg) \
{ \
int arg = (int) varg; \
for (int i = 0; i < 1000000; ++i) { \
if (arg == 0) { \
atomic_fetch_add(g_atomic ## n, 1ULL); \
} else { \
atomic_fetch_sub(g_atomic ## n, 1ULL); \
} \
} \
return NULL; \
}
TEST_EXCLUSION_TASK(64)
TEST_EXCLUSION(64)
TEST_EXCLUSION_TASK(64, _default_mem)
TEST_EXCLUSION(64, _default_mem)
TEST_EXCLUSION_TASK(32)
TEST_EXCLUSION(32)
TEST_EXCLUSION_TASK(32, _default_mem)
TEST_EXCLUSION(32, _default_mem)
TEST_EXCLUSION_TASK(16)
TEST_EXCLUSION(16)
TEST_EXCLUSION_TASK(16, _default_mem)
TEST_EXCLUSION(16, _default_mem)
TEST_EXCLUSION_TASK(8)
TEST_EXCLUSION(8)
TEST_EXCLUSION_TASK(8, _default_mem)
TEST_EXCLUSION(8, _default_mem)
#define ITER_COUNT 20000
#define TEST_RACE_OPERATION(ASSERT_SUFFIX, NAME, LHSTYPE, PRE, POST, INIT, FINAL) \
\
static _Atomic LHSTYPE var_##NAME; \
static _Atomic LHSTYPE *var_##NAME; \
\
static void *test_thread_##NAME (void *arg) \
{ \
for (int i = 0; i < ITER_COUNT; i++) \
{ \
PRE var_##NAME POST; \
PRE (*var_##NAME) POST; \
} \
return NULL; \
} \
@@ -233,9 +247,10 @@ TEST_CASE("stdatomic - test_" #NAME, "[newlib_stdatomic]")
{ \
pthread_t thread_id1; \
pthread_t thread_id2; \
var_##NAME = (INIT); \
var_##NAME = heap_caps_calloc(sizeof(*var_##NAME), 1, MALLOC_CAP_ATOMIC); \
*var_##NAME = (INIT); \
esp_pthread_cfg_t cfg = esp_pthread_get_default_config(); \
cfg.pin_to_core = (xPortGetCoreID() + 1) % CONFIG_FREERTOS_NUMBER_OF_CORES; \
cfg.pin_to_core = (xPortGetCoreID() + 1) % CONFIG_FREERTOS_NUMBER_OF_CORES; \
esp_pthread_set_cfg(&cfg); \
pthread_create (&thread_id1, NULL, test_thread_##NAME, NULL); \
cfg.pin_to_core = xPortGetCoreID(); \
@@ -243,7 +258,8 @@ TEST_CASE("stdatomic - test_" #NAME, "[newlib_stdatomic]")
pthread_create (&thread_id2, NULL, test_thread_##NAME, NULL); \
pthread_join (thread_id1, NULL); \
pthread_join (thread_id2, NULL); \
TEST_ASSERT_EQUAL##ASSERT_SUFFIX((FINAL), var_##NAME); \
TEST_ASSERT_EQUAL##ASSERT_SUFFIX((FINAL), (*var_##NAME)); \
free(var_##NAME); \
}
// Note that the assert at the end is doing an exact bitwise comparison.
@@ -251,13 +267,13 @@ TEST_CASE("stdatomic - test_" #NAME, "[newlib_stdatomic]")
// no corresponding Unity assert macro for long double. USE THIS WITH CARE!
#define TEST_RACE_OPERATION_LONG_DOUBLE(NAME, LHSTYPE, PRE, POST, INIT, FINAL) \
\
static _Atomic LHSTYPE var_##NAME; \
static _Atomic LHSTYPE *var_##NAME; \
\
static void *test_thread_##NAME (void *arg) \
{ \
for (int i = 0; i < ITER_COUNT; i++) \
{ \
PRE var_##NAME POST; \
PRE (*var_##NAME) POST; \
} \
return NULL; \
} \
@@ -266,10 +282,11 @@ TEST_CASE("stdatomic - test_" #NAME, "[newlib_stdatomic]") \
{ \
pthread_t thread_id1; \
pthread_t thread_id2; \
var_##NAME = (INIT); \
var_##NAME = heap_caps_calloc(sizeof(*var_##NAME), 1, MALLOC_CAP_ATOMIC); \
*var_##NAME = (INIT); \
const LHSTYPE EXPECTED = (FINAL); \
esp_pthread_cfg_t cfg = esp_pthread_get_default_config(); \
cfg.pin_to_core = (xPortGetCoreID() + 1) % CONFIG_FREERTOS_NUMBER_OF_CORES; \
cfg.pin_to_core = (xPortGetCoreID() + 1) % CONFIG_FREERTOS_NUMBER_OF_CORES; \
esp_pthread_set_cfg(&cfg); \
pthread_create (&thread_id1, NULL, test_thread_##NAME, NULL); \
cfg.pin_to_core = xPortGetCoreID(); \
@@ -277,7 +294,8 @@ TEST_CASE("stdatomic - test_" #NAME, "[newlib_stdatomic]") \
pthread_create (&thread_id2, NULL, test_thread_##NAME, NULL); \
pthread_join (thread_id1, NULL); \
pthread_join (thread_id2, NULL); \
TEST_ASSERT(EXPECTED == var_##NAME); \
TEST_ASSERT(EXPECTED == (*var_##NAME)); \
free(var_##NAME); \
}
TEST_RACE_OPERATION(, uint8_add, uint8_t,, += 1, 0, (uint8_t)(2 * ITER_COUNT))
@@ -352,3 +370,93 @@ TEST_RACE_OPERATION_LONG_DOUBLE(long_double_preinc, long double, ++,, 0, (2 * IT
TEST_RACE_OPERATION_LONG_DOUBLE(complex_long_double_sub, _Complex long double,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE(long_double_postdec, long double,, --, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE(long_double_predec, long double, --,, 0, -(2 * ITER_COUNT))
#if CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND
#undef MALLOC_CAP_ATOMIC
#define MALLOC_CAP_ATOMIC MALLOC_CAP_SPIRAM
TEST_EXCLUSION_TASK(64, _ext_mem)
TEST_EXCLUSION(64, _ext_mem)
TEST_EXCLUSION_TASK(32, _ext_mem)
TEST_EXCLUSION(32, _ext_mem)
TEST_EXCLUSION_TASK(16, _ext_mem)
TEST_EXCLUSION(16, _ext_mem)
TEST_EXCLUSION_TASK(8, _ext_mem)
TEST_EXCLUSION(8, _ext_mem)
TEST_RACE_OPERATION(, uint8_add_ext, uint8_t,, += 1, 0, (uint8_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint8_add_3_ext, uint8_t,, += 3, 0, (uint8_t)(6 * ITER_COUNT))
TEST_RACE_OPERATION(, uint8_postinc_ext, uint8_t,, ++, 0, (uint8_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint8_preinc_ext, uint8_t, ++,, 0, (uint8_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint8_sub_ext, uint8_t,, -= 1, 0, (uint8_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint8_sub_3_ext, uint8_t,, -= 3, 0, (uint8_t) - (6 * ITER_COUNT))
TEST_RACE_OPERATION(, uint8_postdec_ext, uint8_t,, --, 0, (uint8_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint8_predec_ext, uint8_t, --,, 0, (uint8_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint8_mul_ext, uint8_t,, *= 3, 1, (uint8_t) 0x1)
TEST_RACE_OPERATION(, uint16_add_ext, uint16_t,, += 1, 0, (uint16_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint16_add_3_ext, uint16_t,, += 3, 0, (uint16_t)(6 * ITER_COUNT))
TEST_RACE_OPERATION(, uint16_postinc_ext, uint16_t,, ++, 0, (uint16_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint16_preinc_ext, uint16_t, ++,, 0, (uint16_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint16_sub_ext, uint16_t,, -= 1, 0, (uint16_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint16_sub_3_ext, uint16_t,, -= 3, 0, (uint16_t) - (6 * ITER_COUNT))
TEST_RACE_OPERATION(, uint16_postdec_ext, uint16_t,, --, 0, (uint16_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint16_predec_ext, uint16_t, --,, 0, (uint16_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint16_mul_ext, uint16_t,, *= 3, 1, (uint16_t) 0x6D01)
TEST_RACE_OPERATION(, uint32_add_ext, uint32_t,, += 1, 0, (uint32_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint32_add_3_ext, uint32_t,, += 3, 0, (uint32_t)(6 * ITER_COUNT))
TEST_RACE_OPERATION(, uint32_postinc_ext, uint32_t,, ++, 0, (uint32_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint32_preinc_ext, uint32_t, ++,, 0, (uint32_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint32_sub_ext, uint32_t,, -= 1, 0, (uint32_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint32_sub_3_ext, uint32_t,, -= 3, 0, (uint32_t) - (6 * ITER_COUNT))
TEST_RACE_OPERATION(, uint32_postdec_ext, uint32_t,, --, 0, (uint32_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint32_predec_ext, uint32_t, --,, 0, (uint32_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint32_mul_ext, uint32_t,, *= 3, 1, (uint32_t) 0xC1E36D01U)
TEST_RACE_OPERATION(, uint64_add_ext, uint64_t,, += 1, 0, (uint64_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint64_add_3_ext, uint64_t,, += 3, 0, (uint64_t)(6 * ITER_COUNT))
TEST_RACE_OPERATION(, uint64_add_neg_ext, uint64_t,, += 1, -10000, (uint64_t)(2 * ITER_COUNT - 10000))
TEST_RACE_OPERATION(, uint64_postinc_ext, uint64_t,, ++, 0, (uint64_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint64_postinc_neg_ext, uint64_t,, ++, -10000, (uint64_t)(2 * ITER_COUNT - 10000))
TEST_RACE_OPERATION(, uint64_preinc_ext, uint64_t, ++,, 0, (uint64_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint64_preinc_neg_ext, uint64_t, ++,, -10000, (uint64_t)(2 * ITER_COUNT - 10000))
TEST_RACE_OPERATION(, uint64_sub_ext, uint64_t,, -= 1, 0, (uint64_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint64_sub_3_ext, uint64_t,, -= 3, 0, (uint64_t) - (6 * ITER_COUNT))
TEST_RACE_OPERATION(, uint64_sub_neg_ext, uint64_t,, -= 1, 10000, (uint64_t)((-2 * ITER_COUNT) + 10000))
TEST_RACE_OPERATION(, uint64_postdec_ext, uint64_t,, --, 0, (uint64_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint64_postdec_neg_ext, uint64_t,, --, 10000, (uint64_t)((-2 * ITER_COUNT) + 10000))
TEST_RACE_OPERATION(, uint64_predec_ext, uint64_t, --,, 0, (uint64_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION(, uint64_predec_neg_ext, uint64_t, --,, 10000, (uint64_t)((-2 * ITER_COUNT) + 10000))
TEST_RACE_OPERATION(, uint64_mul_ext, uint64_t,, *= 3, 1, (uint64_t) 0x988EE974C1E36D01ULL)
TEST_RACE_OPERATION(_FLOAT, float_add_ext, float,, += 1, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION(_FLOAT, complex_float_add_ext, _Complex float,, += 1, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION(_FLOAT, float_postinc_ext, float,, ++, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION(_FLOAT, float_preinc_ext, float, ++,, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION(_FLOAT, float_sub_ext, float,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION(_FLOAT, complex_float_sub_ext, _Complex float,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION(_FLOAT, float_postdec_ext, float,, --, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION(_FLOAT, float_predec_ext, float, --,, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION(_DOUBLE, double_add_ext, double,, += 1, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION(_DOUBLE, complex_double_add_ext, _Complex double,, += 1, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION(_DOUBLE, double_postinc_ext, double,, ++, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION(_DOUBLE, double_preinc_ext, double, ++,, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION(_DOUBLE, double_sub_ext, double,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION(_DOUBLE, complex_double_sub_ext, _Complex double,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION(_DOUBLE, double_postdec_ext, double,, --, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION(_DOUBLE, double_predec_ext, double, --,, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE(long_double_add_ext, long double,, += 1, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE(complex_long_double_add_ext, _Complex long double,, += 1, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE(long_double_postinc_ext, long double,, ++, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE(long_double_sub_ext, long double,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE(long_double_preinc_ext, long double, ++,, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE(complex_long_double_sub_ext, _Complex long double,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE(long_double_postdec_ext, long double,, --, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE(long_double_predec_ext, long double, --,, 0, -(2 * ITER_COUNT))
#endif // CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND