refactor(core): reformat newlib and pthread with astyle

pull/13338/head
Marius Vikhammer 2024-02-27 10:00:06 +08:00
parent b39f13d685
commit f2fe408b99
39 changed files with 442 additions and 460 deletions

View file

@@ -13,8 +13,8 @@
void __attribute__((noreturn)) abort(void)
{
#define ERR_STR1 "abort() was called at PC 0x"
#define ERR_STR2 " on core "
_Static_assert(UINTPTR_MAX == 0xffffffff, "abort() assumes 32-bit addresses");
_Static_assert(SOC_CPU_CORES_NUM < 10, "abort() assumes number of cores is 1 to 9");

View file

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -51,18 +51,18 @@ void __attribute__((noreturn)) __assert_func(const char *file, int line, const c
if (!spi_flash_cache_enabled())
#endif
{
if (esp_ptr_in_drom(file)) {
file = CACHE_DISABLED_STR;
}
if (esp_ptr_in_drom(func)) {
ra_to_str(addr);
func = addr;
}
if (esp_ptr_in_drom(expr)) {
expr = CACHE_DISABLED_STR;
}
}
const char *str[] = {ASSERT_STR, func ? func : "\b", " ", file, ":", lbuf, " (", expr, ")"};

View file

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -11,14 +11,13 @@
#include <malloc.h>
#include "esp_heap_caps.h"
/*
These contain the business logic for the malloc() and realloc() implementation. Because of heap tracing
wrapping reasons, we do not want these to be a public api, however, so they're not defined publicly.
*/
-extern void *heap_caps_malloc_default( size_t size );
+extern void *heap_caps_malloc_default(size_t size);
-extern void *heap_caps_realloc_default( void *ptr, size_t size );
+extern void *heap_caps_realloc_default(void *ptr, size_t size);
-extern void *heap_caps_aligned_alloc_default( size_t alignment, size_t size );
+extern void *heap_caps_aligned_alloc_default(size_t alignment, size_t size);
void* malloc(size_t size)
{
@@ -52,7 +51,7 @@ void _free_r(struct _reent *r, void* ptr)
void* _realloc_r(struct _reent *r, void* ptr, size_t size)
{
-return heap_caps_realloc_default( ptr, size );
+return heap_caps_realloc_default(ptr, size);
}
void* _calloc_r(struct _reent *r, size_t nmemb, size_t size)

View file

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -38,15 +38,14 @@ static portMUX_TYPE lock_init_spinlock = portMUX_INITIALIZER_UNLOCKED;
Called by _lock_init*, also called by _lock_acquire* to lazily initialize locks that might have
been initialised (to zero only) before the RTOS scheduler started.
*/
-static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type) {
+static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type)
+{
portENTER_CRITICAL(&lock_init_spinlock);
if (*lock) {
/* Lock already initialised (either we didn't check earlier,
or it got initialised while we were waiting for the
spinlock.) */
-}
-else
-{
+} else {
/* Create a new semaphore
this is a bit of an API violation, as we're calling the
@@ -70,12 +69,14 @@ static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type) {
portEXIT_CRITICAL(&lock_init_spinlock);
}
-void IRAM_ATTR _lock_init(_lock_t *lock) {
+void IRAM_ATTR _lock_init(_lock_t *lock)
+{
*lock = 0; // In case lock's memory is uninitialized
lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
}
-void IRAM_ATTR _lock_init_recursive(_lock_t *lock) {
+void IRAM_ATTR _lock_init_recursive(_lock_t *lock)
+{
*lock = 0; // In case lock's memory is uninitialized
lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
@@ -90,7 +91,8 @@ void IRAM_ATTR _lock_init_recursive(_lock_t *lock) {
re-initialised if it is used again. Caller has to avoid doing
this!
*/
-void IRAM_ATTR _lock_close(_lock_t *lock) {
+void IRAM_ATTR _lock_close(_lock_t *lock)
+{
portENTER_CRITICAL(&lock_init_spinlock);
if (*lock) {
SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
@@ -108,7 +110,8 @@ void _lock_close_recursive(_lock_t *lock) __attribute__((alias("_lock_close")));
/* Acquire the mutex semaphore for lock. wait up to delay ticks.
mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX
*/
-static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type) {
+static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type)
+{
SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
if (!h) {
if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
@@ -137,8 +140,7 @@ static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t
if (higher_task_woken) {
portYIELD_FROM_ISR();
}
-}
-else {
+} else {
/* In task context */
if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
success = xSemaphoreTakeRecursive(h, delay);
@@ -150,26 +152,31 @@ static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t
return (success == pdTRUE) ? 0 : -1;
}
-void IRAM_ATTR _lock_acquire(_lock_t *lock) {
+void IRAM_ATTR _lock_acquire(_lock_t *lock)
+{
lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_MUTEX);
}
-void IRAM_ATTR _lock_acquire_recursive(_lock_t *lock) {
+void IRAM_ATTR _lock_acquire_recursive(_lock_t *lock)
+{
lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
-int IRAM_ATTR _lock_try_acquire(_lock_t *lock) {
+int IRAM_ATTR _lock_try_acquire(_lock_t *lock)
+{
return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_MUTEX);
}
-int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock) {
+int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock)
+{
return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
/* Release the mutex semaphore for lock.
mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX
*/
-static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type) {
+static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type)
+{
if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
return; /* locking is a no-op before scheduler is up */
}
@@ -194,11 +201,13 @@ static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type) {
}
}
-void IRAM_ATTR _lock_release(_lock_t *lock) {
+void IRAM_ATTR _lock_release(_lock_t *lock)
+{
lock_release_generic(lock, queueQUEUE_TYPE_MUTEX);
}
-void IRAM_ATTR _lock_release_recursive(_lock_t *lock) {
+void IRAM_ATTR _lock_release_recursive(_lock_t *lock)
+{
lock_release_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
@@ -242,7 +251,6 @@ _Static_assert(configSUPPORT_STATIC_ALLOCATION,
static StaticSemaphore_t s_common_mutex;
static StaticSemaphore_t s_common_recursive_mutex;
#if ESP_ROM_HAS_RETARGETABLE_LOCKING
/* C3 and S3 ROMs are built without Newlib static lock symbols exported, and
* with an extra level of _LOCK_T indirection in mind.
@@ -271,7 +279,6 @@ static StaticSemaphore_t s_common_recursive_mutex;
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead)
#endif // ROM_NEEDS_MUTEX_OVERRIDE
void IRAM_ATTR __retarget_lock_init(_LOCK_T *lock)
{
*lock = NULL; /* In case lock's memory is uninitialized */

View file

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -43,23 +43,22 @@
#endif
extern int _printf_float(struct _reent *rptr,
void *pdata,
FILE * fp,
-int (*pfunc) (struct _reent *, FILE *, const char *, size_t len),
+int (*pfunc)(struct _reent *, FILE *, const char *, size_t len),
va_list * ap);
extern int _scanf_float(struct _reent *rptr,
void *pdata,
FILE *fp,
va_list *ap);
static void raise_r_stub(struct _reent *rptr)
{
_raise_r(rptr, 0);
}
-static void esp_cleanup_r (struct _reent *rptr)
+static void esp_cleanup_r(struct _reent *rptr)
{
if (_REENT_STDIN(rptr) != _REENT_STDIN(_GLOBAL_REENT)) {
_fclose_r(rptr, _REENT_STDIN(rptr));
@@ -69,7 +68,7 @@ static void esp_cleanup_r (struct _reent *rptr)
_fclose_r(rptr, _REENT_STDOUT(rptr));
}
-if (_REENT_STDERR(rptr) !=_REENT_STDERR(_GLOBAL_REENT)) {
+if (_REENT_STDERR(rptr) != _REENT_STDERR(_GLOBAL_REENT)) {
_fclose_r(rptr, _REENT_STDERR(rptr));
}
}
@@ -96,9 +95,9 @@ static struct syscall_stub_table s_stub_table = {
._exit_r = NULL, // never called in ROM
._close_r = &_close_r,
._open_r = &_open_r,
-._write_r = (int (*)(struct _reent *r, int, const void *, int)) &_write_r,
+._write_r = (int (*)(struct _reent * r, int, const void *, int)) &_write_r,
-._lseek_r = (int (*)(struct _reent *r, int, int, int)) &_lseek_r,
+._lseek_r = (int (*)(struct _reent * r, int, int, int)) &_lseek_r,
-._read_r = (int (*)(struct _reent *r, int, void *, int)) &_read_r,
+._read_r = (int (*)(struct _reent * r, int, void *, int)) &_read_r,
#if ESP_ROM_HAS_RETARGETABLE_LOCKING
._retarget_lock_init = &__retarget_lock_init,
._retarget_lock_init_recursive = &__retarget_lock_init_recursive,
@@ -196,8 +195,7 @@ void esp_setup_newlib_syscalls(void) __attribute__((alias("esp_newlib_init")));
*/
void esp_newlib_init_global_stdio(const char *stdio_dev)
{
-if (stdio_dev == NULL)
-{
+if (stdio_dev == NULL) {
_GLOBAL_REENT->__cleanup = NULL;
_REENT_SDIDINIT(_GLOBAL_REENT) = 0;
__sinit(_GLOBAL_REENT);
@@ -216,7 +214,7 @@ void esp_newlib_init_global_stdio(const char *stdio_dev)
file pointers. Thus, the ROM newlib code will never call the ROM version of __swsetup_r().
- See IDFGH-7728 for more details
*/
-extern int __swsetup_r (struct _reent *, FILE *);
+extern int __swsetup_r(struct _reent *, FILE *);
__swsetup_r(_GLOBAL_REENT, _REENT_STDIN(_GLOBAL_REENT));
__swsetup_r(_GLOBAL_REENT, _REENT_STDOUT(_GLOBAL_REENT));
__swsetup_r(_GLOBAL_REENT, _REENT_STDERR(_GLOBAL_REENT));

View file

@@ -4,7 +4,6 @@
* SPDX-License-Identifier: Apache-2.0
*/
/* This header file wraps newlib's own unmodified assert.h and adds
support for silent assertion failure.
*/

View file

@@ -7,7 +7,7 @@
*/
/*-
-* SPDX-FileCopyrightText: 2018-2021 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2018-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020 Francesco Giancane <francesco.giancane@accenture.com>
* SPDX-FileCopyrightText: 2002 Thomas Moestl <tmm@FreeBSD.org>
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND Apache-2.0
@@ -57,42 +57,42 @@ extern "C" {
/*
* General byte order swapping functions.
*/
#define bswap16(x) __bswap16(x)
#define bswap32(x) __bswap32(x)
#define bswap64(x) __bswap64(x)
/*
* Host to big endian, host to little endian, big endian to host, and little
* endian to host byte order functions as detailed in byteorder(9).
*/
#if _BYTE_ORDER == _LITTLE_ENDIAN
#define htobe16(x) bswap16((x))
#define htobe32(x) bswap32((x))
#define htobe64(x) bswap64((x))
#define htole16(x) ((uint16_t)(x))
#define htole32(x) ((uint32_t)(x))
#define htole64(x) ((uint64_t)(x))
#define be16toh(x) bswap16((x))
#define be32toh(x) bswap32((x))
#define be64toh(x) bswap64((x))
#define le16toh(x) ((uint16_t)(x))
#define le32toh(x) ((uint32_t)(x))
#define le64toh(x) ((uint64_t)(x))
#else /* _BYTE_ORDER != _LITTLE_ENDIAN */
#define htobe16(x) ((uint16_t)(x))
#define htobe32(x) ((uint32_t)(x))
#define htobe64(x) ((uint64_t)(x))
#define htole16(x) bswap16((x))
#define htole32(x) bswap32((x))
#define htole64(x) bswap64((x))
#define be16toh(x) ((uint16_t)(x))
#define be32toh(x) ((uint32_t)(x))
#define be64toh(x) ((uint64_t)(x))
#define le16toh(x) bswap16((x))
#define le32toh(x) bswap32((x))
#define le64toh(x) bswap64((x))
#endif /* _BYTE_ORDER == _LITTLE_ENDIAN */
/* Alignment-agnostic encode/decode bytestream to/from little/big endian. */
@@ -100,107 +100,107 @@ extern "C" {
static __inline uint16_t
be16dec(const void *pp)
{
uint8_t const *p = (uint8_t const *)pp;
return ((p[0] << 8) | p[1]);
}
static __inline uint32_t
be32dec(const void *pp)
{
uint8_t const *p = (uint8_t const *)pp;
return (((unsigned)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]);
}
static __inline uint64_t
be64dec(const void *pp)
{
uint8_t const *p = (uint8_t const *)pp;
return (((uint64_t)be32dec(p) << 32) | be32dec(p + 4));
}
static __inline uint16_t
le16dec(const void *pp)
{
uint8_t const *p = (uint8_t const *)pp;
return ((p[1] << 8) | p[0]);
}
static __inline uint32_t
le32dec(const void *pp)
{
uint8_t const *p = (uint8_t const *)pp;
return (((unsigned)p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]);
}
static __inline uint64_t
le64dec(const void *pp)
{
uint8_t const *p = (uint8_t const *)pp;
return (((uint64_t)le32dec(p + 4) << 32) | le32dec(p));
}
static __inline void
be16enc(void *pp, uint16_t u)
{
uint8_t *p = (uint8_t *)pp;
p[0] = (u >> 8) & 0xff;
p[1] = u & 0xff;
}
static __inline void
be32enc(void *pp, uint32_t u)
{
uint8_t *p = (uint8_t *)pp;
p[0] = (u >> 24) & 0xff;
p[1] = (u >> 16) & 0xff;
p[2] = (u >> 8) & 0xff;
p[3] = u & 0xff;
}
static __inline void
be64enc(void *pp, uint64_t u)
{
uint8_t *p = (uint8_t *)pp;
be32enc(p, (uint32_t)(u >> 32));
be32enc(p + 4, (uint32_t)(u & 0xffffffffU));
}
static __inline void
le16enc(void *pp, uint16_t u)
{
uint8_t *p = (uint8_t *)pp;
p[0] = u & 0xff;
p[1] = (u >> 8) & 0xff;
}
static __inline void
le32enc(void *pp, uint32_t u)
{
uint8_t *p = (uint8_t *)pp;
p[0] = u & 0xff;
p[1] = (u >> 8) & 0xff;
p[2] = (u >> 16) & 0xff;
p[3] = (u >> 24) & 0xff;
}
static __inline void
le64enc(void *pp, uint64_t u)
{
uint8_t *p = (uint8_t *)pp;
le32enc(p, (uint32_t)(u & 0xffffffffU));
le32enc(p + 4, (uint32_t)(u >> 32));
}
#ifdef __cplusplus

View file

@@ -1,6 +1,6 @@
/*
-* SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2018-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -17,15 +17,15 @@
#endif
#ifndef EAI_SOCKTYPE
#define EAI_SOCKTYPE 10 /* ai_socktype not supported */
#endif
#ifndef EAI_AGAIN
#define EAI_AGAIN 2 /* temporary failure in name resolution */
#endif
#ifndef EAI_BADFLAGS
#define EAI_BADFLAGS 3 /* invalid value for ai_flags */
#endif
#endif // _ESP_PLATFORM_ERRNO_H_

View file

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2018-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -13,22 +13,21 @@ extern "C" {
#include "lwip/sockets.h"
#include "lwip/if_api.h"
#define MSG_DONTROUTE 0x4 /* send without using routing tables */
#define SOCK_SEQPACKET 5 /* sequenced packet stream */
#define MSG_EOR 0x8 /* data completes record */
#define SOCK_SEQPACKET 5 /* sequenced packet stream */
#define SOMAXCONN 128
#define IPV6_UNICAST_HOPS 4 /* int; IP6 hops */
#define NI_MAXHOST 1025
#define NI_MAXSERV 32
#define NI_NUMERICSERV 0x00000008
#define NI_DGRAM 0x00000010
typedef u32_t socklen_t;
unsigned int if_nametoindex(const char *ifname);
char *if_indextoname(unsigned int ifindex, char *ifname);

View file

@@ -42,7 +42,7 @@ struct dirent {
#define DT_DIR 2
#if __BSD_VISIBLE
#define MAXNAMLEN 255
-char d_name[MAXNAMLEN+1]; /*!< zero-terminated file name */
+char d_name[MAXNAMLEN + 1]; /*!< zero-terminated file name */
#else
char d_name[256];
#endif
@@ -56,8 +56,8 @@ void rewinddir(DIR* pdir);
int closedir(DIR* pdir);
int readdir_r(DIR* pdir, struct dirent* entry, struct dirent** out_dirent);
int scandir(const char *dirname, struct dirent ***out_dirlist,
int (*select_func)(const struct dirent *),
int (*cmp_func)(const struct dirent **, const struct dirent **));
int alphasort(const struct dirent **d1, const struct dirent **d2);
#ifdef __cplusplus

View file

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -19,13 +19,13 @@
*/
struct __lock {
#if (CONFIG_FREERTOS_USE_LIST_DATA_INTEGRITY_CHECK_BYTES && CONFIG_FREERTOS_USE_TRACE_FACILITY)
int reserved[29];
#elif (CONFIG_FREERTOS_USE_LIST_DATA_INTEGRITY_CHECK_BYTES && !CONFIG_FREERTOS_USE_TRACE_FACILITY)
int reserved[27];
#elif (!CONFIG_FREERTOS_USE_LIST_DATA_INTEGRITY_CHECK_BYTES && CONFIG_FREERTOS_USE_TRACE_FACILITY)
int reserved[23];
#else
int reserved[21];
#endif /* #if (CONFIG_FREERTOS_USE_LIST_DATA_INTEGRITY_CHECK_BYTES && CONFIG_FREERTOS_USE_TRACE_FACILITY) */
};

View file

@@ -21,14 +21,13 @@
#include_next<sys/reent.h>
#ifdef __cplusplus
extern "C" {
#endif
#if __NEWLIB__ > 4 || ( __NEWLIB__ == 4 && __NEWLIB_MINOR__ > 1 ) /* TODO: IDF-8134 */
-extern void __sinit (struct _reent *);
+extern void __sinit(struct _reent *);
extern struct _glue __sglue;
extern struct _reent * _global_impure_ptr;

View file

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2018-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -34,9 +34,9 @@ int select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *errorfds, struct
#define __FD_SAFE_SET(n, code) do { if ((unsigned)(n) < FD_SETSIZE) { code; } } while(0)
#define __FD_SAFE_GET(n, code) (((unsigned)(n) < FD_SETSIZE) ? (code) : 0)
#define FD_SET(n, p) __FD_SAFE_SET(n, ((p)->fds_bits[(n) / NFDBITS] |= (1L << ((n) % NFDBITS))))
#define FD_CLR(n, p) __FD_SAFE_SET(n, ((p)->fds_bits[(n) / NFDBITS] &= ~(1L << ((n) % NFDBITS))))
#define FD_ISSET(n, p) __FD_SAFE_GET(n, ((p)->fds_bits[(n) / NFDBITS] & (1L << ((n) % NFDBITS))))
#endif // FD_ISSET || FD_SET || FD_CLR
#endif //__ESP_SYS_SELECT_H__

View file

@@ -14,7 +14,6 @@
// Not everything has a defined meaning for ESP-IDF (e.g. process leader IDs) and therefore are likely to be stubbed
// in actual implementations.
#include <stdint.h>
#include <sys/types.h>
#include "sdkconfig.h"
@@ -164,8 +163,7 @@ typedef uint8_t cc_t;
typedef uint32_t speed_t;
typedef uint16_t tcflag_t;
-struct termios
-{
+struct termios {
tcflag_t c_iflag; /** Input modes */
tcflag_t c_oflag; /** Output modes */
tcflag_t c_cflag; /** Control modes */

View file

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2018-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -9,11 +9,11 @@
extern "C" {
#endif
#define AF_UNIX 1 /* local to host (pipes) */
struct sockaddr_un {
short sun_family; /*AF_UNIX*/
char sun_path[108]; /*path name */
};
#ifdef __cplusplus

View file

@@ -60,7 +60,7 @@ int poll(struct pollfd *fds, nfds_t nfds, int timeout)
}
}
-const int select_ret = select(max_fd + 1, &readfds, &writefds, &errorfds, timeout < 0 ? NULL: &tv);
+const int select_ret = select(max_fd + 1, &readfds, &writefds, &errorfds, timeout < 0 ? NULL : &tv);
if (select_ret > 0) {
ret += select_ret;

View file

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -50,8 +50,6 @@
#include "esp32p4/rtc.h"
#endif
// Offset between High resolution timer and the RTC.
// Initialized after reset or light sleep.
#if defined(CONFIG_ESP_TIME_FUNCS_USE_RTC_TIMER) && defined(CONFIG_ESP_TIME_FUNCS_USE_ESP_TIMER)
@@ -94,13 +92,12 @@ uint64_t esp_time_impl_get_time(void)
#endif // defined( CONFIG_ESP_TIME_FUNCS_USE_ESP_TIMER ) || defined( CONFIG_ESP_TIME_FUNCS_USE_RTC_TIMER )
void esp_time_impl_set_boot_time(uint64_t time_us)
{
_lock_acquire(&s_boot_time_lock);
#ifdef CONFIG_ESP_TIME_FUNCS_USE_RTC_TIMER
-REG_WRITE(RTC_BOOT_TIME_LOW_REG, (uint32_t) (time_us & 0xffffffff));
+REG_WRITE(RTC_BOOT_TIME_LOW_REG, (uint32_t)(time_us & 0xffffffff));
-REG_WRITE(RTC_BOOT_TIME_HIGH_REG, (uint32_t) (time_us >> 32));
+REG_WRITE(RTC_BOOT_TIME_HIGH_REG, (uint32_t)(time_us >> 32));
#else
s_boot_time = time_us;
#endif

View file

@@ -43,7 +43,6 @@ char * realpath(const char *file_name, char *resolved_name)
/* number of path components in the output buffer */
size_t out_depth = 0;
while (*in_ptr) {
/* "path component" is the part between two '/' path separators.
* locate the next path component in the input path:
@@ -52,7 +51,7 @@ char * realpath(const char *file_name, char *resolved_name)
size_t path_component_len = end_of_path_component - in_ptr;
if (path_component_len == 0 ||
(path_component_len == 1 && in_ptr[0] == '.')) {
/* empty path component or '.' - nothing to do */
} else if (path_component_len == 2 && in_ptr[0] == '.' && in_ptr[1] == '.') {
/* '..' - remove one path component from the output */

View file

@@ -49,7 +49,7 @@ void esp_reent_cleanup(void)
/* Clean up "glue" (lazily-allocated FILE objects) */
struct _glue* prev = &_REENT_SGLUE(_GLOBAL_REENT);
-for (struct _glue* cur = _REENT_SGLUE(_GLOBAL_REENT)._next; cur != NULL;) {
+for (struct _glue * cur = _REENT_SGLUE(_GLOBAL_REENT)._next; cur != NULL;) {
if (cur->_niobs == 0) {
cur = cur->_next;
continue;
@@ -67,7 +67,7 @@ void esp_reent_cleanup(void)
cur = cur->_next;
continue;
}
-struct _glue* next = cur->_next;
+struct _glue * next = cur->_next;
prev->_next = next;
free(cur);
cur = next;

View file

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -297,7 +297,6 @@ CLANG_DECLARE_ALIAS( __sync_lock_test_and_set_ ## n )
} \
CLANG_DECLARE_ALIAS( __sync_lock_release_ ## n )
#if !HAS_ATOMICS_32
_Static_assert(sizeof(unsigned char) == 1, "atomics require a 1-byte type");
@@ -376,21 +375,21 @@ SYNC_OP_FETCH(sub, 1, unsigned char)
SYNC_OP_FETCH(sub, 2, short unsigned int)
SYNC_OP_FETCH(sub, 4, unsigned int)
-SYNC_FETCH_OP(and, 1, unsigned char)
+SYNC_FETCH_OP( and, 1, unsigned char)
-SYNC_FETCH_OP(and, 2, short unsigned int)
+SYNC_FETCH_OP( and, 2, short unsigned int)
-SYNC_FETCH_OP(and, 4, unsigned int)
+SYNC_FETCH_OP( and, 4, unsigned int)
-SYNC_OP_FETCH(and, 1, unsigned char)
+SYNC_OP_FETCH( and, 1, unsigned char)
-SYNC_OP_FETCH(and, 2, short unsigned int)
+SYNC_OP_FETCH( and, 2, short unsigned int)
-SYNC_OP_FETCH(and, 4, unsigned int)
+SYNC_OP_FETCH( and, 4, unsigned int)
-SYNC_FETCH_OP(or, 1, unsigned char)
+SYNC_FETCH_OP( or, 1, unsigned char)
-SYNC_FETCH_OP(or, 2, short unsigned int)
+SYNC_FETCH_OP( or, 2, short unsigned int)
-SYNC_FETCH_OP(or, 4, unsigned int)
+SYNC_FETCH_OP( or, 4, unsigned int)
-SYNC_OP_FETCH(or, 1, unsigned char)
+SYNC_OP_FETCH( or, 1, unsigned char)
-SYNC_OP_FETCH(or, 2, short unsigned int)
+SYNC_OP_FETCH( or, 2, short unsigned int)
-SYNC_OP_FETCH(or, 4, unsigned int)
+SYNC_OP_FETCH( or, 4, unsigned int)
SYNC_FETCH_OP(xor, 1, unsigned char)
SYNC_FETCH_OP(xor, 2, short unsigned int)
@@ -416,7 +415,6 @@ SYNC_VAL_CMP_EXCHANGE(1, unsigned char)
SYNC_VAL_CMP_EXCHANGE(2, short unsigned int)
SYNC_VAL_CMP_EXCHANGE(4, unsigned int)
SYNC_LOCK_TEST_AND_SET(1, unsigned char)
SYNC_LOCK_TEST_AND_SET(2, short unsigned int)
SYNC_LOCK_TEST_AND_SET(4, unsigned int)
@@ -436,15 +434,17 @@ ATOMIC_STORE(4, unsigned int)
#elif __riscv_atomic == 1
-bool CLANG_ATOMIC_SUFFIX(__atomic_always_lock_free) (unsigned int size, const volatile void *) {
-return size <= sizeof(int);
+bool CLANG_ATOMIC_SUFFIX(__atomic_always_lock_free)(unsigned int size, const volatile void *)
+{
+return size <= sizeof(int);
}
-CLANG_DECLARE_ALIAS( __atomic_always_lock_free)
+CLANG_DECLARE_ALIAS(__atomic_always_lock_free)
-bool CLANG_ATOMIC_SUFFIX(__atomic_is_lock_free) (unsigned int size, const volatile void *) {
-return size <= sizeof(int);
+bool CLANG_ATOMIC_SUFFIX(__atomic_is_lock_free)(unsigned int size, const volatile void *)
+{
+return size <= sizeof(int);
}
-CLANG_DECLARE_ALIAS( __atomic_is_lock_free)
+CLANG_DECLARE_ALIAS(__atomic_is_lock_free)
#endif // !HAS_ATOMICS_32
@@ -484,9 +484,9 @@ SYNC_FETCH_OP(add, 8, long long unsigned int)
SYNC_FETCH_OP(sub, 8, long long unsigned int)
-SYNC_FETCH_OP(and, 8, long long unsigned int)
+SYNC_FETCH_OP( and, 8, long long unsigned int)
-SYNC_FETCH_OP(or, 8, long long unsigned int)
+SYNC_FETCH_OP( or, 8, long long unsigned int)
SYNC_FETCH_OP(xor, 8, long long unsigned int)
@@ -496,9 +496,9 @@ SYNC_OP_FETCH(add, 8, long long unsigned int)
SYNC_OP_FETCH(sub, 8, long long unsigned int)
-SYNC_OP_FETCH(and, 8, long long unsigned int)
+SYNC_OP_FETCH( and, 8, long long unsigned int)
-SYNC_OP_FETCH(or, 8, long long unsigned int)
+SYNC_OP_FETCH( or, 8, long long unsigned int)
SYNC_OP_FETCH(xor, 8, long long unsigned int)
@@ -519,21 +519,24 @@ ATOMIC_STORE(8, long long unsigned int)
#endif // !HAS_ATOMICS_64
// Clang generates calls to the __atomic_load/__atomic_store functions for object size more then 4 bytes
-void CLANG_ATOMIC_SUFFIX( __atomic_load ) (size_t size, const volatile void *src, void *dest, int model) {
+void CLANG_ATOMIC_SUFFIX(__atomic_load)(size_t size, const volatile void *src, void *dest, int model)
+{
unsigned state = _ATOMIC_ENTER_CRITICAL();
memcpy(dest, (const void *)src, size);
_ATOMIC_EXIT_CRITICAL(state);
}
-CLANG_DECLARE_ALIAS( __atomic_load )
+CLANG_DECLARE_ALIAS(__atomic_load)
-void CLANG_ATOMIC_SUFFIX( __atomic_store ) (size_t size, volatile void *dest, void *src, int model) {
+void CLANG_ATOMIC_SUFFIX(__atomic_store)(size_t size, volatile void *dest, void *src, int model)
+{
unsigned state = _ATOMIC_ENTER_CRITICAL();
memcpy((void *)dest, (const void *)src, size);
_ATOMIC_EXIT_CRITICAL(state);
}
-CLANG_DECLARE_ALIAS( __atomic_store)
+CLANG_DECLARE_ALIAS(__atomic_store)
-bool CLANG_ATOMIC_SUFFIX(__atomic_compare_exchange) (size_t size, volatile void *ptr, void *expected, void *desired, int success_memorder, int failure_memorder) {
+bool CLANG_ATOMIC_SUFFIX(__atomic_compare_exchange)(size_t size, volatile void *ptr, void *expected, void *desired, int success_memorder, int failure_memorder)
+{
bool ret = false;
unsigned state = _ATOMIC_ENTER_CRITICAL();
if (!memcmp((void *)ptr, expected, size)) {
@@ -545,4 +548,4 @@ bool CLANG_ATOMIC_SUFFIX(__atomic_compare_exchange) (size_t size, volatile void
_ATOMIC_EXIT_CRITICAL(state);
return ret;
}
-CLANG_DECLARE_ALIAS( __atomic_compare_exchange)
+CLANG_DECLARE_ALIAS(__atomic_compare_exchange)

View file

@@ -83,20 +83,18 @@ static int _fsync_console(int fd)
return -1;
}
/* The following weak definitions of syscalls will be used unless
* another definition is provided. That definition may come from
* VFS, LWIP, or the application.
*/
ssize_t _read_r(struct _reent *r, int fd, void * dst, size_t size)
-__attribute__((weak,alias("_read_r_console")));
+__attribute__((weak, alias("_read_r_console")));
ssize_t _write_r(struct _reent *r, int fd, const void * data, size_t size)
-__attribute__((weak,alias("_write_r_console")));
+__attribute__((weak, alias("_write_r_console")));
-int _fstat_r (struct _reent *r, int fd, struct stat *st)
+int _fstat_r(struct _reent *r, int fd, struct stat *st)
-__attribute__((weak,alias("_fstat_r_console")));
+__attribute__((weak, alias("_fstat_r_console")));
int fsync(int fd)
-__attribute__((weak,alias("_fsync_console")));
+__attribute__((weak, alias("_fsync_console")));
/* The aliases below are to "syscall_not_implemented", which
* doesn't have the same signature as the original function.
@@ -108,40 +106,39 @@ int fsync(int fd)
#endif
int _open_r(struct _reent *r, const char * path, int flags, int mode)
-__attribute__((weak,alias("syscall_not_implemented")));
+__attribute__((weak, alias("syscall_not_implemented")));
int _close_r(struct _reent *r, int fd)
-__attribute__((weak,alias("syscall_not_implemented")));
+__attribute__((weak, alias("syscall_not_implemented")));
off_t _lseek_r(struct _reent *r, int fd, off_t size, int mode)
-__attribute__((weak,alias("syscall_not_implemented")));
+__attribute__((weak, alias("syscall_not_implemented")));
int _fcntl_r(struct _reent *r, int fd, int cmd, int arg)
-__attribute__((weak,alias("syscall_not_implemented")));
+__attribute__((weak, alias("syscall_not_implemented")));
int _stat_r(struct _reent *r, const char * path, struct stat * st)
-__attribute__((weak,alias("syscall_not_implemented")));
+__attribute__((weak, alias("syscall_not_implemented")));
int _link_r(struct _reent *r, const char* n1, const char* n2)
-__attribute__((weak,alias("syscall_not_implemented")));
+__attribute__((weak, alias("syscall_not_implemented")));
int _unlink_r(struct _reent *r, const char *path)
-__attribute__((weak,alias("syscall_not_implemented")));
+__attribute__((weak, alias("syscall_not_implemented")));
int _rename_r(struct _reent *r, const char *src, const char *dst)
-__attribute__((weak,alias("syscall_not_implemented")));
+__attribute__((weak, alias("syscall_not_implemented")));
int _isatty_r(struct _reent *r, int fd)
-__attribute__((weak,alias("syscall_not_implemented")));
+__attribute__((weak, alias("syscall_not_implemented")));
/* These functions are not expected to be overridden */
int _system_r(struct _reent *r, const char *str)
__attribute__((alias("syscall_not_implemented")));
int raise(int sig)
__attribute__((alias("syscall_not_implemented_aborts")));
int _raise_r(struct _reent *r, int sig)
__attribute__((alias("syscall_not_implemented_aborts")));
void* _sbrk_r(struct _reent *r, ptrdiff_t sz)
__attribute__((alias("syscall_not_implemented_aborts")));
int _getpid_r(struct _reent *r)
__attribute__((alias("syscall_not_implemented")));
int _kill_r(struct _reent *r, int pid, int sig)
__attribute__((alias("syscall_not_implemented")));
void _exit(int __status)
__attribute__((alias("syscall_not_implemented_aborts")));
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop

View file

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
@@ -14,7 +14,6 @@
#define RECORD_TIME_START() do {__t1 = esp_cpu_get_cycle_count();}while(0)
#define RECORD_TIME_END(p_time) do{__t2 = esp_cpu_get_cycle_count(); *p_time = (__t2-__t1);}while(0)
#define TEST_TIMES 11
//Test twice, and only get the result of second time, to avoid influence of cache miss
@@ -37,11 +36,13 @@ static uint32_t s_t_ref;
static void sorted_array_insert(uint32_t* array, int* size, uint32_t item)
{
int pos;
-for (pos = *size; pos>0; pos--) {
-if (array[pos-1] < item) break;
-array[pos] = array[pos-1];
+for (pos = *size; pos > 0; pos--) {
+if (array[pos - 1] < item) {
+break;
+}
+array[pos] = array[pos - 1];
}
-array[pos]=item;
+array[pos] = item;
(*size)++;
}
@@ -56,7 +57,7 @@ static void test_flow(const char* name, test_f func)
sorted_array_insert(t_flight_sorted, &t_flight_num, t_op);
}
for (int i = 0; i < TEST_TIMES; i++) {
-ESP_LOGI(TAG, "%s: %" PRIu32 " ops", name, t_flight_sorted[i]-s_t_ref);
+ESP_LOGI(TAG, "%s: %" PRIu32 " ops", name, t_flight_sorted[i] - s_t_ref);
}
}
@@ -126,7 +127,7 @@ static IRAM_ATTR void test_atomic_compare_exchange(uint32_t* t_op)
(void) res;
}
-TEST_CASE("test atomic","[atomic]")
+TEST_CASE("test atomic", "[atomic]")
{
test_flow("ref", test_ref);

View file

@@ -18,7 +18,6 @@
#include "esp_heap_caps.h"
#include "esp_vfs.h"
TEST_CASE("misc - posix_memalign", "[newlib_misc]")
{
void* outptr = NULL;

View file

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
@@ -18,12 +18,12 @@
TEST_CASE("test ctype functions", "[newlib]")
{
-TEST_ASSERT_TRUE( isalnum('a') && isalnum('A') && isalnum('z') && isalnum('Z') && isalnum('0') && isalnum('9') );
+TEST_ASSERT_TRUE(isalnum('a') && isalnum('A') && isalnum('z') && isalnum('Z') && isalnum('0') && isalnum('9'));
-TEST_ASSERT_FALSE( isalnum('(') || isalnum('-') || isalnum(' ') || isalnum('\x81') || isalnum('.') || isalnum('\\') );
+TEST_ASSERT_FALSE(isalnum('(') || isalnum('-') || isalnum(' ') || isalnum('\x81') || isalnum('.') || isalnum('\\'));
-TEST_ASSERT_TRUE( isalpha('a') && isalpha('A') && isalpha('z') && isalpha('Z') );
+TEST_ASSERT_TRUE(isalpha('a') && isalpha('A') && isalpha('z') && isalpha('Z'));
-TEST_ASSERT_FALSE( isalpha('0') || isalpha('9') || isalpha(')') || isalpha('\t') || isalpha(' ') || isalpha('\x81') );
+TEST_ASSERT_FALSE(isalpha('0') || isalpha('9') || isalpha(')') || isalpha('\t') || isalpha(' ') || isalpha('\x81'));
-TEST_ASSERT_TRUE( isspace(' ') && isspace('\t') && isspace('\n') && isspace('\r') );
+TEST_ASSERT_TRUE(isspace(' ') && isspace('\t') && isspace('\n') && isspace('\r'));
-TEST_ASSERT_FALSE( isspace('0') || isspace('9') || isspace(')') || isspace('A') || isspace('*') || isspace('\x81') || isspace('a'));
+TEST_ASSERT_FALSE(isspace('0') || isspace('9') || isspace(')') || isspace('A') || isspace('*') || isspace('\x81') || isspace('a'));
}
TEST_CASE("test atoX functions", "[newlib]")
@@ -74,28 +74,27 @@ TEST_CASE("test time functions", "[newlib]")
setenv("TZ", "UTC-8", 1);
tzset();
struct tm *tm_utc = gmtime(&now);
-TEST_ASSERT_EQUAL( 28, tm_utc->tm_sec);
+TEST_ASSERT_EQUAL(28, tm_utc->tm_sec);
-TEST_ASSERT_EQUAL( 41, tm_utc->tm_min);
+TEST_ASSERT_EQUAL(41, tm_utc->tm_min);
-TEST_ASSERT_EQUAL( 7, tm_utc->tm_hour);
+TEST_ASSERT_EQUAL(7, tm_utc->tm_hour);
-TEST_ASSERT_EQUAL( 26, tm_utc->tm_mday);
+TEST_ASSERT_EQUAL(26, tm_utc->tm_mday);
-TEST_ASSERT_EQUAL( 4, tm_utc->tm_mon);
+TEST_ASSERT_EQUAL(4, tm_utc->tm_mon);
TEST_ASSERT_EQUAL(116, tm_utc->tm_year);
-TEST_ASSERT_EQUAL( 4, tm_utc->tm_wday);
+TEST_ASSERT_EQUAL(4, tm_utc->tm_wday);
TEST_ASSERT_EQUAL(146, tm_utc->tm_yday);
struct tm *tm_local = localtime(&now);
-TEST_ASSERT_EQUAL( 28, tm_local->tm_sec);
+TEST_ASSERT_EQUAL(28, tm_local->tm_sec);
-TEST_ASSERT_EQUAL( 41, tm_local->tm_min);
+TEST_ASSERT_EQUAL(41, tm_local->tm_min);
-TEST_ASSERT_EQUAL( 15, tm_local->tm_hour);
+TEST_ASSERT_EQUAL(15, tm_local->tm_hour);
-TEST_ASSERT_EQUAL( 26, tm_local->tm_mday);
+TEST_ASSERT_EQUAL(26, tm_local->tm_mday);
-TEST_ASSERT_EQUAL( 4, tm_local->tm_mon);
+TEST_ASSERT_EQUAL(4, tm_local->tm_mon);
TEST_ASSERT_EQUAL(116, tm_local->tm_year);
-TEST_ASSERT_EQUAL( 4, tm_local->tm_wday);
+TEST_ASSERT_EQUAL(4, tm_local->tm_wday);
TEST_ASSERT_EQUAL(146, tm_local->tm_yday);
}
TEST_CASE("test asctime", "[newlib]")
{
char buf[64];
@@ -204,7 +203,6 @@ TEST_CASE("test 64bit int formats", "[newlib]")
}
#endif // CONFIG_NEWLIB_NANO_FORMAT
TEST_CASE("fmod and fmodf work as expected", "[newlib]")
{
TEST_ASSERT_EQUAL(0.1, fmod(10.1, 2.0));
@@ -216,7+214,6 @@ TEST_CASE("newlib: can link 'system', 'raise'", "[newlib]")
printf("system: %p, raise: %p\n", &system, &raise);
}
TEST_CASE("newlib: rom and toolchain localtime func gives the same result", "[newlib]")
{
// This UNIX time represents 2020-03-12 15:00:00 EDT (19:00 GMT)

View file

@@ -9,7 +9,6 @@
#include "unity.h"
#include "esp_system.h"
typedef struct {
jmp_buf jmp_env;
uint32_t retval;

View file

@@ -21,19 +21,18 @@ atomic_uint g_atomic32;
atomic_ushort g_atomic16;
atomic_uchar g_atomic8;
TEST_CASE("stdatomic - test_64bit_atomics", "[newlib_stdatomic]")
{
unsigned long long x64 = 0;
g_atomic64 = 0; // calls atomic_store
-x64 += atomic_fetch_or (&g_atomic64, 0x1111111111111111ULL);
+x64 += atomic_fetch_or(&g_atomic64, 0x1111111111111111ULL);
x64 += atomic_fetch_xor(&g_atomic64, 0x3333333333333333ULL);
x64 += atomic_fetch_and(&g_atomic64, 0xf0f0f0f0f0f0f0f0ULL);
x64 += atomic_fetch_sub(&g_atomic64, 0x0f0f0f0f0f0f0f0fULL);
x64 += atomic_fetch_add(&g_atomic64, 0x2222222222222222ULL);
#ifndef __clang__
-x64 += __atomic_fetch_nand_8 (&g_atomic64, 0xAAAAAAAAAAAAAAAAULL, 0);
+x64 += __atomic_fetch_nand_8(&g_atomic64, 0xAAAAAAAAAAAAAAAAULL, 0);
TEST_ASSERT_EQUAL_HEX64(0x9797979797979797ULL, x64);
TEST_ASSERT_EQUAL_HEX64(0xDDDDDDDDDDDDDDDDULL, g_atomic64); // calls atomic_load
@@ -48,13 +47,13 @@ TEST_CASE("stdatomic - test_32bit_atomics", "[newlib_stdatomic]")
unsigned int x32 = 0;
g_atomic32 = 0;
-x32 += atomic_fetch_or (&g_atomic32, 0x11111111U);
+x32 += atomic_fetch_or(&g_atomic32, 0x11111111U);
x32 += atomic_fetch_xor(&g_atomic32, 0x33333333U);
x32 += atomic_fetch_and(&g_atomic32, 0xf0f0f0f0U);
x32 += atomic_fetch_sub(&g_atomic32, 0x0f0f0f0fU);
x32 += atomic_fetch_add(&g_atomic32, 0x22222222U);
#ifndef __clang__
-x32 += __atomic_fetch_nand_4 (&g_atomic32, 0xAAAAAAAAU, 0);
+x32 += __atomic_fetch_nand_4(&g_atomic32, 0xAAAAAAAAU, 0);
TEST_ASSERT_EQUAL_HEX32(0x97979797U, x32);
TEST_ASSERT_EQUAL_HEX32(0xDDDDDDDDU, g_atomic32);
@@ -69,13 +68,13 @@ TEST_CASE("stdatomic - test_16bit_atomics", "[newlib_stdatomic]")
unsigned int x16 = 0;
g_atomic16 = 0;
-x16 += atomic_fetch_or (&g_atomic16, 0x1111);
+x16 += atomic_fetch_or(&g_atomic16, 0x1111);
x16 += atomic_fetch_xor(&g_atomic16, 0x3333);
x16 += atomic_fetch_and(&g_atomic16, 0xf0f0);
x16 += atomic_fetch_sub(&g_atomic16, 0x0f0f);
x16 += atomic_fetch_add(&g_atomic16, 0x2222);
#ifndef __clang__
-x16 += __atomic_fetch_nand_2 (&g_atomic16, 0xAAAA, 0);
+x16 += __atomic_fetch_nand_2(&g_atomic16, 0xAAAA, 0);
TEST_ASSERT_EQUAL_HEX16(0x9797, x16);
TEST_ASSERT_EQUAL_HEX16(0xDDDD, g_atomic16);
@@ -90,13 +89,13 @@ TEST_CASE("stdatomic - test_8bit_atomics", "[newlib_stdatomic]")
unsigned int x8 = 0;
g_atomic8 = 0;
-x8 += atomic_fetch_or (&g_atomic8, 0x11);
+x8 += atomic_fetch_or(&g_atomic8, 0x11);
x8 += atomic_fetch_xor(&g_atomic8, 0x33);
x8 += atomic_fetch_and(&g_atomic8, 0xf0);
x8 += atomic_fetch_sub(&g_atomic8, 0x0f);
x8 += atomic_fetch_add(&g_atomic8, 0x22);
#ifndef __clang__
-x8 += __atomic_fetch_nand_1 (&g_atomic8, 0xAA, 0);
+x8 += __atomic_fetch_nand_1(&g_atomic8, 0xAA, 0);
TEST_ASSERT_EQUAL_HEX8(0x97, x8);
TEST_ASSERT_EQUAL_HEX8(0xDD, g_atomic8);
@@ -112,7 +111,7 @@ TEST_CASE("stdatomic - test_64bit_atomics", "[newlib_stdatomic]")
unsigned long long x64 = 0;
g_atomic64 = 0; // calls atomic_store
-x64 += __atomic_or_fetch_8 (&g_atomic64, 0x1111111111111111ULL, 0);
+x64 += __atomic_or_fetch_8(&g_atomic64, 0x1111111111111111ULL, 0);
x64 += __atomic_xor_fetch_8(&g_atomic64, 0x3333333333333333ULL, 0);
x64 += __atomic_and_fetch_8(&g_atomic64, 0xf0f0f0f0f0f0f0f0ULL, 0);
x64 += __atomic_sub_fetch_8(&g_atomic64, 0x0f0f0f0f0f0f0f0fULL, 0);
@@ -128,12 +127,12 @@ TEST_CASE("stdatomic - test_32bit_atomics", "[newlib_stdatomic]")
unsigned int x32 = 0;
g_atomic32 = 0;
-x32 += __atomic_or_fetch_4 (&g_atomic32, 0x11111111U, 0);
+x32 += __atomic_or_fetch_4(&g_atomic32, 0x11111111U, 0);
x32 += __atomic_xor_fetch_4(&g_atomic32, 0x33333333U, 0);
x32 += __atomic_and_fetch_4(&g_atomic32, 0xf0f0f0f0U, 0);
x32 += __atomic_sub_fetch_4(&g_atomic32, 0x0f0f0f0fU, 0);
x32 += __atomic_add_fetch_4(&g_atomic32, 0x22222222U, 0);
-x32 += __atomic_nand_fetch_4 (&g_atomic32, 0xAAAAAAAAU, 0);
+x32 += __atomic_nand_fetch_4(&g_atomic32, 0xAAAAAAAAU, 0);
TEST_ASSERT_EQUAL_HEX32(0x75757574U, x32);
TEST_ASSERT_EQUAL_HEX32(0xDDDDDDDDU, g_atomic32);
@@ -144,12 +143,12 @@ TEST_CASE("stdatomic - test_16bit_atomics", "[newlib_stdatomic]")
unsigned int x16 = 0;
g_atomic16 = 0;
-x16 += __atomic_or_fetch_2 (&g_atomic16, 0x1111, 0);
+x16 += __atomic_or_fetch_2(&g_atomic16, 0x1111, 0);
x16 += __atomic_xor_fetch_2(&g_atomic16, 0x3333, 0);
x16 += __atomic_and_fetch_2(&g_atomic16, 0xf0f0, 0);
x16 += __atomic_sub_fetch_2(&g_atomic16, 0x0f0f, 0);
x16 += __atomic_add_fetch_2(&g_atomic16, 0x2222, 0);
-x16 += __atomic_nand_fetch_2 (&g_atomic16, 0xAAAA, 0);
+x16 += __atomic_nand_fetch_2(&g_atomic16, 0xAAAA, 0);
TEST_ASSERT_EQUAL_HEX16(0x7574, x16);
TEST_ASSERT_EQUAL_HEX16(0xDDDD, g_atomic16);
@@ -160,12 +159,12 @@ TEST_CASE("stdatomic - test_8bit_atomics", "[newlib_stdatomic]")
unsigned int x8 = 0;
g_atomic8 = 0;
-x8 += __atomic_or_fetch_1 (&g_atomic8, 0x11, 0);
+x8 += __atomic_or_fetch_1(&g_atomic8, 0x11, 0);
x8 += __atomic_xor_fetch_1(&g_atomic8, 0x33, 0);
x8 += __atomic_and_fetch_1(&g_atomic8, 0xf0, 0);
x8 += __atomic_sub_fetch_1(&g_atomic8, 0x0f, 0);
x8 += __atomic_add_fetch_1(&g_atomic8, 0x22, 0);
-x8 += __atomic_nand_fetch_1 (&g_atomic8, 0xAA, 0);
+x8 += __atomic_nand_fetch_1(&g_atomic8, 0xAA, 0);
TEST_ASSERT_EQUAL_HEX8(0x74, x8);
TEST_ASSERT_EQUAL_HEX8(0xDD, g_atomic8);
@@ -173,7 +172,6 @@ TEST_CASE("stdatomic - test_8bit_atomics", "[newlib_stdatomic]")
#endif // #ifndef __clang__
#define TEST_EXCLUSION(n) TEST_CASE("stdatomic - test_" #n "bit_exclusion", "[newlib_stdatomic]") \
{ \
g_atomic ## n = 0; \
@ -216,7 +214,6 @@ TEST_EXCLUSION(16)
TEST_EXCLUSION_TASK(8) TEST_EXCLUSION_TASK(8)
TEST_EXCLUSION(8) TEST_EXCLUSION(8)
#define ITER_COUNT 20000 #define ITER_COUNT 20000
#define TEST_RACE_OPERATION(ASSERT_SUFFIX, NAME, LHSTYPE, PRE, POST, INIT, FINAL) \ #define TEST_RACE_OPERATION(ASSERT_SUFFIX, NAME, LHSTYPE, PRE, POST, INIT, FINAL) \
@ -283,75 +280,75 @@ TEST_CASE("stdatomic - test_" #NAME, "[newlib_stdatomic]") \
TEST_ASSERT(EXPECTED == var_##NAME); \ TEST_ASSERT(EXPECTED == var_##NAME); \
} }
TEST_RACE_OPERATION ( ,uint8_add, uint8_t, , += 1, 0, (uint8_t) (2*ITER_COUNT)) TEST_RACE_OPERATION(, uint8_add, uint8_t,, += 1, 0, (uint8_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint8_add_3, uint8_t, , += 3, 0, (uint8_t) (6*ITER_COUNT)) TEST_RACE_OPERATION(, uint8_add_3, uint8_t,, += 3, 0, (uint8_t)(6 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint8_postinc, uint8_t, , ++, 0, (uint8_t) (2*ITER_COUNT)) TEST_RACE_OPERATION(, uint8_postinc, uint8_t,, ++, 0, (uint8_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint8_preinc, uint8_t, ++, , 0, (uint8_t) (2*ITER_COUNT)) TEST_RACE_OPERATION(, uint8_preinc, uint8_t, ++,, 0, (uint8_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint8_sub, uint8_t, , -= 1, 0, (uint8_t) -(2*ITER_COUNT)) TEST_RACE_OPERATION(, uint8_sub, uint8_t,, -= 1, 0, (uint8_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint8_sub_3, uint8_t, , -= 3, 0, (uint8_t) -(6*ITER_COUNT)) TEST_RACE_OPERATION(, uint8_sub_3, uint8_t,, -= 3, 0, (uint8_t) - (6 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint8_postdec, uint8_t, , --, 0, (uint8_t) -(2*ITER_COUNT)) TEST_RACE_OPERATION(, uint8_postdec, uint8_t,, --, 0, (uint8_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint8_predec, uint8_t, --, , 0, (uint8_t) -(2*ITER_COUNT)) TEST_RACE_OPERATION(, uint8_predec, uint8_t, --,, 0, (uint8_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint8_mul, uint8_t, , *= 3, 1, (uint8_t) 0x1) TEST_RACE_OPERATION(, uint8_mul, uint8_t,, *= 3, 1, (uint8_t) 0x1)
TEST_RACE_OPERATION ( ,uint16_add, uint16_t, , += 1, 0, (uint16_t) (2*ITER_COUNT)) TEST_RACE_OPERATION(, uint16_add, uint16_t,, += 1, 0, (uint16_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint16_add_3, uint16_t, , += 3, 0, (uint16_t) (6*ITER_COUNT)) TEST_RACE_OPERATION(, uint16_add_3, uint16_t,, += 3, 0, (uint16_t)(6 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint16_postinc, uint16_t, , ++, 0, (uint16_t) (2*ITER_COUNT)) TEST_RACE_OPERATION(, uint16_postinc, uint16_t,, ++, 0, (uint16_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint16_preinc, uint16_t, ++, , 0, (uint16_t) (2*ITER_COUNT)) TEST_RACE_OPERATION(, uint16_preinc, uint16_t, ++,, 0, (uint16_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint16_sub, uint16_t, , -= 1, 0, (uint16_t) -(2*ITER_COUNT)) TEST_RACE_OPERATION(, uint16_sub, uint16_t,, -= 1, 0, (uint16_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint16_sub_3, uint16_t, , -= 3, 0, (uint16_t) -(6*ITER_COUNT)) TEST_RACE_OPERATION(, uint16_sub_3, uint16_t,, -= 3, 0, (uint16_t) - (6 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint16_postdec, uint16_t, , --, 0, (uint16_t) -(2*ITER_COUNT)) TEST_RACE_OPERATION(, uint16_postdec, uint16_t,, --, 0, (uint16_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint16_predec, uint16_t, --, , 0, (uint16_t) -(2*ITER_COUNT)) TEST_RACE_OPERATION(, uint16_predec, uint16_t, --,, 0, (uint16_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint16_mul, uint16_t, , *= 3, 1, (uint16_t) 0x6D01) TEST_RACE_OPERATION(, uint16_mul, uint16_t,, *= 3, 1, (uint16_t) 0x6D01)
TEST_RACE_OPERATION ( ,uint32_add, uint32_t, , += 1, 0, (uint32_t) (2*ITER_COUNT)) TEST_RACE_OPERATION(, uint32_add, uint32_t,, += 1, 0, (uint32_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint32_add_3, uint32_t, , += 3, 0, (uint32_t) (6*ITER_COUNT)) TEST_RACE_OPERATION(, uint32_add_3, uint32_t,, += 3, 0, (uint32_t)(6 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint32_postinc, uint32_t, , ++, 0, (uint32_t) (2*ITER_COUNT)) TEST_RACE_OPERATION(, uint32_postinc, uint32_t,, ++, 0, (uint32_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint32_preinc, uint32_t, ++, , 0, (uint32_t) (2*ITER_COUNT)) TEST_RACE_OPERATION(, uint32_preinc, uint32_t, ++,, 0, (uint32_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint32_sub, uint32_t, , -= 1, 0, (uint32_t) -(2*ITER_COUNT)) TEST_RACE_OPERATION(, uint32_sub, uint32_t,, -= 1, 0, (uint32_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint32_sub_3, uint32_t, , -= 3, 0, (uint32_t) -(6*ITER_COUNT)) TEST_RACE_OPERATION(, uint32_sub_3, uint32_t,, -= 3, 0, (uint32_t) - (6 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint32_postdec, uint32_t, , --, 0, (uint32_t) -(2*ITER_COUNT)) TEST_RACE_OPERATION(, uint32_postdec, uint32_t,, --, 0, (uint32_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint32_predec, uint32_t, --, , 0, (uint32_t) -(2*ITER_COUNT)) TEST_RACE_OPERATION(, uint32_predec, uint32_t, --,, 0, (uint32_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint32_mul, uint32_t, , *= 3, 1, (uint32_t) 0xC1E36D01U) TEST_RACE_OPERATION(, uint32_mul, uint32_t,, *= 3, 1, (uint32_t) 0xC1E36D01U)
TEST_RACE_OPERATION ( ,uint64_add, uint64_t, , += 1, 0, (uint64_t) (2*ITER_COUNT)) TEST_RACE_OPERATION(, uint64_add, uint64_t,, += 1, 0, (uint64_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint64_add_3, uint64_t, , += 3, 0, (uint64_t) (6*ITER_COUNT)) TEST_RACE_OPERATION(, uint64_add_3, uint64_t,, += 3, 0, (uint64_t)(6 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint64_add_neg, uint64_t, , += 1, -10000, (uint64_t) (2*ITER_COUNT-10000)) TEST_RACE_OPERATION(, uint64_add_neg, uint64_t,, += 1, -10000, (uint64_t)(2 * ITER_COUNT - 10000))
TEST_RACE_OPERATION ( ,uint64_postinc, uint64_t, , ++, 0, (uint64_t) (2*ITER_COUNT)) TEST_RACE_OPERATION(, uint64_postinc, uint64_t,, ++, 0, (uint64_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint64_postinc_neg, uint64_t, , ++, -10000, (uint64_t) (2*ITER_COUNT-10000)) TEST_RACE_OPERATION(, uint64_postinc_neg, uint64_t,, ++, -10000, (uint64_t)(2 * ITER_COUNT - 10000))
TEST_RACE_OPERATION ( ,uint64_preinc, uint64_t, ++, , 0, (uint64_t) (2*ITER_COUNT)) TEST_RACE_OPERATION(, uint64_preinc, uint64_t, ++,, 0, (uint64_t)(2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint64_preinc_neg, uint64_t, ++, , -10000, (uint64_t) (2*ITER_COUNT-10000)) TEST_RACE_OPERATION(, uint64_preinc_neg, uint64_t, ++,, -10000, (uint64_t)(2 * ITER_COUNT - 10000))
TEST_RACE_OPERATION ( ,uint64_sub, uint64_t, , -= 1, 0, (uint64_t) -(2*ITER_COUNT)) TEST_RACE_OPERATION(, uint64_sub, uint64_t,, -= 1, 0, (uint64_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint64_sub_3, uint64_t, , -= 3, 0, (uint64_t) -(6*ITER_COUNT)) TEST_RACE_OPERATION(, uint64_sub_3, uint64_t,, -= 3, 0, (uint64_t) - (6 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint64_sub_neg, uint64_t, , -= 1, 10000, (uint64_t) ((-2*ITER_COUNT)+10000)) TEST_RACE_OPERATION(, uint64_sub_neg, uint64_t,, -= 1, 10000, (uint64_t)((-2 * ITER_COUNT) + 10000))
TEST_RACE_OPERATION ( ,uint64_postdec, uint64_t, , --, 0, (uint64_t) -(2*ITER_COUNT)) TEST_RACE_OPERATION(, uint64_postdec, uint64_t,, --, 0, (uint64_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint64_postdec_neg, uint64_t, , --, 10000, (uint64_t) ((-2*ITER_COUNT)+10000)) TEST_RACE_OPERATION(, uint64_postdec_neg, uint64_t,, --, 10000, (uint64_t)((-2 * ITER_COUNT) + 10000))
TEST_RACE_OPERATION ( ,uint64_predec, uint64_t, --, , 0, (uint64_t) -(2*ITER_COUNT)) TEST_RACE_OPERATION(, uint64_predec, uint64_t, --,, 0, (uint64_t) - (2 * ITER_COUNT))
TEST_RACE_OPERATION ( ,uint64_predec_neg, uint64_t, --, , 10000, (uint64_t) ((-2*ITER_COUNT)+10000)) TEST_RACE_OPERATION(, uint64_predec_neg, uint64_t, --,, 10000, (uint64_t)((-2 * ITER_COUNT) + 10000))
TEST_RACE_OPERATION ( ,uint64_mul, uint64_t, , *= 3, 1, (uint64_t) 0x988EE974C1E36D01ULL) TEST_RACE_OPERATION(, uint64_mul, uint64_t,, *= 3, 1, (uint64_t) 0x988EE974C1E36D01ULL)
TEST_RACE_OPERATION (_FLOAT ,float_add, float, , += 1, 0, (2*ITER_COUNT)) TEST_RACE_OPERATION(_FLOAT, float_add, float,, += 1, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION (_FLOAT ,complex_float_add, _Complex float, , += 1, 0, (2*ITER_COUNT)) TEST_RACE_OPERATION(_FLOAT, complex_float_add, _Complex float,, += 1, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION (_FLOAT ,float_postinc, float, , ++, 0, (2*ITER_COUNT)) TEST_RACE_OPERATION(_FLOAT, float_postinc, float,, ++, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION (_FLOAT ,float_preinc, float, ++, , 0, (2*ITER_COUNT)) TEST_RACE_OPERATION(_FLOAT, float_preinc, float, ++,, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION (_FLOAT ,float_sub, float, , -= 1, 0, -(2*ITER_COUNT)) TEST_RACE_OPERATION(_FLOAT, float_sub, float,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION (_FLOAT ,complex_float_sub, _Complex float, , -= 1, 0, -(2*ITER_COUNT)) TEST_RACE_OPERATION(_FLOAT, complex_float_sub, _Complex float,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION (_FLOAT ,float_postdec, float, , --, 0, -(2*ITER_COUNT)) TEST_RACE_OPERATION(_FLOAT, float_postdec, float,, --, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION (_FLOAT ,float_predec, float, --, , 0, -(2*ITER_COUNT)) TEST_RACE_OPERATION(_FLOAT, float_predec, float, --,, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION (_DOUBLE ,double_add, double, , += 1, 0, (2*ITER_COUNT)) TEST_RACE_OPERATION(_DOUBLE, double_add, double,, += 1, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION (_DOUBLE ,complex_double_add, _Complex double, , += 1, 0, (2*ITER_COUNT)) TEST_RACE_OPERATION(_DOUBLE, complex_double_add, _Complex double,, += 1, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION (_DOUBLE ,double_postinc, double, , ++, 0, (2*ITER_COUNT)) TEST_RACE_OPERATION(_DOUBLE, double_postinc, double,, ++, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION (_DOUBLE ,double_preinc, double, ++, , 0, (2*ITER_COUNT)) TEST_RACE_OPERATION(_DOUBLE, double_preinc, double, ++,, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION (_DOUBLE ,double_sub, double, , -= 1, 0, -(2*ITER_COUNT)) TEST_RACE_OPERATION(_DOUBLE, double_sub, double,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION (_DOUBLE ,complex_double_sub, _Complex double, , -= 1, 0, -(2*ITER_COUNT)) TEST_RACE_OPERATION(_DOUBLE, complex_double_sub, _Complex double,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION (_DOUBLE ,double_postdec, double, , --, 0, -(2*ITER_COUNT)) TEST_RACE_OPERATION(_DOUBLE, double_postdec, double,, --, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION (_DOUBLE ,double_predec, double, --, , 0, -(2*ITER_COUNT)) TEST_RACE_OPERATION(_DOUBLE, double_predec, double, --,, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE (long_double_add, long double, , += 1, 0, (2*ITER_COUNT)) TEST_RACE_OPERATION_LONG_DOUBLE(long_double_add, long double,, += 1, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE (complex_long_double_add, _Complex long double, , += 1, 0, (2*ITER_COUNT)) TEST_RACE_OPERATION_LONG_DOUBLE(complex_long_double_add, _Complex long double,, += 1, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE (long_double_postinc, long double, , ++, 0, (2*ITER_COUNT)) TEST_RACE_OPERATION_LONG_DOUBLE(long_double_postinc, long double,, ++, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE (long_double_sub, long double, , -= 1, 0, -(2*ITER_COUNT)) TEST_RACE_OPERATION_LONG_DOUBLE(long_double_sub, long double,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE (long_double_preinc, long double, ++, , 0, (2*ITER_COUNT)) TEST_RACE_OPERATION_LONG_DOUBLE(long_double_preinc, long double, ++,, 0, (2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE (complex_long_double_sub, _Complex long double, , -= 1, 0, -(2*ITER_COUNT)) TEST_RACE_OPERATION_LONG_DOUBLE(complex_long_double_sub, _Complex long double,, -= 1, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE (long_double_postdec, long double, , --, 0, -(2*ITER_COUNT)) TEST_RACE_OPERATION_LONG_DOUBLE(long_double_postdec, long double,, --, 0, -(2 * ITER_COUNT))
TEST_RACE_OPERATION_LONG_DOUBLE (long_double_predec, long double, --, , 0, -(2*ITER_COUNT)) TEST_RACE_OPERATION_LONG_DOUBLE(long_double_predec, long double, --,, 0, -(2 * ITER_COUNT))
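Every TEST_RACE_OPERATION instance above follows the same pattern: two concurrent tasks apply the same read-modify-write to one shared variable, and the test then asserts the deterministic final value (e.g. (uint8_t)(2 * ITER_COUNT)). A minimal host-side sketch of that pattern, using plain pthreads and a C11 _Atomic counter with illustrative names — not the actual macro expansion:

    #include <assert.h>
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define ITER_COUNT 20000
    static _Atomic uint8_t var_uint8_add;           /* shared counter under test */

    static void *worker(void *arg)
    {
        (void)arg;
        for (int i = 0; i < ITER_COUNT; ++i) {
            var_uint8_add += 1;                     /* atomic read-modify-write */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        /* 2 * ITER_COUNT truncated to uint8_t, as in the expected values above */
        assert(var_uint8_add == (uint8_t)(2 * ITER_COUNT));
        return 0;
    }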

@ -72,7 +72,6 @@ static void time_adc_test_task(void* arg)
vTaskDelete(NULL); vTaskDelete(NULL);
} }
TEST_CASE("Reading RTC registers on APP CPU doesn't affect clock", "[newlib]") TEST_CASE("Reading RTC registers on APP CPU doesn't affect clock", "[newlib]")
{ {
SemaphoreHandle_t done = xSemaphoreCreateBinary(); SemaphoreHandle_t done = xSemaphoreCreateBinary();
@ -82,7 +81,7 @@ TEST_CASE("Reading RTC registers on APP CPU doesn't affect clock", "[newlib]")
for (int i = 0; i < 4; ++i) { for (int i = 0; i < 4; ++i) {
struct timeval tv_start; struct timeval tv_start;
gettimeofday(&tv_start, NULL); gettimeofday(&tv_start, NULL);
vTaskDelay(1000/portTICK_PERIOD_MS); vTaskDelay(1000 / portTICK_PERIOD_MS);
struct timeval tv_stop; struct timeval tv_stop;
gettimeofday(&tv_stop, NULL); gettimeofday(&tv_stop, NULL);
float time_sec = tv_stop.tv_sec - tv_start.tv_sec + 1e-6f * (tv_stop.tv_usec - tv_start.tv_usec); float time_sec = tv_stop.tv_sec - tv_start.tv_sec + 1e-6f * (tv_stop.tv_usec - tv_start.tv_usec);
@ -190,7 +189,9 @@ static void adjtimeTask2(void *pvParameters)
while (exit_flag == false) { while (exit_flag == false) {
delta.tv_sec += 1; delta.tv_sec += 1;
delta.tv_usec = 900000; delta.tv_usec = 900000;
if (delta.tv_sec >= 2146) delta.tv_sec = 1; if (delta.tv_sec >= 2146) {
delta.tv_sec = 1;
}
adjtime(&delta, &outdelta); adjtime(&delta, &outdelta);
} }
xSemaphoreGive(*sema); xSemaphoreGive(*sema);
@ -242,7 +243,7 @@ TEST_CASE("test for no interlocking adjtime, gettimeofday and settimeofday funct
// set exit flag to let thread exit // set exit flag to let thread exit
exit_flag = true; exit_flag = true;
for (int i = 0; i < max_tasks; ++i) { for (int i = 0; i < max_tasks; ++i) {
if (!xSemaphoreTake(exit_sema[i], 2000/portTICK_PERIOD_MS)) { if (!xSemaphoreTake(exit_sema[i], 2000 / portTICK_PERIOD_MS)) {
TEST_FAIL_MESSAGE("exit_sema not released by test task"); TEST_FAIL_MESSAGE("exit_sema not released by test task");
} }
vSemaphoreDelete(exit_sema[i]); vSemaphoreDelete(exit_sema[i]);
@ -284,7 +285,7 @@ static int64_t calc_correction(const char* tag, int64_t* sys_time, int64_t* real
int64_t real_correction_us = dt_sys_time_us - dt_real_time_us; int64_t real_correction_us = dt_sys_time_us - dt_real_time_us;
int64_t error_us = calc_correction_us - real_correction_us; int64_t error_us = calc_correction_us - real_correction_us;
printf("%s: dt_real_time = %lli us, dt_sys_time = %lli us, calc_correction = %lli us, error = %lli us\n", printf("%s: dt_real_time = %lli us, dt_sys_time = %lli us, calc_correction = %lli us, error = %lli us\n",
tag, dt_real_time_us, dt_sys_time_us, calc_correction_us, error_us); tag, dt_real_time_us, dt_sys_time_us, calc_correction_us, error_us);
TEST_ASSERT_TRUE(dt_sys_time_us > 0 && dt_real_time_us > 0); TEST_ASSERT_TRUE(dt_sys_time_us > 0 && dt_real_time_us > 0);
TEST_ASSERT_INT_WITHIN(100, 0, error_us); TEST_ASSERT_INT_WITHIN(100, 0, error_us);
@ -349,7 +350,7 @@ TEST_CASE("test time adjustment happens linearly", "[newlib][timeout=15]")
exit_flag = true; exit_flag = true;
for (int i = 0; i < 2; ++i) { for (int i = 0; i < 2; ++i) {
if (!xSemaphoreTake(exit_sema[i], 2100/portTICK_PERIOD_MS)) { if (!xSemaphoreTake(exit_sema[i], 2100 / portTICK_PERIOD_MS)) {
TEST_FAIL_MESSAGE("exit_sema not released by test task"); TEST_FAIL_MESSAGE("exit_sema not released by test task");
} }
} }
@ -360,7 +361,7 @@ TEST_CASE("test time adjustment happens linearly", "[newlib][timeout=15]")
} }
#endif #endif
void test_posix_timers_clock (void) void test_posix_timers_clock(void)
{ {
#ifndef _POSIX_TIMERS #ifndef _POSIX_TIMERS
TEST_ASSERT_MESSAGE(false, "_POSIX_TIMERS - is not defined"); TEST_ASSERT_MESSAGE(false, "_POSIX_TIMERS - is not defined");
@ -475,7 +476,7 @@ static struct timeval get_time(const char *desc, char *buffer)
TEST_CASE("test time_t wide 64 bits", "[newlib]") TEST_CASE("test time_t wide 64 bits", "[newlib]")
{ {
static char buffer[32]; static char buffer[32];
ESP_LOGI("TAG", "sizeof(time_t): %d (%d-bit)", sizeof(time_t), sizeof(time_t)*8); ESP_LOGI("TAG", "sizeof(time_t): %d (%d-bit)", sizeof(time_t), sizeof(time_t) * 8);
TEST_ASSERT_EQUAL(8, sizeof(time_t)); TEST_ASSERT_EQUAL(8, sizeof(time_t));
// mktime takes current timezone into account, this test assumes it's UTC+0 // mktime takes current timezone into account, this test assumes it's UTC+0
@ -563,7 +564,6 @@ TEST_CASE("test time functions wide 64 bits", "[newlib]")
extern int64_t s_microseconds_offset; extern int64_t s_microseconds_offset;
static const uint64_t s_start_timestamp = 1606838354; static const uint64_t s_start_timestamp = 1606838354;
static __NOINIT_ATTR uint64_t s_saved_time; static __NOINIT_ATTR uint64_t s_saved_time;
static __NOINIT_ATTR uint64_t s_time_in_reboot; static __NOINIT_ATTR uint64_t s_time_in_reboot;
@ -662,7 +662,6 @@ static void check_time(void)
TEST_ASSERT_LESS_OR_EQUAL(latency_before_run_ut, dt); TEST_ASSERT_LESS_OR_EQUAL(latency_before_run_ut, dt);
} }
TEST_CASE_MULTIPLE_STAGES("Timestamp after abort is correct in case RTC & High-res timer have + big error", "[newlib][reset=abort,SW_CPU_RESET]", set_timestamp1, check_time); TEST_CASE_MULTIPLE_STAGES("Timestamp after abort is correct in case RTC & High-res timer have + big error", "[newlib][reset=abort,SW_CPU_RESET]", set_timestamp1, check_time);
TEST_CASE_MULTIPLE_STAGES("Timestamp after restart is correct in case RTC & High-res timer have + big error", "[newlib][reset=SW_CPU_RESET]", set_timestamp2, check_time); TEST_CASE_MULTIPLE_STAGES("Timestamp after restart is correct in case RTC & High-res timer have + big error", "[newlib][reset=SW_CPU_RESET]", set_timestamp2, check_time);
TEST_CASE_MULTIPLE_STAGES("Timestamp after restart is correct in case RTC & High-res timer have - big error", "[newlib][reset=SW_CPU_RESET]", set_timestamp3, check_time); TEST_CASE_MULTIPLE_STAGES("Timestamp after restart is correct in case RTC & High-res timer have - big error", "[newlib][reset=SW_CPU_RESET]", set_timestamp3, check_time);

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -45,7 +45,7 @@ static _lock_t s_time_lock;
// This function gradually changes boot_time to the correction value and immediately updates it. // This function gradually changes boot_time to the correction value and immediately updates it.
static uint64_t adjust_boot_time(void) static uint64_t adjust_boot_time(void)
{ {
#define ADJTIME_CORRECTION_FACTOR 6 #define ADJTIME_CORRECTION_FACTOR 6
uint64_t boot_time = esp_time_impl_get_boot_time(); uint64_t boot_time = esp_time_impl_get_boot_time();
if ((boot_time == 0) || (esp_time_impl_get_time_since_boot() < s_adjtime_start_us)) { if ((boot_time == 0) || (esp_time_impl_get_time_since_boot() < s_adjtime_start_us)) {
@ -83,7 +83,6 @@ static uint64_t adjust_boot_time(void)
return boot_time; return boot_time;
} }
// Get the adjusted boot time. // Get the adjusted boot time.
static uint64_t get_adjusted_boot_time(void) static uint64_t get_adjusted_boot_time(void)
{ {
@ -94,10 +93,10 @@ static uint64_t get_adjusted_boot_time(void)
} }
// Applying the accumulated correction to base_time and stopping the smooth time adjustment. // Applying the accumulated correction to base_time and stopping the smooth time adjustment.
static void adjtime_corr_stop (void) static void adjtime_corr_stop(void)
{ {
_lock_acquire(&s_time_lock); _lock_acquire(&s_time_lock);
if (s_adjtime_start_us != 0){ if (s_adjtime_start_us != 0) {
adjust_boot_time(); adjust_boot_time();
s_adjtime_start_us = 0; s_adjtime_start_us = 0;
} }
@ -108,7 +107,7 @@ static void adjtime_corr_stop (void)
int adjtime(const struct timeval *delta, struct timeval *outdelta) int adjtime(const struct timeval *delta, struct timeval *outdelta)
{ {
#if IMPL_NEWLIB_TIME_FUNCS #if IMPL_NEWLIB_TIME_FUNCS
if(outdelta != NULL){ if (outdelta != NULL) {
_lock_acquire(&s_time_lock); _lock_acquire(&s_time_lock);
adjust_boot_time(); adjust_boot_time();
if (s_adjtime_start_us != 0) { if (s_adjtime_start_us != 0) {
@ -120,10 +119,10 @@ int adjtime(const struct timeval *delta, struct timeval *outdelta)
} }
_lock_release(&s_time_lock); _lock_release(&s_time_lock);
} }
if(delta != NULL){ if (delta != NULL) {
int64_t sec = delta->tv_sec; int64_t sec = delta->tv_sec;
int64_t usec = delta->tv_usec; int64_t usec = delta->tv_usec;
if(llabs(sec) > ((INT_MAX / 1000000L) - 1L)) { if (llabs(sec) > ((INT_MAX / 1000000L) - 1L)) {
errno = EINVAL; errno = EINVAL;
return -1; return -1;
} }
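The adjtime() implementation above follows the familiar BSD/POSIX contract: delta requests a correction that is applied gradually, outdelta (if non-NULL) reports the part of an earlier correction that is still pending, and an over-large tv_sec is rejected with EINVAL. A minimal, host-style usage sketch — it assumes a platform that declares adjtime() in <sys/time.h> (e.g. glibc with _DEFAULT_SOURCE):

    #include <stdio.h>
    #include <sys/time.h>

    int main(void)
    {
        struct timeval delta = { .tv_sec = 1, .tv_usec = 500000 };  /* ask for +1.5 s, applied gradually */
        struct timeval outdelta;

        if (adjtime(&delta, &outdelta) != 0) {
            perror("adjtime");
            return 1;
        }
        /* outdelta holds the not-yet-applied remainder of any previous correction */
        printf("pending correction: %ld.%06ld s\n",
               (long)outdelta.tv_sec, (long)outdelta.tv_usec);
        return 0;
    }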
@ -208,7 +207,7 @@ int usleep(useconds_t us)
unsigned int sleep(unsigned int seconds) unsigned int sleep(unsigned int seconds)
{ {
usleep(seconds*1000000UL); usleep(seconds * 1000000UL);
return 0; return 0;
} }
@ -221,14 +220,14 @@ int clock_settime(clockid_t clock_id, const struct timespec *tp)
} }
struct timeval tv; struct timeval tv;
switch (clock_id) { switch (clock_id) {
case CLOCK_REALTIME: case CLOCK_REALTIME:
tv.tv_sec = tp->tv_sec; tv.tv_sec = tp->tv_sec;
tv.tv_usec = tp->tv_nsec / 1000L; tv.tv_usec = tp->tv_nsec / 1000L;
settimeofday(&tv, NULL); settimeofday(&tv, NULL);
break; break;
default: default:
errno = EINVAL; errno = EINVAL;
return -1; return -1;
} }
return 0; return 0;
#else #else
@ -237,7 +236,7 @@ int clock_settime(clockid_t clock_id, const struct timespec *tp)
#endif #endif
} }
int clock_gettime (clockid_t clock_id, struct timespec *tp) int clock_gettime(clockid_t clock_id, struct timespec *tp)
{ {
#if IMPL_NEWLIB_TIME_FUNCS #if IMPL_NEWLIB_TIME_FUNCS
if (tp == NULL) { if (tp == NULL) {
@ -247,19 +246,19 @@ int clock_gettime (clockid_t clock_id, struct timespec *tp)
struct timeval tv; struct timeval tv;
uint64_t monotonic_time_us = 0; uint64_t monotonic_time_us = 0;
switch (clock_id) { switch (clock_id) {
case CLOCK_REALTIME: case CLOCK_REALTIME:
_gettimeofday_r(NULL, &tv, NULL); _gettimeofday_r(NULL, &tv, NULL);
tp->tv_sec = tv.tv_sec; tp->tv_sec = tv.tv_sec;
tp->tv_nsec = tv.tv_usec * 1000L; tp->tv_nsec = tv.tv_usec * 1000L;
break; break;
case CLOCK_MONOTONIC: case CLOCK_MONOTONIC:
monotonic_time_us = esp_time_impl_get_time(); monotonic_time_us = esp_time_impl_get_time();
tp->tv_sec = monotonic_time_us / 1000000LL; tp->tv_sec = monotonic_time_us / 1000000LL;
tp->tv_nsec = (monotonic_time_us % 1000000LL) * 1000L; tp->tv_nsec = (monotonic_time_us % 1000000LL) * 1000L;
break; break;
default: default:
errno = EINVAL; errno = EINVAL;
return -1; return -1;
} }
return 0; return 0;
#else #else
@ -268,7 +267,7 @@ int clock_gettime (clockid_t clock_id, struct timespec *tp)
#endif #endif
} }
int clock_getres (clockid_t clock_id, struct timespec *res) int clock_getres(clockid_t clock_id, struct timespec *res)
{ {
#if IMPL_NEWLIB_TIME_FUNCS #if IMPL_NEWLIB_TIME_FUNCS
if (res == NULL) { if (res == NULL) {

@ -62,10 +62,9 @@ typedef struct {
static SemaphoreHandle_t s_threads_mux = NULL; static SemaphoreHandle_t s_threads_mux = NULL;
portMUX_TYPE pthread_lazy_init_lock = portMUX_INITIALIZER_UNLOCKED; // Used for mutexes and cond vars and rwlocks portMUX_TYPE pthread_lazy_init_lock = portMUX_INITIALIZER_UNLOCKED; // Used for mutexes and cond vars and rwlocks
static SLIST_HEAD(esp_thread_list_head, esp_pthread_entry) s_threads_list static SLIST_HEAD(esp_thread_list_head, esp_pthread_entry) s_threads_list
= SLIST_HEAD_INITIALIZER(s_threads_list); = SLIST_HEAD_INITIALIZER(s_threads_list);
static pthread_key_t s_pthread_cfg_key; static pthread_key_t s_pthread_cfg_key;
static int pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo); static int pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo);
static void esp_pthread_cfg_key_destructor(void *value) static void esp_pthread_cfg_key_destructor(void *value)
@ -239,16 +238,16 @@ static UBaseType_t coreID_to_AffinityMask(BaseType_t core_id)
#endif #endif
static BaseType_t pthread_create_freertos_task_with_caps(TaskFunction_t pxTaskCode, static BaseType_t pthread_create_freertos_task_with_caps(TaskFunction_t pxTaskCode,
const char * const pcName, const char * const pcName,
const configSTACK_DEPTH_TYPE usStackDepth, const configSTACK_DEPTH_TYPE usStackDepth,
void * const pvParameters, void * const pvParameters,
UBaseType_t uxPriority, UBaseType_t uxPriority,
BaseType_t core_id, BaseType_t core_id,
UBaseType_t uxStackMemoryCaps, UBaseType_t uxStackMemoryCaps,
TaskHandle_t * const pxCreatedTask) TaskHandle_t * const pxCreatedTask)
{ {
#if CONFIG_SPIRAM #if CONFIG_SPIRAM
#if CONFIG_FREERTOS_SMP #if CONFIG_FREERTOS_SMP
return prvTaskCreateDynamicAffinitySetWithCaps(pxTaskCode, return prvTaskCreateDynamicAffinitySetWithCaps(pxTaskCode,
pcName, pcName,
usStackDepth, usStackDepth,
@ -257,7 +256,7 @@ static BaseType_t pthread_create_freertos_task_with_caps(TaskFunction_t pxTaskCo
coreID_to_AffinityMask(core_id), coreID_to_AffinityMask(core_id),
uxStackMemoryCaps, uxStackMemoryCaps,
pxCreatedTask); pxCreatedTask);
#else #else
return prvTaskCreateDynamicPinnedToCoreWithCaps(pxTaskCode, return prvTaskCreateDynamicPinnedToCoreWithCaps(pxTaskCode,
pcName, pcName,
usStackDepth, usStackDepth,
@ -266,7 +265,7 @@ static BaseType_t pthread_create_freertos_task_with_caps(TaskFunction_t pxTaskCo
core_id, core_id,
uxStackMemoryCaps, uxStackMemoryCaps,
pxCreatedTask); pxCreatedTask);
#endif #endif
#else #else
return xTaskCreatePinnedToCore(pxTaskCode, return xTaskCreatePinnedToCore(pxTaskCode,
pcName, pcName,
@ -279,7 +278,7 @@ static BaseType_t pthread_create_freertos_task_with_caps(TaskFunction_t pxTaskCo
} }
int pthread_create(pthread_t *thread, const pthread_attr_t *attr, int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
void *(*start_routine) (void *), void *arg) void *(*start_routine)(void *), void *arg)
{ {
TaskHandle_t xHandle = NULL; TaskHandle_t xHandle = NULL;
@ -360,13 +359,13 @@ int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
pthread->task_arg = task_arg; pthread->task_arg = task_arg;
BaseType_t res = pthread_create_freertos_task_with_caps(&pthread_task_func, BaseType_t res = pthread_create_freertos_task_with_caps(&pthread_task_func,
task_name, task_name,
stack_size, stack_size,
task_arg, task_arg,
prio, prio,
core_id, core_id,
stack_alloc_caps, stack_alloc_caps,
&xHandle); &xHandle);
if (res != pdPASS) { if (res != pdPASS) {
ESP_LOGE(TAG, "Failed to create task!"); ESP_LOGE(TAG, "Failed to create task!");
@ -549,7 +548,7 @@ int pthread_cancel(pthread_t thread)
return ENOSYS; return ENOSYS;
} }
int sched_yield( void ) int sched_yield(void)
{ {
vTaskDelay(0); vTaskDelay(0);
return 0; return 0;
@ -594,8 +593,8 @@ int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
static int mutexattr_check(const pthread_mutexattr_t *attr) static int mutexattr_check(const pthread_mutexattr_t *attr)
{ {
if (attr->type != PTHREAD_MUTEX_NORMAL && if (attr->type != PTHREAD_MUTEX_NORMAL &&
attr->type != PTHREAD_MUTEX_RECURSIVE && attr->type != PTHREAD_MUTEX_RECURSIVE &&
attr->type != PTHREAD_MUTEX_ERRORCHECK) { attr->type != PTHREAD_MUTEX_ERRORCHECK) {
return EINVAL; return EINVAL;
} }
return 0; return 0;
@ -686,7 +685,7 @@ static int pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo)
} }
if ((mux->type == PTHREAD_MUTEX_ERRORCHECK) && if ((mux->type == PTHREAD_MUTEX_ERRORCHECK) &&
(xSemaphoreGetMutexHolder(mux->sem) == xTaskGetCurrentTaskHandle())) { (xSemaphoreGetMutexHolder(mux->sem) == xTaskGetCurrentTaskHandle())) {
return EDEADLK; return EDEADLK;
} }
@ -740,8 +739,8 @@ int pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *timeo
struct timespec currtime; struct timespec currtime;
clock_gettime(CLOCK_REALTIME, &currtime); clock_gettime(CLOCK_REALTIME, &currtime);
TickType_t tmo = ((timeout->tv_sec - currtime.tv_sec)*1000 + TickType_t tmo = ((timeout->tv_sec - currtime.tv_sec) * 1000 +
(timeout->tv_nsec - currtime.tv_nsec)/1000000)/portTICK_PERIOD_MS; (timeout->tv_nsec - currtime.tv_nsec) / 1000000) / portTICK_PERIOD_MS;
res = pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, tmo); res = pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, tmo);
if (res == EBUSY) { if (res == EBUSY) {
@ -775,8 +774,8 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
} }
if (((mux->type == PTHREAD_MUTEX_RECURSIVE) || if (((mux->type == PTHREAD_MUTEX_RECURSIVE) ||
(mux->type == PTHREAD_MUTEX_ERRORCHECK)) && (mux->type == PTHREAD_MUTEX_ERRORCHECK)) &&
(xSemaphoreGetMutexHolder(mux->sem) != xTaskGetCurrentTaskHandle())) { (xSemaphoreGetMutexHolder(mux->sem) != xTaskGetCurrentTaskHandle())) {
return EPERM; return EPERM;
} }

@ -127,7 +127,7 @@ int pthread_cond_timedwait(pthread_cond_t *cv, pthread_mutex_t *mut, const struc
timersub(&abs_time, &cur_time, &diff_time); timersub(&abs_time, &cur_time, &diff_time);
// Round up timeout microseconds to the next millisecond // Round up timeout microseconds to the next millisecond
timeout_msec = (diff_time.tv_sec * 1000) + timeout_msec = (diff_time.tv_sec * 1000) +
((diff_time.tv_usec + 1000 - 1) / 1000); ((diff_time.tv_usec + 1000 - 1) / 1000);
} }
if (timeout_msec <= 0) { if (timeout_msec <= 0) {

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2017-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2017-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -17,7 +17,7 @@
/* Sanity check to ensure that the number of FreeRTOS TLSPs is at least 1 */ /* Sanity check to ensure that the number of FreeRTOS TLSPs is at least 1 */
#if (CONFIG_FREERTOS_THREAD_LOCAL_STORAGE_POINTERS < 1) #if (CONFIG_FREERTOS_THREAD_LOCAL_STORAGE_POINTERS < 1)
#error "CONFIG_FREERTOS_THREAD_LOCAL_STORAGE_POINTERS cannot be 0 for pthread TLS" #error "CONFIG_FREERTOS_THREAD_LOCAL_STORAGE_POINTERS cannot be 0 for pthread TLS"
#endif #endif
#define PTHREAD_TLS_INDEX 0 #define PTHREAD_TLS_INDEX 0
@ -77,7 +77,7 @@ static key_entry_t *find_key(pthread_key_t key)
portENTER_CRITICAL(&s_keys_lock); portENTER_CRITICAL(&s_keys_lock);
key_entry_t *result = NULL;; key_entry_t *result = NULL;;
SLIST_FOREACH(result, &s_keys, next) { SLIST_FOREACH(result, &s_keys, next) {
if(result->key == key) { if (result->key == key) {
break; break;
} }
} }
@ -171,7 +171,7 @@ static value_entry_t *find_value(const values_list_t *list, pthread_key_t key)
{ {
value_entry_t *result = NULL;; value_entry_t *result = NULL;;
SLIST_FOREACH(result, list, next) { SLIST_FOREACH(result, list, next) {
if(result->key == key) { if (result->key == key) {
break; break;
} }
} }
@ -186,7 +186,7 @@ void *pthread_getspecific(pthread_key_t key)
} }
value_entry_t *entry = find_value(tls, key); value_entry_t *entry = find_value(tls, key);
if(entry != NULL) { if (entry != NULL) {
return entry->value; return entry->value;
} }
return NULL; return NULL;

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -23,7 +23,6 @@
#include "esp_log.h" #include "esp_log.h"
const static char *TAG = "pthread_rw_lock"; const static char *TAG = "pthread_rw_lock";
/** pthread rw_mutex FreeRTOS wrapper */ /** pthread rw_mutex FreeRTOS wrapper */
typedef struct { typedef struct {
/** /**
@ -46,8 +45,8 @@ typedef struct {
#define WRITER_QUEUE_SIZE 4 #define WRITER_QUEUE_SIZE 4
#define READER_QUEUE_SIZE 4 #define READER_QUEUE_SIZE 4
int pthread_rwlock_init (pthread_rwlock_t *rwlock, int pthread_rwlock_init(pthread_rwlock_t *rwlock,
const pthread_rwlockattr_t *attr) const pthread_rwlockattr_t *attr)
{ {
int result; int result;
if (!rwlock) { if (!rwlock) {
@ -99,7 +98,7 @@ static int pthread_rwlock_init_if_static(pthread_rwlock_t *rwlock)
return res; return res;
} }
int pthread_rwlock_destroy (pthread_rwlock_t *rwlock) int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{ {
esp_pthread_rwlock_t *esp_rwlock; esp_pthread_rwlock_t *esp_rwlock;
@ -158,7 +157,7 @@ static int checkrw_lock(pthread_rwlock_t *rwlock)
return 0; return 0;
} }
int pthread_rwlock_rdlock (pthread_rwlock_t *rwlock) int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{ {
esp_pthread_rwlock_t *esp_rwlock; esp_pthread_rwlock_t *esp_rwlock;
int res; int res;
@ -191,7 +190,7 @@ int pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
return 0; return 0;
} }
int pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{ {
esp_pthread_rwlock_t *esp_rwlock; esp_pthread_rwlock_t *esp_rwlock;
int res; int res;
@ -219,7 +218,7 @@ int pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
return res; return res;
} }
int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{ {
esp_pthread_rwlock_t *esp_rwlock; esp_pthread_rwlock_t *esp_rwlock;
int res; int res;
@ -247,7 +246,8 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
return 0; return 0;
} }
int pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) { int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
esp_pthread_rwlock_t *esp_rwlock; esp_pthread_rwlock_t *esp_rwlock;
int res; int res;
@ -276,7 +276,7 @@ int pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) {
return res; return res;
} }
int pthread_rwlock_unlock (pthread_rwlock_t *rwlock) int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{ {
esp_pthread_rwlock_t *esp_rwlock; esp_pthread_rwlock_t *esp_rwlock;
int res; int res;

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -24,7 +24,7 @@ int sem_destroy(sem_t * semaphore)
return -1; return -1;
} }
SemaphoreHandle_t freertos_semaphore = (SemaphoreHandle_t) *semaphore; SemaphoreHandle_t freertos_semaphore = (SemaphoreHandle_t) * semaphore;
vSemaphoreDelete(freertos_semaphore); vSemaphoreDelete(freertos_semaphore);
return 0; return 0;
} }
@ -60,7 +60,7 @@ int sem_post(sem_t * semaphore)
return -1; return -1;
} }
SemaphoreHandle_t freertos_semaphore = (SemaphoreHandle_t) *semaphore; SemaphoreHandle_t freertos_semaphore = (SemaphoreHandle_t) * semaphore;
BaseType_t ret = xSemaphoreGive(freertos_semaphore); BaseType_t ret = xSemaphoreGive(freertos_semaphore);
if (ret == pdFALSE) { if (ret == pdFALSE) {
@ -96,7 +96,7 @@ int sem_timedwait(sem_t * restrict semaphore, const struct timespec *restrict ab
long timeout_msec; long timeout_msec;
// Round up timeout nanoseconds to the next millisecond // Round up timeout nanoseconds to the next millisecond
timeout_msec = (diff_time.tv_sec * 1000) + timeout_msec = (diff_time.tv_sec * 1000) +
((diff_time.tv_nsec + (1 * MIO) - 1) / (1 * MIO)); ((diff_time.tv_nsec + (1 * MIO) - 1) / (1 * MIO));
// Round up milliseconds to the next tick // Round up milliseconds to the next tick
timeout_ticks = (timeout_msec + portTICK_PERIOD_MS - 1) / portTICK_PERIOD_MS; timeout_ticks = (timeout_msec + portTICK_PERIOD_MS - 1) / portTICK_PERIOD_MS;
@ -112,7 +112,7 @@ int sem_timedwait(sem_t * restrict semaphore, const struct timespec *restrict ab
timeout_ticks += 1; timeout_ticks += 1;
} }
SemaphoreHandle_t freertos_semaphore = (SemaphoreHandle_t) *semaphore; SemaphoreHandle_t freertos_semaphore = (SemaphoreHandle_t) * semaphore;
BaseType_t sem_take_result; BaseType_t sem_take_result;
sem_take_result = xSemaphoreTake(freertos_semaphore, timeout_ticks); sem_take_result = xSemaphoreTake(freertos_semaphore, timeout_ticks);
if (sem_take_result == pdFALSE) { if (sem_take_result == pdFALSE) {
@ -130,7 +130,7 @@ int sem_trywait(sem_t * semaphore)
return -1; return -1;
} }
SemaphoreHandle_t freertos_semaphore = (SemaphoreHandle_t) *semaphore; SemaphoreHandle_t freertos_semaphore = (SemaphoreHandle_t) * semaphore;
BaseType_t ret = xSemaphoreTake(freertos_semaphore, 0); BaseType_t ret = xSemaphoreTake(freertos_semaphore, 0);
@ -149,7 +149,7 @@ int sem_wait(sem_t * semaphore)
return -1; return -1;
} }
SemaphoreHandle_t freertos_semaphore = (SemaphoreHandle_t) *semaphore; SemaphoreHandle_t freertos_semaphore = (SemaphoreHandle_t) * semaphore;
// Only returns failure if block time expires, but we block indefinitely, hence not return code check // Only returns failure if block time expires, but we block indefinitely, hence not return code check
xSemaphoreTake(freertos_semaphore, portMAX_DELAY); xSemaphoreTake(freertos_semaphore, portMAX_DELAY);
@ -168,7 +168,7 @@ int sem_getvalue(sem_t *restrict semaphore, int *restrict sval)
return -1; return -1;
} }
SemaphoreHandle_t freertos_semaphore = (SemaphoreHandle_t) *semaphore; SemaphoreHandle_t freertos_semaphore = (SemaphoreHandle_t) * semaphore;
*sval = uxSemaphoreGetCount(freertos_semaphore); *sval = uxSemaphoreGetCount(freertos_semaphore);
return 0; return 0;
} }
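sem_timedwait() above expects an absolute CLOCK_REALTIME deadline and internally rounds the remaining time up to whole milliseconds and then ticks. A small caller-side sketch that builds such a deadline from a relative timeout; sem_wait_ms is a hypothetical helper name, not part of this component:

    #include <errno.h>
    #include <semaphore.h>
    #include <time.h>

    /* Wait on sem for at most timeout_ms; returns 0 on success, 1 on timeout, -1 on error. */
    static int sem_wait_ms(sem_t *sem, long timeout_ms)
    {
        struct timespec deadline;
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec  += timeout_ms / 1000;
        deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (deadline.tv_nsec >= 1000000000L) {      /* normalize the nanosecond field */
            deadline.tv_sec  += 1;
            deadline.tv_nsec -= 1000000000L;
        }
        if (sem_timedwait(sem, &deadline) == 0) {
            return 0;
        }
        return (errno == ETIMEDOUT) ? 1 : -1;
    }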

@ -11,7 +11,6 @@
#include "unity_test_runner.h" #include "unity_test_runner.h"
#include "esp_heap_caps.h" #include "esp_heap_caps.h"
// Some resources are lazy allocated (e.g. newlib locks), the threshold is left for that case // Some resources are lazy allocated (e.g. newlib locks), the threshold is left for that case
#define TEST_MEMORY_LEAK_THRESHOLD (-200) #define TEST_MEMORY_LEAK_THRESHOLD (-200)

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Unlicense OR CC0-1.0 * SPDX-License-Identifier: Unlicense OR CC0-1.0
*/ */
@ -24,10 +24,13 @@ static void waits(int idx, int timeout_ms)
std::unique_lock<std::mutex> lk(cv_m); std::unique_lock<std::mutex> lk(cv_m);
auto now = std::chrono::system_clock::now(); auto now = std::chrono::system_clock::now();
if(cv.wait_until(lk, now + std::chrono::milliseconds(timeout_ms), [](){return i == 1;})) if (cv.wait_until(lk, now + std::chrono::milliseconds(timeout_ms), []() {
std::cout << "Thread " << idx << " finished waiting. i == " << i << '\n'; return i == 1;
else }))
std::cout << "Thread " << idx << " finished waiting. i == " << i << '\n';
else {
std::cout << "Thread " << idx << " timed out. i == " << i << '\n'; std::cout << "Thread " << idx << " timed out. i == " << i << '\n';
}
} }
static void signals(int signal_ms) static void signals(int signal_ms)
@ -53,7 +56,6 @@ TEST_CASE("C++ condition_variable", "[std::condition_variable]")
std::cout << "All threads joined\n"; std::cout << "All threads joined\n";
} }
TEST_CASE("cxx: condition_variable can timeout", "[cxx]") TEST_CASE("cxx: condition_variable can timeout", "[cxx]")
{ {
std::condition_variable cv; std::condition_variable cv;
@ -76,19 +78,20 @@ TEST_CASE("cxx: condition_variable timeout never before deadline", "[cxx]")
std::unique_lock<std::mutex> lock(mutex); std::unique_lock<std::mutex> lock(mutex);
for (int i = 0; i < 25; ++i) { for (int i = 0; i < 25; ++i) {
auto timeout = std::chrono::milliseconds(portTICK_PERIOD_MS * (i+1)); auto timeout = std::chrono::milliseconds(portTICK_PERIOD_MS * (i + 1));
auto deadline = SysClock::now() + timeout; auto deadline = SysClock::now() + timeout;
auto secs = std::chrono::time_point_cast<std::chrono::seconds>(deadline); auto secs = std::chrono::time_point_cast<std::chrono::seconds>(deadline);
auto nsecs = std::chrono::duration_cast<std::chrono::nanoseconds> auto nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>
(deadline - secs); (deadline - secs);
struct timespec ts = { struct timespec ts = {
.tv_sec = static_cast<time_t>(secs.time_since_epoch().count()), .tv_sec = static_cast<time_t>(secs.time_since_epoch().count()),
.tv_nsec = static_cast<long>(nsecs.count())}; .tv_nsec = static_cast<long>(nsecs.count())
};
int rc = ::pthread_cond_timedwait(cond.native_handle(), int rc = ::pthread_cond_timedwait(cond.native_handle(),
lock.mutex()->native_handle(), &ts); lock.mutex()->native_handle(), &ts);
auto status = (rc == ETIMEDOUT) ? std::cv_status::timeout : auto status = (rc == ETIMEDOUT) ? std::cv_status::timeout :
std::cv_status::no_timeout; std::cv_status::no_timeout;
auto end = SysClock::now(); auto end = SysClock::now();
auto extra = end - deadline; auto extra = end - deadline;
auto extra_us = extra / std::chrono::microseconds(1); auto extra_us = extra / std::chrono::microseconds(1);

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Unlicense OR CC0-1.0 * SPDX-License-Identifier: Unlicense OR CC0-1.0
*/ */
@ -12,17 +12,17 @@
TEST_CASE("C++ future", "[std::future]") TEST_CASE("C++ future", "[std::future]")
{ {
// future from a packaged_task // future from a packaged_task
std::packaged_task<int()> task([]{ return 7; }); // wrap the function std::packaged_task<int()> task([] { return 7; }); // wrap the function
std::future<int> f1 = task.get_future(); // get a future std::future<int> f1 = task.get_future(); // get a future
std::thread t(std::move(task)); // launch on a thread std::thread t(std::move(task)); // launch on a thread
// future from an async() // future from an async()
std::future<int> f2 = std::async(std::launch::async, []{ return 8; }); std::future<int> f2 = std::async(std::launch::async, [] { return 8; });
// future from a promise // future from a promise
std::promise<int> p; std::promise<int> p;
std::future<int> f3 = p.get_future(); std::future<int> f3 = p.get_future();
std::thread( [&p]{ p.set_value_at_thread_exit(9); }).detach(); std::thread([&p] { p.set_value_at_thread_exit(9); }).detach();
std::cout << "Waiting..." << std::flush; std::cout << "Waiting..." << std::flush;
f1.wait(); f1.wait();

@ -232,7 +232,7 @@ static void test_mutex_lock_unlock(int mutex_type)
res = pthread_mutex_lock(&mutex); res = pthread_mutex_lock(&mutex);
if(mutex_type == PTHREAD_MUTEX_ERRORCHECK) { if (mutex_type == PTHREAD_MUTEX_ERRORCHECK) {
TEST_ASSERT_EQUAL_INT(EDEADLK, res); TEST_ASSERT_EQUAL_INT(EDEADLK, res);
} else { } else {
TEST_ASSERT_EQUAL_INT(0, res); TEST_ASSERT_EQUAL_INT(0, res);

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Unlicense OR CC0-1.0 * SPDX-License-Identifier: Unlicense OR CC0-1.0
*/ */
@ -114,7 +114,7 @@ TEST_CASE("pthread local storage destructor in FreeRTOS task", "[thread-specific
TEST_ASSERT_EQUAL(0, pthread_key_create(&key, test_pthread_destructor)); TEST_ASSERT_EQUAL(0, pthread_key_create(&key, test_pthread_destructor));
xTaskCreate(task_test_pthread_destructor, xTaskCreate(task_test_pthread_destructor,
"ptdest", 8192, (void *)key, UNITY_FREERTOS_PRIORITY+1, "ptdest", 8192, (void *)key, UNITY_FREERTOS_PRIORITY + 1,
NULL); NULL);
// Above task has higher priority to us, so should run immediately // Above task has higher priority to us, so should run immediately
@ -146,14 +146,13 @@ static void *thread_stress_test(void *v_key)
pthread_setspecific(key, tls_value); pthread_setspecific(key, tls_value);
for(int i = 0; i < STRESS_NUMITER; i++) { for (int i = 0; i < STRESS_NUMITER; i++) {
TEST_ASSERT_EQUAL_HEX32(pthread_getspecific(key), tls_value); TEST_ASSERT_EQUAL_HEX32(pthread_getspecific(key), tls_value);
} }
return NULL; return NULL;
} }
// This test case added to reproduce issues with unpinned tasks and TLS // This test case added to reproduce issues with unpinned tasks and TLS
TEST_CASE("pthread local storage stress test", "[thread-specific]") TEST_CASE("pthread local storage stress test", "[thread-specific]")
{ {
@ -169,7 +168,6 @@ TEST_CASE("pthread local storage stress test", "[thread-specific]")
} }
} }
#define NUM_KEYS 4 // number of keys used in repeat destructor test #define NUM_KEYS 4 // number of keys used in repeat destructor test
#define NUM_REPEATS 17 // number of times we re-set a key to a non-NULL value to re-trigger destructor #define NUM_REPEATS 17 // number of times we re-set a key to a non-NULL value to re-trigger destructor
@ -179,7 +177,6 @@ typedef struct {
int last_idx; // index of last key where destructor was called int last_idx; // index of last key where destructor was called
} destr_test_state_t; } destr_test_state_t;
static void s_test_repeat_destructor(void *vp_state); static void s_test_repeat_destructor(void *vp_state);
static void *s_test_repeat_destructor_thread(void *vp_state); static void *s_test_repeat_destructor_thread(void *vp_state);
@ -202,7 +199,7 @@ TEST_CASE("pthread local storage 'repeat' destructor test", "[thread-specific]")
TEST_ASSERT_EQUAL(0, r); TEST_ASSERT_EQUAL(0, r);
r = pthread_join(thread, NULL); r = pthread_join(thread, NULL);
TEST_ASSERT_EQUAL(0 ,r); TEST_ASSERT_EQUAL(0, r);
// Cheating here to make sure compiler reads the value of 'count' from memory not from a register // Cheating here to make sure compiler reads the value of 'count' from memory not from a register
// //

@ -80,7 +80,6 @@ components_not_formatted_temporary:
- "/components/lwip/" - "/components/lwip/"
- "/components/mbedtls/" - "/components/mbedtls/"
- "/components/mqtt/" - "/components/mqtt/"
- "/components/newlib/"
- "/components/nvs_flash/" - "/components/nvs_flash/"
- "/components/nvs_sec_provider/" - "/components/nvs_sec_provider/"
- "/components/openthread/" - "/components/openthread/"
@ -88,7 +87,6 @@ components_not_formatted_temporary:
- "/components/perfmon/" - "/components/perfmon/"
- "/components/protobuf-c/" - "/components/protobuf-c/"
- "/components/protocomm/" - "/components/protocomm/"
- "/components/pthread/"
- "/components/riscv/" - "/components/riscv/"
- "/components/sdmmc/" - "/components/sdmmc/"
- "/components/soc/" - "/components/soc/"