feat(pthread): update pthread from esp-idf

Commit ID: aa087667
Author: dongheng
Date: 2019-08-27 10:31:21 +08:00
parent 3986202ee9
commit 8cf028873d
15 changed files with 949 additions and 165 deletions

View File

@@ -1,11 +1,18 @@
-if(CONFIG_ENABLE_PTHREAD)
-    set(COMPONENT_SRCDIRS "src")
-    set(COMPONENT_ADD_INCLUDEDIRS "include")
-    set(COMPONENT_REQUIRES)
-endif()
-
-register_component()
-
-if(CONFIG_ENABLE_STATIC_TASK_CLEAN_UP_HOOK)
-    target_link_libraries(pthread "-Wl,--wrap=vPortCleanUpTCB")
-endif()
+idf_component_register(SRCS "pthread.c"
+                            "pthread_cond_var.c"
+                            "pthread_local_storage.c"
+                       INCLUDE_DIRS include)
+
+if(GCC_NOT_5_2_0)
+    set(extra_link_flags "-u pthread_include_pthread_impl")
+    list(APPEND extra_link_flags "-u pthread_include_pthread_cond_impl")
+    list(APPEND extra_link_flags "-u pthread_include_pthread_local_storage_impl")
+endif()
+
+if(CONFIG_FREERTOS_ENABLE_STATIC_TASK_CLEAN_UP)
+    target_link_libraries(${COMPONENT_LIB} "-Wl,--wrap=vPortCleanUpTCB")
+endif()
+
+if(extra_link_flags)
+    target_link_libraries(${COMPONENT_LIB} INTERFACE "${extra_link_flags}")
+endif()

View File

@@ -1,22 +1,49 @@
 menu "PThreads"
 
-config ENABLE_PTHREAD
-    bool "Enable pthread"
-    default n
-    help
-        Enable this option and then pthread is to be used.
-
-config ESP32_PTHREAD_TASK_PRIO_DEFAULT
-    int "Default task priority"
-    range 0 255
-    default 5
-    help
-        Priority used to create new tasks with default pthread parameters.
-
-config ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT
-    int "Default task stack size"
-    default 3072
-    help
-        Stack size used to create new tasks with default pthread parameters.
+config PTHREAD_TASK_PRIO_DEFAULT
+    int "Default task priority"
+    range 0 255
+    default 5
+    help
+        Priority used to create new tasks with default pthread parameters.
+
+config PTHREAD_TASK_STACK_SIZE_DEFAULT
+    int "Default task stack size"
+    default 3072
+    help
+        Stack size used to create new tasks with default pthread parameters.
+
+config PTHREAD_STACK_MIN
+    int "Minimum allowed pthread stack size"
+    default 768
+    help
+        Minimum allowed pthread stack size set in attributes passed to pthread_create
+
+choice PTHREAD_TASK_CORE_DEFAULT
+    bool "Default pthread core affinity"
+    default PTHREAD_DEFAULT_CORE_NO_AFFINITY
+    depends on !FREERTOS_UNICORE
+    help
+        The default core to which pthreads are pinned.
+
+    config PTHREAD_DEFAULT_CORE_NO_AFFINITY
+        bool "No affinity"
+
+    config PTHREAD_DEFAULT_CORE_0
+        bool "Core 0"
+
+    config PTHREAD_DEFAULT_CORE_1
+        bool "Core 1"
+endchoice
+
+config PTHREAD_TASK_CORE_DEFAULT
+    int
+    default -1 if PTHREAD_DEFAULT_CORE_NO_AFFINITY || FREERTOS_UNICORE
+    default 0 if PTHREAD_DEFAULT_CORE_0
+    default 1 if PTHREAD_DEFAULT_CORE_1
+
+config PTHREAD_TASK_NAME_DEFAULT
+    string "Default name of pthreads"
+    default "pthread"
+    help
+        The default name of pthreads.
+
 endmenu
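
The renamed options end up in the generated sdkconfig.h as plain CONFIG_ macros, which is how the defaults above reach the code. A minimal sketch of reading them back (the printing helper is illustrative only; the CONFIG_ names come from the Kconfig entries above):

#include <stdio.h>
#include "sdkconfig.h"   /* generated from the Kconfig options above */

/* Dump the compile-time pthread defaults selected via menuconfig. */
void print_pthread_defaults(void)
{
    printf("default priority:   %d\n", CONFIG_PTHREAD_TASK_PRIO_DEFAULT);
    printf("default stack size: %d bytes\n", CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT);
    printf("minimum stack size: %d bytes\n", CONFIG_PTHREAD_STACK_MIN);
    printf("default task name:  %s\n", CONFIG_PTHREAD_TASK_NAME_DEFAULT);
    printf("default core:       %d\n", CONFIG_PTHREAD_TASK_CORE_DEFAULT);  /* -1 means no affinity */
}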

View File

@@ -2,14 +2,20 @@
 # Component Makefile
 #
 
-ifdef CONFIG_ENABLE_PTHREAD
-COMPONENT_SRCDIRS := src
-endif
+COMPONENT_SRCDIRS := .
 COMPONENT_ADD_INCLUDEDIRS := include
 
 COMPONENT_ADD_LDFLAGS := -lpthread
 
-ifdef CONFIG_ENABLE_STATIC_TASK_CLEAN_UP_HOOK
+ifdef CONFIG_FREERTOS_ENABLE_STATIC_TASK_CLEAN_UP
 COMPONENT_ADD_LDFLAGS += -Wl,--wrap=vPortCleanUpTCB
 endif
+
+ifeq ($(GCC_NOT_5_2_0), 1)
+# Forces the linker to include pthread implementation from this component,
+# instead of the weak implementations provided by libgcc.
+COMPONENT_ADD_LDFLAGS += -u pthread_include_pthread_impl
+COMPONENT_ADD_LDFLAGS += -u pthread_include_pthread_cond_impl
+COMPONENT_ADD_LDFLAGS += -u pthread_include_pthread_local_storage_impl
+endif # GCC_NOT_5_2_0
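
The -u flags above pair with empty hook functions defined in the sources (they appear later in this diff as pthread_include_pthread_impl() and friends): forcing the symbol to be undefined makes the linker pull the corresponding object out of libpthread.a, so these implementations win over the weak stubs shipped in libgcc. A stand-alone sketch of the pattern, with purely illustrative file and symbol names:

/* strong_impl.c -- compiled into a static library, e.g. libfeature.a */

/* Strong implementation that should replace a weak default elsewhere. */
int feature_do_work(void)
{
    return 42;
}

/* Empty hook: never called, it only gives the linker a symbol to resolve,
 * which drags this whole object file (and feature_do_work) into the link. */
void feature_include_impl(void)
{
}

/* Link the application with:
 *     cc main.o -Wl,-u,feature_include_impl -lfeature
 * The -u option marks feature_include_impl as undefined, so strong_impl.o is
 * pulled from libfeature.a even if nothing in main.o references it directly. */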

View File

@@ -14,19 +14,35 @@
 #pragma once
 
+#include "esp_err.h"
+#include <freertos/FreeRTOSConfig.h>
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#include <pthread.h>
+#ifndef PTHREAD_STACK_MIN
+#define PTHREAD_STACK_MIN CONFIG_PTHREAD_STACK_MIN
+#endif
 
 /** pthread configuration structure that influences pthread creation */
 typedef struct {
-    size_t stack_size;  ///< the stack size of the pthread
-    size_t prio;        ///< the thread's priority
-    bool inherit_cfg;   ///< inherit this configuration further
+    size_t stack_size;  ///< The stack size of the pthread
+    size_t prio;        ///< The thread's priority
+    bool inherit_cfg;   ///< Inherit this configuration further
+    const char* thread_name;  ///< The thread name.
+    int pin_to_core;    ///< The core id to pin the thread to. Has the same value range as xCoreId argument of xTaskCreatePinnedToCore.
 } esp_pthread_cfg_t;
 
+/**
+ * @brief Creates a default pthread configuration based
+ *        on the values set via menuconfig.
+ *
+ * @return
+ *      A default configuration structure.
+ */
+esp_pthread_cfg_t esp_pthread_get_default_config(void);
+
 /**
  * @brief Configure parameters for creating pthread
  *
@@ -39,11 +55,15 @@ typedef struct {
  *        then the same configuration is also inherited in the thread
  *        subtree.
  *
+ * @note Passing non-NULL attributes to pthread_create() will override
+ *       the stack_size parameter set using this API
+ *
  * @param cfg The pthread config parameters
  *
  * @return
  *      - ESP_OK if configuration was successfully set
  *      - ESP_ERR_NO_MEM if out of memory
+ *      - ESP_ERR_INVALID_ARG if stack_size is less than PTHREAD_STACK_MIN
  */
 esp_err_t esp_pthread_set_cfg(const esp_pthread_cfg_t *cfg);
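
Typical use of the new configuration API declared above: start from the menuconfig defaults, adjust a few fields, install them with esp_pthread_set_cfg(), and every pthread created afterwards by this thread picks them up. A hedged sketch (the worker body and the concrete sizes are placeholders):

#include <pthread.h>
#include <stdio.h>
#include "esp_pthread.h"

static void *worker(void *arg)
{
    printf("worker running\n");
    return NULL;
}

void start_configured_thread(void)
{
    esp_pthread_cfg_t cfg = esp_pthread_get_default_config();
    cfg.stack_size = 4096;      /* in bytes; must be >= PTHREAD_STACK_MIN */
    cfg.prio = 5;
    cfg.thread_name = "worker";
    cfg.inherit_cfg = true;     /* threads created by "worker" reuse this config */

    if (esp_pthread_set_cfg(&cfg) != ESP_OK) {
        printf("failed to set pthread config\n");
        return;
    }

    pthread_t tid;
    if (pthread_create(&tid, NULL, worker, NULL) == 0) {
        pthread_join(tid, NULL);
    }
}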

View File

@@ -1,4 +1,4 @@
-// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
+// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,35 +14,23 @@
 //
 // This module implements pthread API on top of FreeRTOS. API is implemented to the level allowing
 // libstdcxx threading framework to operate correctly. So not all original pthread routines are supported.
-// Moreover some implemened functions do not provide full functionality, e.g. pthread_create does not support
-// thread's attributes customization (prio, stack size and so on). So if you are not satisfied with default
-// behavior use native FreeRTOS API.
 //
 
+#include <time.h>
 #include <errno.h>
 #include <pthread.h>
 #include <string.h>
-#include <stdlib.h>
 #include "esp_err.h"
 #include "esp_attr.h"
 #include "sys/queue.h"
-#include "sys/types.h"
 #include "freertos/FreeRTOS.h"
 #include "freertos/task.h"
 #include "freertos/semphr.h"
+#include "soc/soc_memory_layout.h"
 #include "pthread_internal.h"
 #include "esp_pthread.h"
 
-#ifdef CONFIG_ENABLE_PTHREAD
-
-#if portNUM_PROCESSORS == 1
-#undef portENTER_CRITICAL
-#undef portEXIT_CRITICAL
-#define portENTER_CRITICAL(l)   vPortEnterCritical()
-#define portEXIT_CRITICAL(l)    vPortExitCritical()
-#endif
-
 #define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
 #include "esp_log.h"
 
 const static char *TAG = "pthread";
@@ -60,6 +48,8 @@ typedef struct esp_pthread_entry {
     TaskHandle_t join_task;             ///< Handle of the task waiting to join
     enum esp_pthread_task_state state;  ///< pthread task state
     bool detached;                      ///< True if pthread is detached
+    void *retval;                       ///< Value supplied to calling thread during join
+    void *task_arg;                     ///< Task arguments
 } esp_pthread_t;
 
 /** pthread wrapper task arg */
@@ -77,23 +67,13 @@ typedef struct {
 
 static SemaphoreHandle_t s_threads_mux = NULL;
-#if portNUM_PROCESSORS > 1
 static portMUX_TYPE s_mutex_init_lock = portMUX_INITIALIZER_UNLOCKED;
-#endif
 static SLIST_HEAD(esp_thread_list_head, esp_pthread_entry) s_threads_list
     = SLIST_HEAD_INITIALIZER(s_threads_list);
 static pthread_key_t s_pthread_cfg_key;
 
-static int pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo);
+static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo);
 
-static inline void uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
-{
-    portENTER_CRITICAL(&s_mutex_init_lock);
-    *addr = compare;
-    *set = 0;
-    portEXIT_CRITICAL(&s_mutex_init_lock);
-}
-
 static void esp_pthread_cfg_key_destructor(void *value)
 {
@@ -107,7 +87,7 @@ esp_err_t esp_pthread_init(void)
     }
     s_threads_mux = xSemaphoreCreateMutex();
     if (s_threads_mux == NULL) {
         pthread_key_delete(s_pthread_cfg_key);
         return ESP_ERR_NO_MEM;
     }
     return ESP_OK;
@@ -157,17 +137,20 @@ static void pthread_delete(esp_pthread_t *pthread)
     free(pthread);
 }
 
 /* Call this function to configure pthread stacks in Pthreads */
 esp_err_t esp_pthread_set_cfg(const esp_pthread_cfg_t *cfg)
 {
+    if (cfg->stack_size < PTHREAD_STACK_MIN) {
+        return ESP_ERR_INVALID_ARG;
+    }
+
     /* If a value is already set, update that value */
     esp_pthread_cfg_t *p = pthread_getspecific(s_pthread_cfg_key);
     if (!p) {
         p = malloc(sizeof(esp_pthread_cfg_t));
         if (!p) {
             return ESP_ERR_NO_MEM;
         }
     }
     *p = *cfg;
     pthread_setspecific(s_pthread_cfg_key, p);
@@ -178,57 +161,55 @@ esp_err_t esp_pthread_get_cfg(esp_pthread_cfg_t *p)
 {
     esp_pthread_cfg_t *cfg = pthread_getspecific(s_pthread_cfg_key);
     if (cfg) {
         *p = *cfg;
         return ESP_OK;
     }
     memset(p, 0, sizeof(*p));
     return ESP_ERR_NOT_FOUND;
 }
 
+static int get_default_pthread_core(void)
+{
+    return CONFIG_PTHREAD_TASK_CORE_DEFAULT == -1 ? tskNO_AFFINITY : CONFIG_PTHREAD_TASK_CORE_DEFAULT;
+}
+
+esp_pthread_cfg_t esp_pthread_get_default_config(void)
+{
+    esp_pthread_cfg_t cfg = {
+        .stack_size = CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT,
+        .prio = CONFIG_PTHREAD_TASK_PRIO_DEFAULT,
+        .inherit_cfg = false,
+        .thread_name = NULL,
+        .pin_to_core = get_default_pthread_core()
+    };
+
+    return cfg;
+}
+
 static void pthread_task_func(void *arg)
 {
+    void *rval = NULL;
     esp_pthread_task_arg_t *task_arg = (esp_pthread_task_arg_t *)arg;
 
     ESP_LOGV(TAG, "%s ENTER %p", __FUNCTION__, task_arg->func);
 
     // wait for start
     xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
 
     if (task_arg->cfg.inherit_cfg) {
-        /* If inherit option is set, then do a set_cfg() ourselves for future forks */
-        esp_pthread_set_cfg(&task_arg->cfg);
+        /* If inherit option is set, then do a set_cfg() ourselves for future forks,
+        but first set thread_name to NULL to enable inheritance of the name too.
+        (This also to prevents dangling pointers to name of tasks that might
+        possibly have been deleted when we use the configuration).*/
+        esp_pthread_cfg_t *cfg = &task_arg->cfg;
+        cfg->thread_name = NULL;
+        esp_pthread_set_cfg(cfg);
     }
     ESP_LOGV(TAG, "%s START %p", __FUNCTION__, task_arg->func);
-    task_arg->func(task_arg->arg);
+    rval = task_arg->func(task_arg->arg);
     ESP_LOGV(TAG, "%s END %p", __FUNCTION__, task_arg->func);
-    free(task_arg);
 
-    /* preemptively clean up thread local storage, rather than
-       waiting for the idle task to clean up the thread */
-    pthread_internal_local_storage_destructor_callback();
+    pthread_exit(rval);
 
-    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
-        assert(false && "Failed to lock threads list!");
-    }
-    esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
-    if (!pthread) {
-        assert(false && "Failed to find pthread for current task!");
-    }
-    if (pthread->detached) {
-        // auto-free for detached threads
-        pthread_delete(pthread);
-    } else {
-        // Remove from list, it indicates that task has exited
-        if (pthread->join_task) {
-            // notify join
-            xTaskNotify(pthread->join_task, 0, eNoAction);
-        } else {
-            pthread->state = PTHREAD_TASK_STATE_EXIT;
-        }
-    }
-    xSemaphoreGive(s_threads_mux);
-
-    ESP_LOGD(TAG, "Task stk_wm = %lu", uxTaskGetStackHighWaterMark(NULL));
-
-    vTaskDelete(NULL);
-
     ESP_LOGV(TAG, "%s EXIT", __FUNCTION__);
 }
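
The inherit logic above lets one configuration propagate down a whole thread subtree; the wrapper clears thread_name before re-installing the configuration so a child never keeps a pointer to its parent's (possibly freed) name and instead repeats the naming decision itself. A small sketch of the effect (sizes and structure are arbitrary):

#include <pthread.h>
#include "esp_pthread.h"

static void *grandchild(void *arg) { return NULL; }

static void *child(void *arg)
{
    /* Because inherit_cfg was set by the parent, this thread already runs with
     * the parent's stack size and priority, and threads it creates inherit the
     * same configuration again. */
    pthread_t tid;
    pthread_create(&tid, NULL, grandchild, NULL);
    pthread_join(tid, NULL);
    return NULL;
}

void spawn_tree(void)
{
    esp_pthread_cfg_t cfg = esp_pthread_get_default_config();
    cfg.stack_size = 8192;
    cfg.inherit_cfg = true;     /* propagate to child and grandchild */
    esp_pthread_set_cfg(&cfg);

    pthread_t tid;
    pthread_create(&tid, NULL, child, NULL);
    pthread_join(tid, NULL);
}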
@@ -239,40 +220,84 @@ int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
     TaskHandle_t xHandle = NULL;
 
     ESP_LOGV(TAG, "%s", __FUNCTION__);
-    if (attr) {
-        ESP_LOGE(TAG, "%s: attrs not supported!", __FUNCTION__);
-        return ENOSYS;
-    }
-    esp_pthread_task_arg_t *task_arg = malloc(sizeof(esp_pthread_task_arg_t));
+    esp_pthread_task_arg_t *task_arg = calloc(1, sizeof(esp_pthread_task_arg_t));
     if (task_arg == NULL) {
         ESP_LOGE(TAG, "Failed to allocate task args!");
         return ENOMEM;
     }
-    memset(task_arg, 0, sizeof(esp_pthread_task_arg_t));
-    esp_pthread_t *pthread = malloc(sizeof(esp_pthread_t));
+
+    esp_pthread_t *pthread = calloc(1, sizeof(esp_pthread_t));
     if (pthread == NULL) {
         ESP_LOGE(TAG, "Failed to allocate pthread data!");
         free(task_arg);
         return ENOMEM;
     }
-    uint32_t stack_size = CONFIG_ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT;
-    BaseType_t prio = CONFIG_ESP32_PTHREAD_TASK_PRIO_DEFAULT;
+
+    uint32_t stack_size = CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT;
+    BaseType_t prio = CONFIG_PTHREAD_TASK_PRIO_DEFAULT;
+    BaseType_t core_id = get_default_pthread_core();
+    const char *task_name = CONFIG_PTHREAD_TASK_NAME_DEFAULT;
+
     esp_pthread_cfg_t *pthread_cfg = pthread_getspecific(s_pthread_cfg_key);
     if (pthread_cfg) {
         if (pthread_cfg->stack_size) {
             stack_size = pthread_cfg->stack_size;
         }
         if (pthread_cfg->prio && pthread_cfg->prio < configMAX_PRIORITIES) {
             prio = pthread_cfg->prio;
         }
-        task_arg->cfg = *pthread_cfg;
+
+        if (pthread_cfg->inherit_cfg) {
+            if (pthread_cfg->thread_name == NULL) {
+                // Inherit task name from current task.
+                task_name = pcTaskGetTaskName(NULL);
+            } else {
+                // Inheriting, but new task name.
+                task_name = pthread_cfg->thread_name;
+            }
+        } else if (pthread_cfg->thread_name == NULL) {
+            task_name = CONFIG_PTHREAD_TASK_NAME_DEFAULT;
+        } else {
+            task_name = pthread_cfg->thread_name;
+        }
+
+        if (pthread_cfg->pin_to_core >= 0 && pthread_cfg->pin_to_core < portNUM_PROCESSORS) {
+            core_id = pthread_cfg->pin_to_core;
+        }
+
+        task_arg->cfg = *pthread_cfg;
     }
-    memset(pthread, 0, sizeof(esp_pthread_t));
+
+    if (attr) {
+        /* Overwrite attributes */
+        stack_size = attr->stacksize;
+
+        switch (attr->detachstate) {
+        case PTHREAD_CREATE_DETACHED:
+            pthread->detached = true;
+            break;
+        case PTHREAD_CREATE_JOINABLE:
+        default:
+            pthread->detached = false;
+        }
+    }
+
     task_arg->func = start_routine;
     task_arg->arg = arg;
-    BaseType_t res = xTaskCreate(&pthread_task_func, "pthread", stack_size,
-                                 task_arg, prio, &xHandle);
-    if(res != pdPASS) {
+    pthread->task_arg = task_arg;
+    BaseType_t res = xTaskCreatePinnedToCore(&pthread_task_func,
+                                             task_name,
+                                             // stack_size is in bytes. This transformation ensures that the units are
+                                             // transformed to the units used in FreeRTOS.
+                                             // Note: float division of ceil(m / n) ==
+                                             //       integer division of (m + n - 1) / n
+                                             (stack_size + sizeof(StackType_t) - 1) / sizeof(StackType_t),
+                                             task_arg,
+                                             prio,
+                                             &xHandle,
+                                             core_id);
+    if (res != pdPASS) {
         ESP_LOGE(TAG, "Failed to create task!");
         free(pthread);
         free(task_arg);
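
The rounding comment above is easy to sanity-check: with the 3072-byte default stack and a 4-byte StackType_t (the width is port-specific, so treat 4 as an assumption) the call passes 768 stack words to FreeRTOS, and any remainder rounds up instead of truncating. A stand-alone illustration of the same arithmetic:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-alone copy of the bytes-to-words rounding used above,
 * assuming a 4-byte stack word. */
typedef uint32_t stack_word_t;

static size_t stack_bytes_to_words(size_t stack_bytes)
{
    /* ceil(stack_bytes / sizeof(stack_word_t)) without floating point */
    return (stack_bytes + sizeof(stack_word_t) - 1) / sizeof(stack_word_t);
}

int main(void)
{
    assert(stack_bytes_to_words(3072) == 768);  /* exact multiple */
    assert(stack_bytes_to_words(3073) == 769);  /* rounds up, never down */
    return 0;
}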
@@ -305,6 +330,7 @@ int pthread_join(pthread_t thread, void **retval)
     esp_pthread_t *pthread = (esp_pthread_t *)thread;
     int ret = 0;
     bool wait = false;
+    void *child_task_retval = 0;
 
     ESP_LOGV(TAG, "%s %p", __FUNCTION__, pthread);
@@ -316,6 +342,9 @@ int pthread_join(pthread_t thread, void **retval)
     if (!handle) {
         // not found
         ret = ESRCH;
+    } else if (pthread->detached) {
+        // Thread is detached
+        ret = EDEADLK;
     } else if (pthread->join_task) {
         // already have waiting task to join
         ret = EINVAL;
@@ -332,23 +361,28 @@ int pthread_join(pthread_t thread, void **retval)
                 pthread->join_task = xTaskGetCurrentTaskHandle();
                 wait = true;
             } else {
+                child_task_retval = pthread->retval;
                 pthread_delete(pthread);
             }
         }
     }
     xSemaphoreGive(s_threads_mux);
 
-    if (ret == 0 && wait) {
-        xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
-        if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
-            assert(false && "Failed to lock threads list!");
+    if (ret == 0) {
+        if (wait) {
+            xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
+            if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
+                assert(false && "Failed to lock threads list!");
+            }
+            child_task_retval = pthread->retval;
+            pthread_delete(pthread);
+            xSemaphoreGive(s_threads_mux);
         }
-        pthread_delete(pthread);
-        xSemaphoreGive(s_threads_mux);
+        vTaskDelete(handle);
     }
 
     if (retval) {
-        *retval = 0; // no exit code in FreeRTOS
+        *retval = child_task_retval;
     }
 
     ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
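
With retval now carried through pthread->retval, the value handed to pthread_exit() (or returned from the start routine) reaches the joiner instead of the old hard-coded 0. A minimal round-trip sketch:

#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
    int *slot = (int *)arg;
    *slot = 123;
    pthread_exit(slot);     /* equivalent to "return slot;" here */
}

void join_example(void)
{
    static int value;
    pthread_t tid;
    void *rval = NULL;

    if (pthread_create(&tid, NULL, worker, &value) == 0 &&
        pthread_join(tid, &rval) == 0) {
        printf("joined, retval=%d\n", *(int *)rval);    /* prints 123 */
    }
}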
@@ -366,14 +400,71 @@ int pthread_detach(pthread_t thread)
     TaskHandle_t handle = pthread_find_handle(thread);
     if (!handle) {
         ret = ESRCH;
-    } else {
+    } else if (pthread->detached) {
+        // already detached
+        ret = EINVAL;
+    } else if (pthread->join_task) {
+        // already have waiting task to join
+        ret = EINVAL;
+    } else if (pthread->state == PTHREAD_TASK_STATE_RUN) {
+        // pthread still running
         pthread->detached = true;
+    } else {
+        // pthread already stopped
+        pthread_delete(pthread);
+        vTaskDelete(handle);
     }
     xSemaphoreGive(s_threads_mux);
     ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
     return ret;
 }
void pthread_exit(void *value_ptr)
{
bool detached = false;
/* preemptively clean up thread local storage, rather than
waiting for the idle task to clean up the thread */
pthread_internal_local_storage_destructor_callback();
if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
assert(false && "Failed to lock threads list!");
}
esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
if (!pthread) {
assert(false && "Failed to find pthread for current task!");
}
if (pthread->task_arg) {
free(pthread->task_arg);
}
if (pthread->detached) {
// auto-free for detached threads
pthread_delete(pthread);
detached = true;
} else {
// Set return value
pthread->retval = value_ptr;
// Remove from list, it indicates that task has exited
if (pthread->join_task) {
// notify join
xTaskNotify(pthread->join_task, 0, eNoAction);
} else {
pthread->state = PTHREAD_TASK_STATE_EXIT;
}
}
xSemaphoreGive(s_threads_mux);
ESP_LOGD(TAG, "Task stk_wm = %d", uxTaskGetStackHighWaterMark(NULL));
if (detached) {
vTaskDelete(NULL);
} else {
vTaskSuspend(NULL);
}
// Should never be reached
abort();
}
 int pthread_cancel(pthread_t thread)
 {
     ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
@@ -413,13 +504,13 @@ int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
     }
 
     uint32_t res = 1;
-#if defined(CONFIG_SPIRAM_SUPPORT)
+#if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
     if (esp_ptr_external_ram(once_control)) {
         uxPortCompareSetExtram((uint32_t *) &once_control->init_executed, 0, &res);
     } else {
 #endif
         uxPortCompareSet((uint32_t *) &once_control->init_executed, 0, &res);
-#if defined(CONFIG_SPIRAM_SUPPORT)
+#if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
     }
 #endif
 
     // Check if compare and set was successful
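
The compare-and-set above is what makes pthread_once() safe to race: exactly one of the competing callers sees the flag flip and runs the init routine. Typical usage, with a placeholder object being initialised:

#include <pthread.h>

static pthread_once_t s_table_once = PTHREAD_ONCE_INIT;
static int s_table[16];

static void init_table(void)
{
    /* Runs exactly once, no matter how many threads call get_table(). */
    for (int i = 0; i < 16; i++) {
        s_table[i] = i * i;
    }
}

int *get_table(void)
{
    pthread_once(&s_table_once, init_table);
    return s_table;
}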
@@ -434,7 +525,9 @@ int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
 
 /***************** MUTEX ******************/
 static int mutexattr_check(const pthread_mutexattr_t *attr)
 {
-    if (attr->type < PTHREAD_MUTEX_NORMAL || attr->type > PTHREAD_MUTEX_RECURSIVE) {
+    if (attr->type != PTHREAD_MUTEX_NORMAL &&
+        attr->type != PTHREAD_MUTEX_RECURSIVE &&
+        attr->type != PTHREAD_MUTEX_ERRORCHECK) {
         return EINVAL;
     }
     return 0;
@@ -490,6 +583,9 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex)
         return EINVAL;
     }
     mux = (esp_pthread_mutex_t *)*mutex;
+    if (!mux) {
+        return EINVAL;
+    }
 
     // check if mux is busy
     int res = pthread_mutex_lock_internal(mux, 0);
@@ -503,8 +599,17 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex)
     return 0;
 }
 
-static int pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo)
+static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo)
 {
+    if (!mux) {
+        return EINVAL;
+    }
+
+    if ((mux->type == PTHREAD_MUTEX_ERRORCHECK) &&
+        (xSemaphoreGetMutexHolder(mux->sem) == xTaskGetCurrentTaskHandle())) {
+        return EDEADLK;
+    }
+
     if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
         if (xSemaphoreTakeRecursive(mux->sem, tmo) != pdTRUE) {
             return EBUSY;
@@ -518,7 +623,8 @@ static int pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo)
     return 0;
 }
 
-static int pthread_mutex_init_if_static(pthread_mutex_t *mutex) {
+static int pthread_mutex_init_if_static(pthread_mutex_t *mutex)
+{
     int res = 0;
     if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
         portENTER_CRITICAL(&s_mutex_init_lock);
@@ -530,7 +636,7 @@ static int pthread_mutex_init_if_static(pthread_mutex_t *mutex)
     return res;
 }
 
-int pthread_mutex_lock(pthread_mutex_t *mutex)
+int IRAM_ATTR pthread_mutex_lock(pthread_mutex_t *mutex)
 {
     if (!mutex) {
         return EINVAL;
@@ -542,7 +648,29 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
     return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, portMAX_DELAY);
 }
 
-int pthread_mutex_trylock(pthread_mutex_t *mutex)
+int IRAM_ATTR pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *timeout)
+{
+    if (!mutex) {
+        return EINVAL;
+    }
+
+    int res = pthread_mutex_init_if_static(mutex);
+    if (res != 0) {
+        return res;
+    }
+
+    struct timespec currtime;
+    clock_gettime(CLOCK_REALTIME, &currtime);
+    TickType_t tmo = ((timeout->tv_sec - currtime.tv_sec)*1000 +
+                     (timeout->tv_nsec - currtime.tv_nsec)/1000000)/portTICK_PERIOD_MS;
+
+    res = pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, tmo);
+    if (res == EBUSY) {
+        return ETIMEDOUT;
+    }
+    return res;
+}
+
+int IRAM_ATTR pthread_mutex_trylock(pthread_mutex_t *mutex)
 {
     if (!mutex) {
         return EINVAL;
@@ -554,7 +682,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
     return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, 0);
 }
 
-int pthread_mutex_unlock(pthread_mutex_t *mutex)
+int IRAM_ATTR pthread_mutex_unlock(pthread_mutex_t *mutex)
 {
     esp_pthread_mutex_t *mux;
 
@@ -562,11 +690,24 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
         return EINVAL;
     }
     mux = (esp_pthread_mutex_t *)*mutex;
+    if (!mux) {
+        return EINVAL;
+    }
+
+    if (((mux->type == PTHREAD_MUTEX_RECURSIVE) ||
+        (mux->type == PTHREAD_MUTEX_ERRORCHECK)) &&
+        (xSemaphoreGetMutexHolder(mux->sem) != xTaskGetCurrentTaskHandle())) {
+        return EPERM;
+    }
 
+    int ret;
     if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
-        xSemaphoreGiveRecursive(mux->sem);
+        ret = xSemaphoreGiveRecursive(mux->sem);
     } else {
-        xSemaphoreGive(mux->sem);
+        ret = xSemaphoreGive(mux->sem);
+    }
+
+    if (ret != pdTRUE) {
+        assert(false && "Failed to unlock mutex!");
     }
     return 0;
 }
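
Together with the new pthread_mutex_timedlock(), the checks above surface the POSIX error codes instead of hanging or silently succeeding: relocking an ERRORCHECK mutex from its owner returns EDEADLK, unlocking from a non-owner returns EPERM, and a timed lock that cannot be acquired in time returns ETIMEDOUT. A hedged usage sketch of the error paths:

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <time.h>

void mutex_error_paths(void)
{
    pthread_mutexattr_t attr;
    pthread_mutex_t m;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&m, &attr);
    pthread_mutexattr_destroy(&attr);

    assert(pthread_mutex_lock(&m) == 0);
    assert(pthread_mutex_lock(&m) == EDEADLK);   /* relock by the owner is detected */

    /* Absolute deadline 100 ms from now for the timed variant. */
    struct timespec deadline;
    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_nsec += 100 * 1000 * 1000;
    if (deadline.tv_nsec >= 1000000000L) {
        deadline.tv_sec += 1;
        deadline.tv_nsec -= 1000000000L;
    }
    /* Still owned by this thread, so this cannot succeed; an ERRORCHECK mutex
     * reports the deadlock immediately, a NORMAL one would give ETIMEDOUT. */
    assert(pthread_mutex_timedlock(&m, &deadline) != 0);

    assert(pthread_mutex_unlock(&m) == 0);
    pthread_mutex_destroy(&m);
}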
@@ -592,8 +733,11 @@ int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
 
 int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
 {
-    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
-    return ENOSYS;
+    if (!attr) {
+        return EINVAL;
+    }
+    *type = attr->type;
+    return 0;
 }
 
 int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
@@ -609,4 +753,75 @@ int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
     return res;
 }
 
-#endif
+/***************** ATTRIBUTES ******************/
int pthread_attr_init(pthread_attr_t *attr)
{
if (attr) {
/* Nothing to allocate. Set everything to default */
attr->stacksize = CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT;
attr->detachstate = PTHREAD_CREATE_JOINABLE;
return 0;
}
return EINVAL;
}
int pthread_attr_destroy(pthread_attr_t *attr)
{
if (attr) {
/* Nothing to deallocate. Reset everything to default */
attr->stacksize = CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT;
attr->detachstate = PTHREAD_CREATE_JOINABLE;
return 0;
}
return EINVAL;
}
int pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
if (attr) {
*stacksize = attr->stacksize;
return 0;
}
return EINVAL;
}
int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
if (attr && !(stacksize < PTHREAD_STACK_MIN)) {
attr->stacksize = stacksize;
return 0;
}
return EINVAL;
}
int pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
if (attr) {
*detachstate = attr->detachstate;
return 0;
}
return EINVAL;
}
int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
if (attr) {
switch (detachstate) {
case PTHREAD_CREATE_DETACHED:
attr->detachstate = PTHREAD_CREATE_DETACHED;
break;
case PTHREAD_CREATE_JOINABLE:
attr->detachstate = PTHREAD_CREATE_JOINABLE;
break;
default:
return EINVAL;
}
return 0;
}
return EINVAL;
}
/* Hook function to force linking this file */
void pthread_include_pthread_impl(void)
{
}
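
The attribute functions above are enough for the common fire-and-forget worker pattern: pick a stack size and request a detached thread up front, so no pthread_join() is needed later. A minimal sketch using only the calls added in this commit:

#include <pthread.h>

static void *background_worker(void *arg)
{
    /* ... do some work; task resources are reclaimed automatically on exit ... */
    return NULL;
}

int start_detached_worker(void)
{
    pthread_attr_t attr;
    pthread_t tid;

    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, 4096);                      /* bytes, >= PTHREAD_STACK_MIN */
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); /* no join required */

    int res = pthread_create(&tid, &attr, background_worker, NULL);

    pthread_attr_destroy(&attr);
    return res;     /* 0 on success, an errno value otherwise */
}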

View File

@@ -20,19 +20,16 @@
 #include <errno.h>
 #include <pthread.h>
 #include <string.h>
-#include <stdlib.h>
 #include "esp_err.h"
 #include "esp_attr.h"
 #include "freertos/FreeRTOS.h"
 #include "freertos/task.h"
 #include "freertos/semphr.h"
-#include "freertos/private/list.h"
+#include "freertos/list.h"
 #include <sys/queue.h>
 #include <sys/time.h>
 
-#ifdef CONFIG_ENABLE_PTHREAD
-
 #define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
 #include "esp_log.h"
 
 const static char *TAG = "esp_pthread";
@@ -202,4 +199,7 @@ int pthread_cond_destroy(pthread_cond_t *cv)
     return ret;
 }
 
-#endif
+/* Hook function to force linking this file */
+void pthread_include_pthread_cond_var_impl(void)
+{
+}

View File

@@ -13,4 +13,4 @@
 // limitations under the License.
 
 #pragma once
 
-void pthread_internal_local_storage_destructor_callback();
+void pthread_internal_local_storage_destructor_callback(void);

View File

@@ -14,7 +14,6 @@
 #include <errno.h>
 #include <pthread.h>
 #include <string.h>
-#include <stdlib.h>
 #include "esp_err.h"
 #include "esp_log.h"
 #include "freertos/FreeRTOS.h"
@@ -24,17 +23,7 @@
 #include "pthread_internal.h"
 
-#ifdef CONFIG_ENABLE_PTHREAD
-
-#define PTHREAD_TLS_INDEX 1
-
-#if portNUM_PROCESSORS == 1
-#undef portENTER_CRITICAL
-#undef portEXIT_CRITICAL
-#define portENTER_CRITICAL(l)   vPortEnterCritical()
-#define portEXIT_CRITICAL(l)    vPortExitCritical()
-#endif
+#define PTHREAD_TLS_INDEX 0
 
 typedef void (*pthread_destructor_t)(void*);
@@ -53,9 +42,7 @@ typedef struct key_entry_t_ {
 // List of all keys created with pthread_key_create()
 SLIST_HEAD(key_list_t, key_entry_t_) s_keys = SLIST_HEAD_INITIALIZER(s_keys);
 
-#if portNUM_PROCESSORS > 1
 static portMUX_TYPE s_keys_lock = portMUX_INITIALIZER_UNLOCKED;
-#endif
 
 // List of all value entries associated with a thread via pthread_setspecific()
 typedef struct value_entry_t_ {
@@ -155,7 +142,7 @@ static void pthread_local_storage_thread_deleted_callback(int index, void *v_tls)
     free(tls);
 }
 
-#if defined(CONFIG_ENABLE_STATIC_TASK_CLEAN_UP_HOOK)
+#if defined(CONFIG_FREERTOS_ENABLE_STATIC_TASK_CLEAN_UP)
 /* Called from FreeRTOS task delete hook */
 void pthread_local_storage_cleanup(TaskHandle_t task)
 {
@@ -179,7 +166,7 @@ void __wrap_vPortCleanUpTCB(void *tcb)
 #endif
 
 /* this function called from pthread_task_func for "early" cleanup of TLS in a pthread */
-void pthread_internal_local_storage_destructor_callback()
+void pthread_internal_local_storage_destructor_callback(void)
 {
     void *tls = pvTaskGetThreadLocalStoragePointer(NULL, PTHREAD_TLS_INDEX);
     if (tls != NULL) {
@@ -187,7 +174,7 @@ void pthread_internal_local_storage_destructor_callback()
         /* remove the thread-local-storage pointer to avoid the idle task cleanup
            calling it again...
         */
-#if defined(CONFIG_ENABLE_STATIC_TASK_CLEAN_UP_HOOK)
+#if defined(CONFIG_FREERTOS_ENABLE_STATIC_TASK_CLEAN_UP)
         vTaskSetThreadLocalStoragePointer(NULL, PTHREAD_TLS_INDEX, NULL);
 #else
         vTaskSetThreadLocalStoragePointerAndDelCallback(NULL,
@@ -236,7 +223,7 @@ int pthread_setspecific(pthread_key_t key, const void *value)
     if (tls == NULL) {
         return ENOMEM;
     }
-#if defined(CONFIG_ENABLE_STATIC_TASK_CLEAN_UP_HOOK)
+#if defined(CONFIG_FREERTOS_ENABLE_STATIC_TASK_CLEAN_UP)
     vTaskSetThreadLocalStoragePointer(NULL, PTHREAD_TLS_INDEX, tls);
 #else
     vTaskSetThreadLocalStoragePointerAndDelCallback(NULL,
@@ -269,4 +256,7 @@ int pthread_setspecific(pthread_key_t key, const void *value)
     return 0;
 }
 
-#endif
+/* Hook function to force linking this file */
+void pthread_include_pthread_local_storage_impl(void)
+{
+}
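
The storage plumbing above keeps one FreeRTOS TLS slot per task holding a list of key/value pairs plus a deletion callback, and it is driven through the standard key API. A short usage sketch, with the destructor freeing whatever each thread stored:

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t s_buf_key;

static void buf_destructor(void *value)
{
    /* Called once per exiting thread that stored a value under s_buf_key. */
    free(value);
}

void tls_setup(void)
{
    pthread_key_create(&s_buf_key, buf_destructor);
}

/* Each calling thread gets its own lazily allocated 64-byte buffer. */
char *get_thread_buffer(void)
{
    char *buf = pthread_getspecific(s_buf_key);
    if (buf == NULL) {
        buf = calloc(1, 64);
        pthread_setspecific(s_buf_key, buf);
    }
    return buf;
}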

View File

@@ -0,0 +1,11 @@
# sdkconfig replacement configurations for deprecated options formatted as
# CONFIG_DEPRECATED_OPTION CONFIG_NEW_OPTION
CONFIG_ESP32_PTHREAD_TASK_PRIO_DEFAULT CONFIG_PTHREAD_TASK_PRIO_DEFAULT
CONFIG_ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT
CONFIG_ESP32_PTHREAD_STACK_MIN CONFIG_PTHREAD_STACK_MIN
CONFIG_ESP32_PTHREAD_TASK_CORE_DEFAULT CONFIG_PTHREAD_TASK_CORE_DEFAULT
CONFIG_ESP32_DEFAULT_PTHREAD_CORE_NO_AFFINITY CONFIG_PTHREAD_DEFAULT_CORE_NO_AFFINITY
CONFIG_ESP32_DEFAULT_PTHREAD_CORE_0 CONFIG_PTHREAD_DEFAULT_CORE_0
CONFIG_ESP32_DEFAULT_PTHREAD_CORE_1 CONFIG_PTHREAD_DEFAULT_CORE_1
CONFIG_ESP32_PTHREAD_TASK_NAME_DEFAULT CONFIG_PTHREAD_TASK_NAME_DEFAULT

View File

@@ -0,0 +1,3 @@
idf_component_register(SRC_DIRS "."
INCLUDE_DIRS "."
REQUIRES unity test_utils pthread)

View File

@@ -0,0 +1,48 @@
#include <iostream>
#include <thread>
#include <condition_variable>
#include <chrono>
#include <mutex>
#include <atomic>
#include "unity.h"
#if __GTHREADS && __GTHREADS_CXX0X
std::condition_variable cv;
std::mutex cv_m;
std::atomic<int> i{0};
static void waits(int idx, int timeout_ms)
{
std::unique_lock<std::mutex> lk(cv_m);
auto now = std::chrono::system_clock::now();
if(cv.wait_until(lk, now + std::chrono::milliseconds(timeout_ms), [](){return i == 1;}))
std::cout << "Thread " << idx << " finished waiting. i == " << i << '\n';
else
std::cout << "Thread " << idx << " timed out. i == " << i << '\n';
}
static void signals(int signal_ms)
{
std::this_thread::sleep_for(std::chrono::milliseconds(signal_ms));
std::cout << "Notifying...\n";
cv.notify_all();
std::this_thread::sleep_for(std::chrono::milliseconds(signal_ms));
i = 1;
std::cout << "Notifying again...\n";
cv.notify_all();
}
TEST_CASE("C++ condition_variable", "[std::condition_variable]")
{
i = 0;
std::thread t1(waits, 1, 100), t2(waits, 2, 800), t3(signals, 200);
t1.join();
t2.join();
t3.join();
std::cout << "All threads joined\n";
}
#endif

View File

@@ -0,0 +1,31 @@
#include <iostream>
#include <future>
#include <thread>
#include "unity.h"
#if __GTHREADS && __GTHREADS_CXX0X
TEST_CASE("C++ future", "[std::future]")
{
// future from a packaged_task
std::packaged_task<int()> task([]{ return 7; }); // wrap the function
std::future<int> f1 = task.get_future(); // get a future
std::thread t(std::move(task)); // launch on a thread
// future from an async()
std::future<int> f2 = std::async(std::launch::async, []{ return 8; });
// future from a promise
std::promise<int> p;
std::future<int> f3 = p.get_future();
std::thread( [&p]{ p.set_value_at_thread_exit(9); }).detach();
std::cout << "Waiting..." << std::flush;
f1.wait();
f2.wait();
f3.wait();
std::cout << "Done!\nResults are: "
<< f1.get() << ' ' << f2.get() << ' ' << f3.get() << '\n';
t.join();
}
#endif

View File

@@ -0,0 +1,294 @@
#include <errno.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_pthread.h"
#include <pthread.h>
#include "unity.h"
static void *compute_square(void *arg)
{
int *num = (int *) arg;
*num = (*num) * (*num);
pthread_exit((void *) num);
return NULL;
}
TEST_CASE("pthread create join", "[pthread]")
{
int res = 0;
volatile int num = 7;
volatile bool attr_init = false;
void *thread_rval = NULL;
pthread_t new_thread = (pthread_t)NULL;
pthread_attr_t attr;
if (TEST_PROTECT()) {
res = pthread_attr_init(&attr);
TEST_ASSERT_EQUAL_INT(0, res);
attr_init = true;
res = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_create(&new_thread, &attr, compute_square, (void *) &num);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_join(new_thread, &thread_rval);
TEST_ASSERT_EQUAL_INT(EDEADLK, res);
vTaskDelay(100 / portTICK_PERIOD_MS);
TEST_ASSERT_EQUAL_INT(49, num);
res = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_create(&new_thread, &attr, compute_square, (void *) &num);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_join(new_thread, &thread_rval);
TEST_ASSERT_EQUAL_INT(0, res);
TEST_ASSERT_EQUAL_INT(2401, num);
TEST_ASSERT_EQUAL_PTR(&num, thread_rval);
}
if (attr_init) {
pthread_attr_destroy(&attr);
}
}
static void *waiting_thread(void *arg)
{
TaskHandle_t *task_handle = (TaskHandle_t *)arg;
TaskHandle_t parent_task = *task_handle;
*task_handle = xTaskGetCurrentTaskHandle();
xTaskNotify(parent_task, 0, eNoAction);
xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
return NULL;
}
TEST_CASE("pthread detach", "[pthread]")
{
int res = 0;
pthread_t new_thread = (pthread_t)NULL;
TaskHandle_t task_handle = NULL;
const int task_count = uxTaskGetNumberOfTasks();
bool detach_works = false;
if (TEST_PROTECT()) {
task_handle = xTaskGetCurrentTaskHandle();
res = pthread_create(&new_thread, NULL, waiting_thread, (void *)&task_handle);
TEST_ASSERT_EQUAL_INT(0, res);
res = xTaskNotifyWait(0, 0, NULL, 100 / portTICK_PERIOD_MS);
TEST_ASSERT_EQUAL_INT(pdTRUE, res);
xTaskNotify(task_handle, 0, eNoAction);
vTaskDelay(100 / portTICK_PERIOD_MS);
res = pthread_detach(new_thread);
TEST_ASSERT_EQUAL_INT(0, res);
res = uxTaskGetNumberOfTasks();
TEST_ASSERT_EQUAL_INT(task_count, res);
detach_works = true;
}
if (!detach_works) {
vTaskDelete(task_handle);
} else {
detach_works = false;
}
if (TEST_PROTECT()) {
task_handle = xTaskGetCurrentTaskHandle();
res = pthread_create(&new_thread, NULL, waiting_thread, (void *)&task_handle);
TEST_ASSERT_EQUAL_INT(0, res);
res = xTaskNotifyWait(0, 0, NULL, 100 / portTICK_PERIOD_MS);
TEST_ASSERT_EQUAL_INT(pdTRUE, res);
res = pthread_detach(new_thread);
TEST_ASSERT_EQUAL_INT(0, res);
xTaskNotify(task_handle, 0, eNoAction);
vTaskDelay(100 / portTICK_PERIOD_MS);
res = uxTaskGetNumberOfTasks();
TEST_ASSERT_EQUAL_INT(task_count, res);
detach_works = true;
}
if (!detach_works) {
vTaskDelete(task_handle);
}
}
TEST_CASE("pthread attr init destroy", "[pthread]")
{
int res = 0;
size_t stack_size_1 = 0, stack_size_2 = 0;
volatile bool attr_init = pdFALSE;
pthread_attr_t attr;
if (TEST_PROTECT()) {
res = pthread_attr_init(&attr);
TEST_ASSERT_EQUAL_INT(0, res);
attr_init = true;
res = pthread_attr_getstacksize(&attr, &stack_size_1);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_attr_setstacksize(&attr, stack_size_1);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_attr_getstacksize(&attr, &stack_size_2);
TEST_ASSERT_EQUAL_INT(0, res);
TEST_ASSERT_EQUAL_INT(stack_size_2, stack_size_1);
stack_size_1 = PTHREAD_STACK_MIN - 1;
res = pthread_attr_setstacksize(&attr, stack_size_1);
TEST_ASSERT_EQUAL_INT(EINVAL, res);
}
if (attr_init) {
TEST_ASSERT_EQUAL_INT(0, pthread_attr_destroy(&attr));
}
}
static void *unlock_mutex(void *arg)
{
pthread_mutex_t *mutex = (pthread_mutex_t *) arg;
intptr_t res = (intptr_t) pthread_mutex_unlock(mutex);
pthread_exit((void *) res);
return NULL;
}
static void test_mutex_lock_unlock(int mutex_type)
{
int res = 0;
int set_type = -1;
volatile bool attr_created = false;
volatile bool mutex_created = false;
volatile intptr_t thread_rval = 0;
pthread_mutex_t mutex;
pthread_mutexattr_t attr;
pthread_t new_thread;
if (TEST_PROTECT()) {
res = pthread_mutexattr_init(&attr);
TEST_ASSERT_EQUAL_INT(0, res);
attr_created = true;
res = pthread_mutexattr_settype(&attr, mutex_type);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_mutexattr_gettype(&attr, &set_type);
TEST_ASSERT_EQUAL_INT(0, res);
TEST_ASSERT_EQUAL_INT(mutex_type, set_type);
res = pthread_mutex_init(&mutex, &attr);
TEST_ASSERT_EQUAL_INT(0, res);
mutex_created = true;
res = pthread_mutex_lock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_mutex_lock(&mutex);
if(mutex_type == PTHREAD_MUTEX_ERRORCHECK) {
TEST_ASSERT_EQUAL_INT(EDEADLK, res);
} else {
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_mutex_unlock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
}
pthread_create(&new_thread, NULL, unlock_mutex, &mutex);
pthread_join(new_thread, (void **) &thread_rval);
TEST_ASSERT_EQUAL_INT(EPERM, (int) thread_rval);
res = pthread_mutex_unlock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
}
if (attr_created) {
pthread_mutexattr_destroy(&attr);
}
if (mutex_created) {
pthread_mutex_destroy(&mutex);
}
}
TEST_CASE("pthread mutex lock unlock", "[pthread]")
{
int res = 0;
/* Present behavior of mutex initializer is unlike what is
* defined in Posix standard, ie. calling pthread_mutex_lock
* on such a mutex would internally cause dynamic allocation.
* Therefore pthread_mutex_destroy needs to be called in
* order to avoid memory leak. */
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
res = pthread_mutex_lock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_mutex_unlock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
/* This deviates from the Posix standard static mutex behavior.
* This needs to be removed in the future when standard mutex
* initializer is supported */
pthread_mutex_destroy(&mutex);
test_mutex_lock_unlock(PTHREAD_MUTEX_ERRORCHECK);
test_mutex_lock_unlock(PTHREAD_MUTEX_RECURSIVE);
}
static void timespec_add_nano(struct timespec * out, struct timespec * in, long val)
{
out->tv_nsec = val + in->tv_nsec;
if (out->tv_nsec < (in->tv_nsec)) {
out->tv_sec += 1;
}
}
TEST_CASE("pthread mutex trylock timedlock", "[pthread]")
{
int res = 0;
volatile bool mutex_created = false;
pthread_mutex_t mutex;
struct timespec abs_timeout;
if (TEST_PROTECT()) {
res = pthread_mutex_init(&mutex, NULL);
TEST_ASSERT_EQUAL_INT(0, res);
mutex_created = true;
res = pthread_mutex_trylock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
res = pthread_mutex_trylock(&mutex);
TEST_ASSERT_EQUAL_INT(EBUSY, res);
clock_gettime(CLOCK_REALTIME, &abs_timeout);
timespec_add_nano(&abs_timeout, &abs_timeout, 100000000LL);
res = pthread_mutex_timedlock(&mutex, &abs_timeout);
TEST_ASSERT_EQUAL_INT(ETIMEDOUT, res);
res = pthread_mutex_unlock(&mutex);
TEST_ASSERT_EQUAL_INT(0, res);
}
if (mutex_created) {
pthread_mutex_destroy(&mutex);
}
}

View File

@@ -0,0 +1,132 @@
#include <iostream>
#include <sstream>
#include <thread>
#include <mutex>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "unity.h"
#if __GTHREADS && __GTHREADS_CXX0X
#define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
#include "esp_log.h"
const static char *TAG = "pthread_test";
static std::shared_ptr<int> global_sp;
static std::mutex mtx;
static std::recursive_mutex recur_mtx;
static void thread_do_nothing() {}
static void thread_main()
{
int i = 0;
std::cout << "thread_main CXX " << std::hex << std::this_thread::get_id() << std::endl;
std::chrono::milliseconds dur = std::chrono::milliseconds(300);
while (i < 3) {
int old_val, new_val;
// mux test
mtx.lock();
old_val = *global_sp;
std::this_thread::yield();
(*global_sp)++;
std::this_thread::yield();
new_val = *global_sp;
mtx.unlock();
std::cout << "thread " << std::hex << std::this_thread::get_id() << ": " << i++ << " val= " << *global_sp << std::endl;
TEST_ASSERT_TRUE(new_val == old_val + 1);
// sleep_for test
std::this_thread::sleep_for(dur);
// recursive mux test
recur_mtx.lock();
recur_mtx.lock();
old_val = *global_sp;
std::this_thread::yield();
(*global_sp)++;
std::this_thread::yield();
new_val = *global_sp;
recur_mtx.unlock();
recur_mtx.unlock();
std::cout << "thread " << std::hex << std::this_thread::get_id() << ": " << i++ << " val= " << *global_sp << std::endl;
TEST_ASSERT_TRUE(new_val == old_val + 1);
// sleep_until test
using std::chrono::system_clock;
std::time_t tt = system_clock::to_time_t(system_clock::now());
struct std::tm *ptm = std::localtime(&tt);
ptm->tm_sec++;
std::this_thread::sleep_until(system_clock::from_time_t(mktime(ptm)));
}
}
TEST_CASE("pthread C++", "[pthread]")
{
global_sp.reset(new int(1));
std::thread t1(thread_do_nothing);
t1.join();
std::thread t2(thread_main);
std::cout << "Detach thread " << std::hex << t2.get_id() << std::endl;
t2.detach();
TEST_ASSERT_FALSE(t2.joinable());
std::thread t3(thread_main);
std::thread t4(thread_main);
if (t3.joinable()) {
std::cout << "Join thread " << std::hex << t3.get_id() << std::endl;
t3.join();
}
if (t4.joinable()) {
std::cout << "Join thread " << std::hex << t4.get_id() << std::endl;
t4.join();
}
global_sp.reset(); // avoid reported leak
}
static void task_test_sandbox()
{
std::stringstream ss;
ESP_LOGI(TAG, "About to create a string stream");
ESP_LOGI(TAG, "About to write to string stream");
ss << "Hello World!";
ESP_LOGI(TAG, "About to extract from stringstream");
ESP_LOGI(TAG, "Text: %s", ss.str().c_str());
}
static void task_test_sandbox_c(void *arg)
{
bool *running = (bool *)arg;
// wrap thread func to ensure that all C++ stack objects are cleaned up by their destructors
task_test_sandbox();
ESP_LOGI(TAG, "Task stk_wm = %d", uxTaskGetStackHighWaterMark(NULL));
if (running) {
*running = false;
vTaskDelete(NULL);
}
}
TEST_CASE("pthread mix C/C++", "[pthread]")
{
bool c_running = true;
std::thread t1(task_test_sandbox);
xTaskCreatePinnedToCore((TaskFunction_t)&task_test_sandbox_c, "task_test_sandbox", 3072, &c_running, 5, NULL, 0);
while (c_running) {
vTaskDelay(1);
}
if (t1.joinable()) {
std::cout << "Join thread " << std::hex << t1.get_id() << std::endl;
t1.join();
}
}
#endif

View File

@@ -1,9 +1,9 @@
 // Test pthread_create_key, pthread_delete_key, pthread_setspecific, pthread_getspecific
 #include <pthread.h>
-#include <stdio.h>
 #include "unity.h"
 #include "freertos/FreeRTOS.h"
 #include "freertos/task.h"
+#include "test_utils.h"
 
 TEST_CASE("pthread local storage basics", "[pthread]")
 {