mirror of
https://github.com/espressif/ESP8266_RTOS_SDK.git
synced 2025-05-25 02:57:33 +08:00
feat(pthread): Modify for ESP8266
This commit is contained in:
612
components/pthread/src/pthread.c
Normal file
612
components/pthread/src/pthread.c
Normal file
@ -0,0 +1,612 @@
|
||||
// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// This module implements pthread API on top of FreeRTOS. API is implemented to the level allowing
|
||||
// libstdcxx threading framework to operate correctly. So not all original pthread routines are supported.
|
||||
// Moreover some implemened functions do not provide full functionality, e.g. pthread_create does not support
|
||||
// thread's attributes customization (prio, stack size and so on). So if you are not satisfied with default
|
||||
// behavior use native FreeRTOS API.
|
||||
//
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include "esp_err.h"
|
||||
#include "esp_attr.h"
|
||||
#include "sys/queue.h"
|
||||
#include "sys/types.h"
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "freertos/task.h"
|
||||
#include "freertos/semphr.h"
|
||||
|
||||
#include "pthread_internal.h"
|
||||
#include "esp_pthread.h"
|
||||
|
||||
#ifdef CONFIG_ENABLE_PTHREAD
|
||||
|
||||
#if portNUM_PROCESSORS == 1
|
||||
#undef portENTER_CRITICAL
|
||||
#undef portEXIT_CRITICAL
|
||||
|
||||
#define portENTER_CRITICAL(l) vPortEnterCritical()
|
||||
#define portEXIT_CRITICAL(l) vPortExitCritical()
|
||||
#endif
|
||||
|
||||
#define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
|
||||
#include "esp_log.h"
|
||||
const static char *TAG = "pthread";
|
||||
|
||||
/** task state */
|
||||
enum esp_pthread_task_state {
|
||||
PTHREAD_TASK_STATE_RUN,
|
||||
PTHREAD_TASK_STATE_EXIT
|
||||
};
|
||||
|
||||
/** pthread thread FreeRTOS wrapper */
|
||||
typedef struct esp_pthread_entry {
|
||||
SLIST_ENTRY(esp_pthread_entry) list_node; ///< Tasks list node struct.
|
||||
TaskHandle_t handle; ///< FreeRTOS task handle
|
||||
TaskHandle_t join_task; ///< Handle of the task waiting to join
|
||||
enum esp_pthread_task_state state; ///< pthread task state
|
||||
bool detached; ///< True if pthread is detached
|
||||
} esp_pthread_t;
|
||||
|
||||
/** pthread wrapper task arg */
|
||||
typedef struct {
|
||||
void *(*func)(void *); ///< user task entry
|
||||
void *arg; ///< user task argument
|
||||
esp_pthread_cfg_t cfg; ///< pthread configuration
|
||||
} esp_pthread_task_arg_t;
|
||||
|
||||
/** pthread mutex FreeRTOS wrapper */
|
||||
typedef struct {
|
||||
SemaphoreHandle_t sem; ///< Handle of the task waiting to join
|
||||
int type; ///< Mutex type. Currently supported PTHREAD_MUTEX_NORMAL and PTHREAD_MUTEX_RECURSIVE
|
||||
} esp_pthread_mutex_t;
|
||||
|
||||
|
||||
static SemaphoreHandle_t s_threads_mux = NULL;
|
||||
#if portNUM_PROCESSORS > 1
|
||||
static portMUX_TYPE s_mutex_init_lock = portMUX_INITIALIZER_UNLOCKED;
|
||||
#endif
|
||||
static SLIST_HEAD(esp_thread_list_head, esp_pthread_entry) s_threads_list
|
||||
= SLIST_HEAD_INITIALIZER(s_threads_list);
|
||||
static pthread_key_t s_pthread_cfg_key;
|
||||
|
||||
|
||||
static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo);
|
||||
|
||||
static inline void uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
|
||||
{
|
||||
portENTER_CRITICAL(&s_mutex_init_lock);
|
||||
*addr = compare;
|
||||
*set = 0;
|
||||
portEXIT_CRITICAL(&s_mutex_init_lock);
|
||||
}
|
||||
|
||||
static void esp_pthread_cfg_key_destructor(void *value)
|
||||
{
|
||||
free(value);
|
||||
}
|
||||
|
||||
esp_err_t esp_pthread_init(void)
|
||||
{
|
||||
if (pthread_key_create(&s_pthread_cfg_key, esp_pthread_cfg_key_destructor) != 0) {
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
s_threads_mux = xSemaphoreCreateMutex();
|
||||
if (s_threads_mux == NULL) {
|
||||
pthread_key_delete(s_pthread_cfg_key);
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
static void *pthread_list_find_item(void *(*item_check)(esp_pthread_t *, void *arg), void *check_arg)
|
||||
{
|
||||
esp_pthread_t *it;
|
||||
SLIST_FOREACH(it, &s_threads_list, list_node) {
|
||||
void *val = item_check(it, check_arg);
|
||||
if (val) {
|
||||
return val;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *pthread_get_handle_by_desc(esp_pthread_t *item, void *desc)
|
||||
{
|
||||
if (item == desc) {
|
||||
return item->handle;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *pthread_get_desc_by_handle(esp_pthread_t *item, void *hnd)
|
||||
{
|
||||
if (hnd == item->handle) {
|
||||
return item;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline TaskHandle_t pthread_find_handle(pthread_t thread)
|
||||
{
|
||||
return pthread_list_find_item(pthread_get_handle_by_desc, (void *)thread);
|
||||
}
|
||||
|
||||
static esp_pthread_t *pthread_find(TaskHandle_t task_handle)
|
||||
{
|
||||
return pthread_list_find_item(pthread_get_desc_by_handle, task_handle);
|
||||
}
|
||||
|
||||
static void pthread_delete(esp_pthread_t *pthread)
|
||||
{
|
||||
SLIST_REMOVE(&s_threads_list, pthread, esp_pthread_entry, list_node);
|
||||
free(pthread);
|
||||
}
|
||||
|
||||
|
||||
/* Call this function to configure pthread stacks in Pthreads */
|
||||
esp_err_t esp_pthread_set_cfg(const esp_pthread_cfg_t *cfg)
|
||||
{
|
||||
/* If a value is already set, update that value */
|
||||
esp_pthread_cfg_t *p = pthread_getspecific(s_pthread_cfg_key);
|
||||
if (!p) {
|
||||
p = malloc(sizeof(esp_pthread_cfg_t));
|
||||
if (!p) {
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
}
|
||||
*p = *cfg;
|
||||
pthread_setspecific(s_pthread_cfg_key, p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Copy the calling thread's stored creation config into *p. When no config
   was ever stored, *p is zeroed and ESP_ERR_NOT_FOUND is returned. */
esp_err_t esp_pthread_get_cfg(esp_pthread_cfg_t *p)
{
    const esp_pthread_cfg_t *stored = pthread_getspecific(s_pthread_cfg_key);
    if (stored == NULL) {
        memset(p, 0, sizeof(*p));
        return ESP_ERR_NOT_FOUND;
    }
    *p = *stored;
    return ESP_OK;
}
|
||||
|
||||
static void pthread_task_func(void *arg)
|
||||
{
|
||||
esp_pthread_task_arg_t *task_arg = (esp_pthread_task_arg_t *)arg;
|
||||
|
||||
ESP_LOGV(TAG, "%s ENTER %p", __FUNCTION__, task_arg->func);
|
||||
// wait for start
|
||||
xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
|
||||
|
||||
if (task_arg->cfg.inherit_cfg) {
|
||||
/* If inherit option is set, then do a set_cfg() ourselves for future forks */
|
||||
esp_pthread_set_cfg(&task_arg->cfg);
|
||||
}
|
||||
ESP_LOGV(TAG, "%s START %p", __FUNCTION__, task_arg->func);
|
||||
task_arg->func(task_arg->arg);
|
||||
ESP_LOGV(TAG, "%s END %p", __FUNCTION__, task_arg->func);
|
||||
free(task_arg);
|
||||
|
||||
/* preemptively clean up thread local storage, rather than
|
||||
waiting for the idle task to clean up the thread */
|
||||
pthread_internal_local_storage_destructor_callback();
|
||||
|
||||
if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
|
||||
assert(false && "Failed to lock threads list!");
|
||||
}
|
||||
esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
|
||||
if (!pthread) {
|
||||
assert(false && "Failed to find pthread for current task!");
|
||||
}
|
||||
if (pthread->detached) {
|
||||
// auto-free for detached threads
|
||||
pthread_delete(pthread);
|
||||
} else {
|
||||
// Remove from list, it indicates that task has exited
|
||||
if (pthread->join_task) {
|
||||
// notify join
|
||||
xTaskNotify(pthread->join_task, 0, eNoAction);
|
||||
} else {
|
||||
pthread->state = PTHREAD_TASK_STATE_EXIT;
|
||||
}
|
||||
}
|
||||
xSemaphoreGive(s_threads_mux);
|
||||
|
||||
ESP_LOGD(TAG, "Task stk_wm = %lu", uxTaskGetStackHighWaterMark(NULL));
|
||||
vTaskDelete(NULL);
|
||||
|
||||
ESP_LOGV(TAG, "%s EXIT", __FUNCTION__);
|
||||
}
|
||||
|
||||
int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
|
||||
void *(*start_routine) (void *), void *arg)
|
||||
{
|
||||
TaskHandle_t xHandle = NULL;
|
||||
|
||||
ESP_LOGV(TAG, "%s", __FUNCTION__);
|
||||
if (attr) {
|
||||
ESP_LOGE(TAG, "%s: attrs not supported!", __FUNCTION__);
|
||||
return ENOSYS;
|
||||
}
|
||||
esp_pthread_task_arg_t *task_arg = malloc(sizeof(esp_pthread_task_arg_t));
|
||||
if (task_arg == NULL) {
|
||||
ESP_LOGE(TAG, "Failed to allocate task args!");
|
||||
return ENOMEM;
|
||||
}
|
||||
memset(task_arg, 0, sizeof(esp_pthread_task_arg_t));
|
||||
esp_pthread_t *pthread = malloc(sizeof(esp_pthread_t));
|
||||
if (pthread == NULL) {
|
||||
ESP_LOGE(TAG, "Failed to allocate pthread data!");
|
||||
free(task_arg);
|
||||
return ENOMEM;
|
||||
}
|
||||
uint32_t stack_size = CONFIG_ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT;
|
||||
BaseType_t prio = CONFIG_ESP32_PTHREAD_TASK_PRIO_DEFAULT;
|
||||
esp_pthread_cfg_t *pthread_cfg = pthread_getspecific(s_pthread_cfg_key);
|
||||
if (pthread_cfg) {
|
||||
if (pthread_cfg->stack_size) {
|
||||
stack_size = pthread_cfg->stack_size;
|
||||
}
|
||||
if (pthread_cfg->prio && pthread_cfg->prio < configMAX_PRIORITIES) {
|
||||
prio = pthread_cfg->prio;
|
||||
}
|
||||
task_arg->cfg = *pthread_cfg;
|
||||
}
|
||||
memset(pthread, 0, sizeof(esp_pthread_t));
|
||||
task_arg->func = start_routine;
|
||||
task_arg->arg = arg;
|
||||
BaseType_t res = xTaskCreate(&pthread_task_func, "pthread", stack_size,
|
||||
task_arg, prio, &xHandle);
|
||||
if(res != pdPASS) {
|
||||
ESP_LOGE(TAG, "Failed to create task!");
|
||||
free(pthread);
|
||||
free(task_arg);
|
||||
if (res == errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY) {
|
||||
return ENOMEM;
|
||||
} else {
|
||||
return EAGAIN;
|
||||
}
|
||||
}
|
||||
pthread->handle = xHandle;
|
||||
|
||||
if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
|
||||
assert(false && "Failed to lock threads list!");
|
||||
}
|
||||
SLIST_INSERT_HEAD(&s_threads_list, pthread, list_node);
|
||||
xSemaphoreGive(s_threads_mux);
|
||||
|
||||
// start task
|
||||
xTaskNotify(xHandle, 0, eNoAction);
|
||||
|
||||
*thread = (pthread_t)pthread; // pointer value fit into pthread_t (uint32_t)
|
||||
|
||||
ESP_LOGV(TAG, "Created task %x", (uint32_t)xHandle);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_join(pthread_t thread, void **retval)
|
||||
{
|
||||
esp_pthread_t *pthread = (esp_pthread_t *)thread;
|
||||
int ret = 0;
|
||||
bool wait = false;
|
||||
|
||||
ESP_LOGV(TAG, "%s %p", __FUNCTION__, pthread);
|
||||
|
||||
// find task
|
||||
if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
|
||||
assert(false && "Failed to lock threads list!");
|
||||
}
|
||||
TaskHandle_t handle = pthread_find_handle(thread);
|
||||
if (!handle) {
|
||||
// not found
|
||||
ret = ESRCH;
|
||||
} else if (pthread->join_task) {
|
||||
// already have waiting task to join
|
||||
ret = EINVAL;
|
||||
} else if (handle == xTaskGetCurrentTaskHandle()) {
|
||||
// join to self not allowed
|
||||
ret = EDEADLK;
|
||||
} else {
|
||||
esp_pthread_t *cur_pthread = pthread_find(xTaskGetCurrentTaskHandle());
|
||||
if (cur_pthread && cur_pthread->join_task == handle) {
|
||||
// join to each other not allowed
|
||||
ret = EDEADLK;
|
||||
} else {
|
||||
if (pthread->state == PTHREAD_TASK_STATE_RUN) {
|
||||
pthread->join_task = xTaskGetCurrentTaskHandle();
|
||||
wait = true;
|
||||
} else {
|
||||
pthread_delete(pthread);
|
||||
}
|
||||
}
|
||||
}
|
||||
xSemaphoreGive(s_threads_mux);
|
||||
|
||||
if (ret == 0 && wait) {
|
||||
xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
|
||||
if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
|
||||
assert(false && "Failed to lock threads list!");
|
||||
}
|
||||
pthread_delete(pthread);
|
||||
xSemaphoreGive(s_threads_mux);
|
||||
}
|
||||
|
||||
if (retval) {
|
||||
*retval = 0; // no exit code in FreeRTOS
|
||||
}
|
||||
|
||||
ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int pthread_detach(pthread_t thread)
|
||||
{
|
||||
esp_pthread_t *pthread = (esp_pthread_t *)thread;
|
||||
int ret = 0;
|
||||
|
||||
if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
|
||||
assert(false && "Failed to lock threads list!");
|
||||
}
|
||||
TaskHandle_t handle = pthread_find_handle(thread);
|
||||
if (!handle) {
|
||||
ret = ESRCH;
|
||||
} else {
|
||||
pthread->detached = true;
|
||||
}
|
||||
xSemaphoreGive(s_threads_mux);
|
||||
ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int pthread_cancel(pthread_t thread)
|
||||
{
|
||||
ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
|
||||
return ENOSYS;
|
||||
}
|
||||
|
||||
int sched_yield( void )
|
||||
{
|
||||
vTaskDelay(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
pthread_t pthread_self(void)
|
||||
{
|
||||
if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
|
||||
assert(false && "Failed to lock threads list!");
|
||||
}
|
||||
esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
|
||||
if (!pthread) {
|
||||
assert(false && "Failed to find current thread ID!");
|
||||
}
|
||||
xSemaphoreGive(s_threads_mux);
|
||||
return (pthread_t)pthread;
|
||||
}
|
||||
|
||||
/* Two pthread_t values name the same thread iff they are numerically equal
   (a pthread_t here is the descriptor pointer value). */
int pthread_equal(pthread_t t1, pthread_t t2)
{
    return (t1 == t2) ? 1 : 0;
}
|
||||
|
||||
/***************** ONCE ******************/
|
||||
int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
|
||||
{
|
||||
if (once_control == NULL || init_routine == NULL || !once_control->is_initialized) {
|
||||
ESP_LOGE(TAG, "%s: Invalid args!", __FUNCTION__);
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
uint32_t res = 1;
|
||||
#if defined(CONFIG_SPIRAM_SUPPORT)
|
||||
if (esp_ptr_external_ram(once_control)) {
|
||||
uxPortCompareSetExtram((uint32_t *) &once_control->init_executed, 0, &res);
|
||||
} else {
|
||||
#endif
|
||||
uxPortCompareSet((uint32_t *) &once_control->init_executed, 0, &res);
|
||||
#if defined(CONFIG_SPIRAM_SUPPORT)
|
||||
}
|
||||
#endif
|
||||
// Check if compare and set was successful
|
||||
if (res == 0) {
|
||||
ESP_LOGV(TAG, "%s: call init_routine %p", __FUNCTION__, once_control);
|
||||
init_routine();
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/***************** MUTEX ******************/
|
||||
static int mutexattr_check(const pthread_mutexattr_t *attr)
|
||||
{
|
||||
if (attr->type < PTHREAD_MUTEX_NORMAL || attr->type > PTHREAD_MUTEX_RECURSIVE) {
|
||||
return EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
|
||||
{
|
||||
int type = PTHREAD_MUTEX_NORMAL;
|
||||
|
||||
if (!mutex) {
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
if (attr) {
|
||||
if (!attr->is_initialized) {
|
||||
return EINVAL;
|
||||
}
|
||||
int res = mutexattr_check(attr);
|
||||
if (res) {
|
||||
return res;
|
||||
}
|
||||
type = attr->type;
|
||||
}
|
||||
|
||||
esp_pthread_mutex_t *mux = (esp_pthread_mutex_t *)malloc(sizeof(esp_pthread_mutex_t));
|
||||
if (!mux) {
|
||||
return ENOMEM;
|
||||
}
|
||||
mux->type = type;
|
||||
|
||||
if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
|
||||
mux->sem = xSemaphoreCreateRecursiveMutex();
|
||||
} else {
|
||||
mux->sem = xSemaphoreCreateMutex();
|
||||
}
|
||||
if (!mux->sem) {
|
||||
free(mux);
|
||||
return EAGAIN;
|
||||
}
|
||||
|
||||
*mutex = (pthread_mutex_t)mux; // pointer value fit into pthread_mutex_t (uint32_t)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_mutex_destroy(pthread_mutex_t *mutex)
|
||||
{
|
||||
esp_pthread_mutex_t *mux;
|
||||
|
||||
ESP_LOGV(TAG, "%s %p", __FUNCTION__, mutex);
|
||||
|
||||
if (!mutex) {
|
||||
return EINVAL;
|
||||
}
|
||||
mux = (esp_pthread_mutex_t *)*mutex;
|
||||
|
||||
// check if mux is busy
|
||||
int res = pthread_mutex_lock_internal(mux, 0);
|
||||
if (res == EBUSY) {
|
||||
return EBUSY;
|
||||
}
|
||||
|
||||
vSemaphoreDelete(mux->sem);
|
||||
free(mux);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo)
|
||||
{
|
||||
if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
|
||||
if (xSemaphoreTakeRecursive(mux->sem, tmo) != pdTRUE) {
|
||||
return EBUSY;
|
||||
}
|
||||
} else {
|
||||
if (xSemaphoreTake(mux->sem, tmo) != pdTRUE) {
|
||||
return EBUSY;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pthread_mutex_init_if_static(pthread_mutex_t *mutex) {
|
||||
int res = 0;
|
||||
if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
|
||||
portENTER_CRITICAL(&s_mutex_init_lock);
|
||||
if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
|
||||
res = pthread_mutex_init(mutex, NULL);
|
||||
}
|
||||
portEXIT_CRITICAL(&s_mutex_init_lock);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
int IRAM_ATTR pthread_mutex_lock(pthread_mutex_t *mutex)
|
||||
{
|
||||
if (!mutex) {
|
||||
return EINVAL;
|
||||
}
|
||||
int res = pthread_mutex_init_if_static(mutex);
|
||||
if (res != 0) {
|
||||
return res;
|
||||
}
|
||||
return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, portMAX_DELAY);
|
||||
}
|
||||
|
||||
int IRAM_ATTR pthread_mutex_trylock(pthread_mutex_t *mutex)
|
||||
{
|
||||
if (!mutex) {
|
||||
return EINVAL;
|
||||
}
|
||||
int res = pthread_mutex_init_if_static(mutex);
|
||||
if (res != 0) {
|
||||
return res;
|
||||
}
|
||||
return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, 0);
|
||||
}
|
||||
|
||||
/**
 * Release a mutex, using the recursive give for recursive mutexes.
 *
 * Fix over the original: unlocking a statically initialized mutex that was
 * never locked (or a NULL handle) used to cast and dereference the raw
 * PTHREAD_MUTEX_INITIALIZER constant; both cases now return EINVAL.
 *
 * @return 0 on success, EINVAL for a bad handle
 */
int IRAM_ATTR pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    esp_pthread_mutex_t *mux;

    if (!mutex) {
        return EINVAL;
    }
    if ((intptr_t)*mutex == PTHREAD_MUTEX_INITIALIZER) {
        /* Never lazily initialized, so it cannot be locked: caller error. */
        return EINVAL;
    }
    mux = (esp_pthread_mutex_t *)*mutex;
    if (!mux) {
        return EINVAL;
    }

    if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
        xSemaphoreGiveRecursive(mux->sem);
    } else {
        xSemaphoreGive(mux->sem);
    }
    return 0;
}
|
||||
|
||||
int pthread_mutexattr_init(pthread_mutexattr_t *attr)
|
||||
{
|
||||
if (!attr) {
|
||||
return EINVAL;
|
||||
}
|
||||
attr->type = PTHREAD_MUTEX_NORMAL;
|
||||
attr->is_initialized = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
|
||||
{
|
||||
if (!attr) {
|
||||
return EINVAL;
|
||||
}
|
||||
attr->is_initialized = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
|
||||
{
|
||||
ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
|
||||
return ENOSYS;
|
||||
}
|
||||
|
||||
int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
|
||||
{
|
||||
if (!attr) {
|
||||
return EINVAL;
|
||||
}
|
||||
pthread_mutexattr_t tmp_attr = {.type = type};
|
||||
int res = mutexattr_check(&tmp_attr);
|
||||
if (!res) {
|
||||
attr->type = type;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
#endif
|
205
components/pthread/src/pthread_cond_var.c
Normal file
205
components/pthread/src/pthread_cond_var.c
Normal file
@ -0,0 +1,205 @@
|
||||
// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This is a simple implementation of pthread condition variables. In essence,
|
||||
// the waiter creates its own semaphore to wait on and pushes it in the cond var
|
||||
// specific list. Upon notify and broadcast, all the waiters for the given cond
|
||||
// var are woken up.
|
||||
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include "esp_err.h"
|
||||
#include "esp_attr.h"
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "freertos/task.h"
|
||||
#include "freertos/semphr.h"
|
||||
#include "freertos/private/list.h"
|
||||
|
||||
#include <sys/queue.h>
|
||||
#include <sys/time.h>
|
||||
|
||||
#ifdef CONFIG_ENABLE_PTHREAD
|
||||
|
||||
#define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
|
||||
#include "esp_log.h"
|
||||
const static char *TAG = "esp_pthread";
|
||||
|
||||
typedef struct esp_pthread_cond_waiter {
|
||||
SemaphoreHandle_t wait_sem; ///< task specific semaphore to wait on
|
||||
TAILQ_ENTRY(esp_pthread_cond_waiter) link; ///< stash on the list of semaphores to be notified
|
||||
} esp_pthread_cond_waiter_t;
|
||||
|
||||
typedef struct esp_pthread_cond {
|
||||
_lock_t lock; ///< lock that protects the list of semaphores
|
||||
TAILQ_HEAD(, esp_pthread_cond_waiter) waiter_list; ///< head of the list of semaphores
|
||||
} esp_pthread_cond_t;
|
||||
|
||||
int pthread_cond_signal(pthread_cond_t *cv)
|
||||
{
|
||||
if (cv == NULL || *cv == (pthread_cond_t) 0) {
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
esp_pthread_cond_t *cond = (esp_pthread_cond_t *) *cv;
|
||||
|
||||
_lock_acquire_recursive(&cond->lock);
|
||||
esp_pthread_cond_waiter_t *entry;
|
||||
entry = TAILQ_FIRST(&cond->waiter_list);
|
||||
if (entry) {
|
||||
xSemaphoreGive(entry->wait_sem);
|
||||
}
|
||||
_lock_release_recursive(&cond->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_cond_broadcast(pthread_cond_t *cv)
|
||||
{
|
||||
if (cv == NULL || *cv == (pthread_cond_t) 0) {
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
esp_pthread_cond_t *cond = (esp_pthread_cond_t *) *cv;
|
||||
|
||||
_lock_acquire_recursive(&cond->lock);
|
||||
esp_pthread_cond_waiter_t *entry;
|
||||
TAILQ_FOREACH(entry, &cond->waiter_list, link) {
|
||||
xSemaphoreGive(entry->wait_sem);
|
||||
}
|
||||
_lock_release_recursive(&cond->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut)
|
||||
{
|
||||
return pthread_cond_timedwait(cv, mut, NULL);
|
||||
}
|
||||
|
||||
int pthread_cond_timedwait(pthread_cond_t *cv, pthread_mutex_t *mut, const struct timespec *to)
|
||||
{
|
||||
int ret;
|
||||
TickType_t timeout_ticks;
|
||||
|
||||
if (cv == NULL || *cv == (pthread_cond_t) 0) {
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
esp_pthread_cond_t *cond = (esp_pthread_cond_t *) *cv;
|
||||
|
||||
if (to == NULL) {
|
||||
timeout_ticks = portMAX_DELAY;
|
||||
} else {
|
||||
struct timeval abs_time, cur_time, diff_time;
|
||||
long timeout_msec;
|
||||
|
||||
gettimeofday(&cur_time, NULL);
|
||||
|
||||
abs_time.tv_sec = to->tv_sec;
|
||||
abs_time.tv_usec = to->tv_nsec / 1000;
|
||||
|
||||
if (timercmp(&abs_time, &cur_time, <)) {
|
||||
/* As per the pthread spec, if the time has already
|
||||
* passed, no sleep is required.
|
||||
*/
|
||||
timeout_msec = 0;
|
||||
} else {
|
||||
timersub(&abs_time, &cur_time, &diff_time);
|
||||
timeout_msec = (diff_time.tv_sec * 1000) + (diff_time.tv_usec / 1000);
|
||||
}
|
||||
|
||||
if (timeout_msec <= 0) {
|
||||
return ETIMEDOUT;
|
||||
}
|
||||
|
||||
timeout_ticks = timeout_msec / portTICK_PERIOD_MS;
|
||||
}
|
||||
|
||||
esp_pthread_cond_waiter_t w;
|
||||
w.wait_sem = xSemaphoreCreateCounting(1, 0); /* First get will block */
|
||||
|
||||
_lock_acquire_recursive(&cond->lock);
|
||||
TAILQ_INSERT_TAIL(&cond->waiter_list, &w, link);
|
||||
_lock_release_recursive(&cond->lock);
|
||||
pthread_mutex_unlock(mut);
|
||||
|
||||
if (xSemaphoreTake(w.wait_sem, timeout_ticks) == pdTRUE) {
|
||||
ret = 0;
|
||||
} else {
|
||||
ret = ETIMEDOUT;
|
||||
}
|
||||
|
||||
_lock_acquire_recursive(&cond->lock);
|
||||
TAILQ_REMOVE(&cond->waiter_list, &w, link);
|
||||
_lock_release_recursive(&cond->lock);
|
||||
vSemaphoreDelete(w.wait_sem);
|
||||
|
||||
pthread_mutex_lock(mut);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int pthread_condattr_init(pthread_condattr_t *attr)
|
||||
{
|
||||
ESP_LOGV(TAG, "%s not yet implemented (%p)", __FUNCTION__, attr);
|
||||
return ENOSYS;
|
||||
}
|
||||
|
||||
int pthread_cond_init(pthread_cond_t *cv, const pthread_condattr_t *att)
|
||||
{
|
||||
(void) att; /* Unused argument as of now */
|
||||
|
||||
if (cv == NULL) {
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
esp_pthread_cond_t *cond = (esp_pthread_cond_t *) calloc(1, sizeof(esp_pthread_cond_t));
|
||||
if (cond == NULL) {
|
||||
return ENOMEM;
|
||||
}
|
||||
|
||||
_lock_init_recursive(&cond->lock);
|
||||
TAILQ_INIT(&cond->waiter_list);
|
||||
|
||||
*cv = (pthread_cond_t) cond;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_cond_destroy(pthread_cond_t *cv)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (cv == NULL || *cv == (pthread_cond_t) 0) {
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
esp_pthread_cond_t *cond = (esp_pthread_cond_t *) *cv;
|
||||
|
||||
_lock_acquire_recursive(&cond->lock);
|
||||
if (!TAILQ_EMPTY(&cond->waiter_list)) {
|
||||
ret = EBUSY;
|
||||
}
|
||||
_lock_release_recursive(&cond->lock);
|
||||
|
||||
if (ret == 0) {
|
||||
*cv = (pthread_cond_t) 0;
|
||||
_lock_close_recursive(&cond->lock);
|
||||
free(cond);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif
|
16
components/pthread/src/pthread_internal.h
Normal file
16
components/pthread/src/pthread_internal.h
Normal file
@ -0,0 +1,16 @@
|
||||
// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#pragma once
|
||||
|
||||
/* Early TLS cleanup hook: runs the calling thread's pthread key destructors
   and frees its value list before the task is deleted (called from
   pthread_task_func). '(void)' added — an empty C parameter list is an
   old-style unchecked declaration. */
void pthread_internal_local_storage_destructor_callback(void);
|
272
components/pthread/src/pthread_local_storage.c
Normal file
272
components/pthread/src/pthread_local_storage.c
Normal file
@ -0,0 +1,272 @@
|
||||
// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#include <errno.h>
|
||||
#include <pthread.h>
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include "esp_err.h"
|
||||
#include "esp_log.h"
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "freertos/task.h"
|
||||
#include "sys/lock.h"
|
||||
#include "sys/queue.h"
|
||||
|
||||
#include "pthread_internal.h"
|
||||
|
||||
#ifdef CONFIG_ENABLE_PTHREAD
|
||||
|
||||
#define PTHREAD_TLS_INDEX 1
|
||||
|
||||
#if portNUM_PROCESSORS == 1
|
||||
#undef portENTER_CRITICAL
|
||||
#undef portEXIT_CRITICAL
|
||||
|
||||
#define portENTER_CRITICAL(l) vPortEnterCritical()
|
||||
#define portEXIT_CRITICAL(l) vPortExitCritical()
|
||||
#endif
|
||||
|
||||
typedef void (*pthread_destructor_t)(void*);
|
||||
|
||||
/* This is a very naive implementation of key-indexed thread local storage, using two linked lists
|
||||
(one is a global list of registered keys, one per thread for thread local storage values).
|
||||
|
||||
It won't work well if lots of keys & thread-local values are stored (O(n) lookup for both),
|
||||
but it should work for small amounts of data.
|
||||
*/
|
||||
typedef struct key_entry_t_ {
|
||||
pthread_key_t key;
|
||||
pthread_destructor_t destructor;
|
||||
SLIST_ENTRY(key_entry_t_) next;
|
||||
} key_entry_t;
|
||||
|
||||
// List of all keys created with pthread_key_create()
|
||||
SLIST_HEAD(key_list_t, key_entry_t_) s_keys = SLIST_HEAD_INITIALIZER(s_keys);
|
||||
|
||||
#if portNUM_PROCESSORS > 1
|
||||
static portMUX_TYPE s_keys_lock = portMUX_INITIALIZER_UNLOCKED;
|
||||
#endif
|
||||
|
||||
// List of all value entries associated with a thread via pthread_setspecific()
|
||||
typedef struct value_entry_t_ {
|
||||
pthread_key_t key;
|
||||
void *value;
|
||||
SLIST_ENTRY(value_entry_t_) next;
|
||||
} value_entry_t;
|
||||
|
||||
// Type for the head of the list, as saved as a FreeRTOS thread local storage pointer
|
||||
SLIST_HEAD(values_list_t_, value_entry_t_);
|
||||
typedef struct values_list_t_ values_list_t;
|
||||
|
||||
int pthread_key_create(pthread_key_t *key, pthread_destructor_t destructor)
|
||||
{
|
||||
key_entry_t *new_key = malloc(sizeof(key_entry_t));
|
||||
if (new_key == NULL) {
|
||||
return ENOMEM;
|
||||
}
|
||||
|
||||
portENTER_CRITICAL(&s_keys_lock);
|
||||
|
||||
const key_entry_t *head = SLIST_FIRST(&s_keys);
|
||||
new_key->key = (head == NULL) ? 1 : (head->key + 1);
|
||||
new_key->destructor = destructor;
|
||||
*key = new_key->key;
|
||||
|
||||
SLIST_INSERT_HEAD(&s_keys, new_key, next);
|
||||
|
||||
portEXIT_CRITICAL(&s_keys_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static key_entry_t *find_key(pthread_key_t key)
|
||||
{
|
||||
portENTER_CRITICAL(&s_keys_lock);
|
||||
key_entry_t *result = NULL;;
|
||||
SLIST_FOREACH(result, &s_keys, next) {
|
||||
if(result->key == key) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
portEXIT_CRITICAL(&s_keys_lock);
|
||||
return result;
|
||||
}
|
||||
|
||||
int pthread_key_delete(pthread_key_t key)
|
||||
{
|
||||
|
||||
portENTER_CRITICAL(&s_keys_lock);
|
||||
|
||||
/* Ideally, we would also walk all tasks' thread local storage value_list here
|
||||
and delete any values associated with this key. We do not do this...
|
||||
*/
|
||||
|
||||
key_entry_t *entry = find_key(key);
|
||||
if (entry != NULL) {
|
||||
SLIST_REMOVE(&s_keys, entry, key_entry_t_, next);
|
||||
free(entry);
|
||||
}
|
||||
|
||||
portEXIT_CRITICAL(&s_keys_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Clean up callback for deleted tasks.
|
||||
|
||||
This is called from one of two places:
|
||||
|
||||
If the thread was created via pthread_create() then it's called by pthread_task_func() when that thread ends,
|
||||
and the FreeRTOS thread-local-storage is removed before the FreeRTOS task is deleted.
|
||||
|
||||
For other tasks, this is called when the FreeRTOS idle task performs its task cleanup after the task is deleted.
|
||||
|
||||
(The reason for calling it early for pthreads is to keep the timing consistent with "normal" pthreads, so after
|
||||
pthread_join() the task's destructors have all been called even if the idle task hasn't run cleanup yet.)
|
||||
*/
|
||||
static void pthread_local_storage_thread_deleted_callback(int index, void *v_tls)
|
||||
{
|
||||
values_list_t *tls = (values_list_t *)v_tls;
|
||||
assert(tls != NULL);
|
||||
|
||||
/* Walk the list, freeing all entries and calling destructors if they are registered */
|
||||
value_entry_t *entry = SLIST_FIRST(tls);
|
||||
while(entry != NULL) {
|
||||
// This is a little slow, walking the linked list of keys once per value,
|
||||
// but assumes that the thread's value list will have less entries
|
||||
// than the keys list
|
||||
key_entry_t *key = find_key(entry->key);
|
||||
if (key != NULL && key->destructor != NULL) {
|
||||
key->destructor(entry->value);
|
||||
}
|
||||
value_entry_t *next_entry = SLIST_NEXT(entry, next);
|
||||
free(entry);
|
||||
entry = next_entry;
|
||||
}
|
||||
free(tls);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_ENABLE_STATIC_TASK_CLEAN_UP_HOOK)
|
||||
/* Called from FreeRTOS task delete hook */
|
||||
void pthread_local_storage_cleanup(TaskHandle_t task)
|
||||
{
|
||||
void *tls = pvTaskGetThreadLocalStoragePointer(task, PTHREAD_TLS_INDEX);
|
||||
if (tls != NULL) {
|
||||
pthread_local_storage_thread_deleted_callback(PTHREAD_TLS_INDEX, tls);
|
||||
vTaskSetThreadLocalStoragePointer(task, PTHREAD_TLS_INDEX, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
/* The application's original vPortCleanUpTCB(), reachable through the
   linker's --wrap mechanism. */
void __real_vPortCleanUpTCB(void *tcb);

/* If static task cleanup hook is defined then its applications responsibility to define `vPortCleanUpTCB`.
   Here we are wrapping it, so that we can do pthread specific TLS cleanup and then invoke application
   real specific `vPortCleanUpTCB` */
void __wrap_vPortCleanUpTCB(void *tcb)
{
    /* Run pthread TLS destructors for the dying task first... */
    pthread_local_storage_cleanup(tcb);
    /* ...then chain to the application's own cleanup hook. */
    __real_vPortCleanUpTCB(tcb);
}
|
||||
#endif
|
||||
|
||||
/* this function called from pthread_task_func for "early" cleanup of TLS in a pthread */
|
||||
void pthread_internal_local_storage_destructor_callback()
|
||||
{
|
||||
void *tls = pvTaskGetThreadLocalStoragePointer(NULL, PTHREAD_TLS_INDEX);
|
||||
if (tls != NULL) {
|
||||
pthread_local_storage_thread_deleted_callback(PTHREAD_TLS_INDEX, tls);
|
||||
/* remove the thread-local-storage pointer to avoid the idle task cleanup
|
||||
calling it again...
|
||||
*/
|
||||
#if defined(CONFIG_ENABLE_STATIC_TASK_CLEAN_UP_HOOK)
|
||||
vTaskSetThreadLocalStoragePointer(NULL, PTHREAD_TLS_INDEX, NULL);
|
||||
#else
|
||||
vTaskSetThreadLocalStoragePointerAndDelCallback(NULL,
|
||||
PTHREAD_TLS_INDEX,
|
||||
NULL,
|
||||
NULL);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
/* Scan a thread's TLS value list for the entry stored under `key`.
 * Returns NULL when this thread holds no value for that key.
 * No locking: the list belongs to the calling thread.
 */
static value_entry_t *find_value(const values_list_t *list, pthread_key_t key)
{
    value_entry_t *it = NULL;
    SLIST_FOREACH(it, list, next) {
        if (it->key == key) {
            break;
        }
    }
    return it;
}
|
||||
|
||||
/* pthread_getspecific() - fetch this thread's value for `key`.
 *
 * Returns NULL when the thread has no TLS list yet, or no value is stored
 * under the key (indistinguishable from a stored NULL, as POSIX allows).
 */
void *pthread_getspecific(pthread_key_t key)
{
    values_list_t *tls =
        (values_list_t *) pvTaskGetThreadLocalStoragePointer(NULL, PTHREAD_TLS_INDEX);
    if (tls == NULL) {
        return NULL;
    }

    value_entry_t *entry = find_value(tls, key);
    return (entry != NULL) ? entry->value : NULL;
}
|
||||
|
||||
/* pthread_setspecific() - bind `value` to `key` for the calling thread.
 *
 * Returns 0 on success, ENOENT if the key was never created (behavior for
 * an invalid key is undefined by POSIX), or ENOMEM on allocation failure.
 *
 * The thread's value list is created lazily on first use and registered in
 * FreeRTOS thread-local storage so it is cleaned up on task deletion.
 * Storing NULL removes any existing entry (so its destructor will not run
 * at thread exit, per POSIX).
 */
int pthread_setspecific(pthread_key_t key, const void *value)
{
    key_entry_t *key_entry = find_key(key);
    if (key_entry == NULL) {
        return ENOENT; // this situation is undefined by pthreads standard
    }

    values_list_t *tls = pvTaskGetThreadLocalStoragePointer(NULL, PTHREAD_TLS_INDEX);
    if (tls == NULL) {
        /* First value stored by this thread: create its (empty) list head. */
        tls = calloc(1, sizeof(values_list_t));
        if (tls == NULL) {
            return ENOMEM;
        }
#if defined(CONFIG_ENABLE_STATIC_TASK_CLEAN_UP_HOOK)
        /* Cleanup happens via the wrapped vPortCleanUpTCB() hook. */
        vTaskSetThreadLocalStoragePointer(NULL, PTHREAD_TLS_INDEX, tls);
#else
        /* Register the delete callback so FreeRTOS frees the list (and runs
           destructors) when the task is deleted. */
        vTaskSetThreadLocalStoragePointerAndDelCallback(NULL,
                                                        PTHREAD_TLS_INDEX,
                                                        tls,
                                                        pthread_local_storage_thread_deleted_callback);
#endif
    }

    value_entry_t *entry = find_value(tls, key);
    if (entry != NULL) {
        if (value != NULL) {
            // cast on next line is necessary as pthreads API uses
            // 'const void *' here but elsewhere uses 'void *'
            entry->value = (void *) value;
        } else { // value == NULL, remove the entry
            SLIST_REMOVE(tls, entry, value_entry_t_, next);
            free(entry);
        }
    } else if (value != NULL) {
        /* No existing entry for this key: allocate and insert a new one. */
        entry = malloc(sizeof(value_entry_t));
        if (entry == NULL) {
            return ENOMEM;
        }
        entry->key = key;
        entry->value = (void *) value; // see note above about cast
        SLIST_INSERT_HEAD(tls, entry, next);
    }

    return 0;
}
|
||||
|
||||
#endif
|
Reference in New Issue
Block a user