diff --git a/components/partition_table/Kconfig.projbuild b/components/partition_table/Kconfig.projbuild
new file mode 100644
index 00000000..a6176e0f
--- /dev/null
+++ b/components/partition_table/Kconfig.projbuild
@@ -0,0 +1,67 @@
+menu "Partition Table"
+
+choice PARTITION_TABLE_TYPE
+    prompt "Partition Table"
+    default PARTITION_TABLE_SINGLE_APP
+    help
+        The partition table to flash to the ESP32. The partition table
+        determines where apps, data and other resources are expected to
+        be found.
+
+        The predefined partition table CSV descriptions can be found
+        in the components/partition_table directory. Otherwise it's
+        possible to create a new custom partition CSV for your application.
+
+config PARTITION_TABLE_SINGLE_APP
+    bool "Single factory app, no OTA"
+config PARTITION_TABLE_TWO_OTA
+    bool "Factory app, two OTA definitions"
+config PARTITION_TABLE_CUSTOM
+    bool "Custom partition table CSV"
+endchoice
+
+config PARTITION_TABLE_CUSTOM_FILENAME
+    string "Custom partition CSV file" if PARTITION_TABLE_CUSTOM
+    default "partitions.csv"
+    help
+        Name of the custom partition CSV file. This path is evaluated
+        relative to the project root directory.
+
+config PARTITION_TABLE_CUSTOM_APP_BIN_OFFSET
+    hex "Factory app partition offset" if PARTITION_TABLE_CUSTOM
+    default 0x10000
+    help
+        If using a custom partition table, specify the offset in the flash
+        where 'make flash' should write the built app.
+
+config PARTITION_TABLE_CUSTOM_PHY_DATA_OFFSET
+    hex "PHY data partition offset" if PARTITION_TABLE_CUSTOM
+    depends on ESP32_PHY_INIT_DATA_IN_PARTITION
+    default 0xf000
+    help
+        If using a custom partition table, specify the offset in the flash
+        where 'make flash' should write the initial PHY data file.
+
+
+config PARTITION_TABLE_FILENAME
+    string
+    default "partitions_singleapp.csv" if PARTITION_TABLE_SINGLE_APP && !ESP32_ENABLE_COREDUMP_TO_FLASH
+    default "partitions_singleapp_coredump.csv" if PARTITION_TABLE_SINGLE_APP && ESP32_ENABLE_COREDUMP_TO_FLASH
+    default "partitions_two_ota.csv" if PARTITION_TABLE_TWO_OTA && !ESP32_ENABLE_COREDUMP_TO_FLASH
+    default "partitions_two_ota_coredump.csv" if PARTITION_TABLE_TWO_OTA && ESP32_ENABLE_COREDUMP_TO_FLASH
+    default PARTITION_TABLE_CUSTOM_FILENAME if PARTITION_TABLE_CUSTOM
+
+config APP_OFFSET
+    hex
+    default PARTITION_TABLE_CUSTOM_APP_BIN_OFFSET if PARTITION_TABLE_CUSTOM
+    default 0x10000  # this is the factory app offset used by the default tables
+
+config PHY_DATA_OFFSET
+    depends on ESP32_PHY_INIT_DATA_IN_PARTITION
+    hex
+    default PARTITION_TABLE_CUSTOM_PHY_DATA_OFFSET if PARTITION_TABLE_CUSTOM
+    default 0xf000  # this is the PHY data offset used by the default tables
+
+endmenu
+
+
diff --git a/components/partition_table/Makefile.projbuild b/components/partition_table/Makefile.projbuild
new file mode 100644
index 00000000..a7d4f2ec
--- /dev/null
+++ b/components/partition_table/Makefile.projbuild
@@ -0,0 +1,67 @@
+#
+# Partition table
+#
+# The partition table is not a real component that gets linked into
+# the project. Instead, it is a standalone project to generate
+# the partition table binary as part of the build process. This
+# binary is then added to the list of files for esptool.py to flash.
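For reference, a project that selects the custom table option in the Kconfig above would typically carry entries like the following in its sdkconfig (illustrative values, not defaults introduced by this change):

CONFIG_PARTITION_TABLE_CUSTOM=y
CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions.csv"
CONFIG_PARTITION_TABLE_CUSTOM_APP_BIN_OFFSET=0x10000

together with a partitions.csv in the project root along these lines (a minimal sketch mirroring the predefined single-app layout; the names, offsets and sizes are only an example):

# Name,     Type, SubType, Offset,  Size,   Flags
nvs,        data, nvs,     0x9000,  0x6000,
phy_init,   data, phy,     0xf000,  0x1000,
factory,    app,  factory, 0x10000, 1M,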
+#
+.PHONY: partition_table partition_table-flash partition_table-clean
+
+# NB: gen_esp32part.py is in the component dir, run via the IDF $(PYTHON) interpreter
+GEN_ESP32PART := $(PYTHON) $(COMPONENT_PATH)/gen_esp32part.py -q
+
+# Has a matching value in bootloader_support esp_flash_partitions.h
+PARTITION_TABLE_OFFSET := 0x8000
+
+# if CONFIG_PARTITION_TABLE_FILENAME is unset, it means we haven't re-generated the config yet...
+ifneq ("$(CONFIG_PARTITION_TABLE_FILENAME)","")
+
+ifndef PARTITION_TABLE_CSV_PATH
+# Path to the partition CSV file is relative to the project path for custom
+# partition CSV files, but relative to the component dir otherwise.
+PARTITION_TABLE_ROOT := $(call dequote,$(if $(CONFIG_PARTITION_TABLE_CUSTOM),$(PROJECT_PATH),$(COMPONENT_PATH)))
+PARTITION_TABLE_CSV_PATH := $(call dequote,$(abspath $(PARTITION_TABLE_ROOT)/$(call dequote,$(CONFIG_PARTITION_TABLE_FILENAME))))
+endif
+
+PARTITION_TABLE_CSV_NAME := $(notdir $(PARTITION_TABLE_CSV_PATH))
+
+PARTITION_TABLE_BIN := $(BUILD_DIR_BASE)/$(PARTITION_TABLE_CSV_NAME:.csv=.bin)
+
+ifdef CONFIG_SECURE_BOOT_BUILD_SIGNED_BINARIES
+PARTITION_TABLE_BIN_UNSIGNED := $(PARTITION_TABLE_BIN:.bin=-unsigned.bin)
+# add an extra signing step for the secure partition table
+$(PARTITION_TABLE_BIN): $(PARTITION_TABLE_BIN_UNSIGNED) $(SDKCONFIG_MAKEFILE) $(SECURE_BOOT_SIGNING_KEY)
+	$(ESPSECUREPY) sign_data --keyfile $(SECURE_BOOT_SIGNING_KEY) -o $@ $<
+else
+# secure bootloader disabled, both files are the same
+PARTITION_TABLE_BIN_UNSIGNED := $(PARTITION_TABLE_BIN)
+endif
+
+$(PARTITION_TABLE_BIN_UNSIGNED): $(PARTITION_TABLE_CSV_PATH) $(SDKCONFIG_MAKEFILE)
+	@echo "Building partitions from $(PARTITION_TABLE_CSV_PATH)..."
+	$(GEN_ESP32PART) $< $@
+
+all_binaries: $(PARTITION_TABLE_BIN)
+
+PARTITION_TABLE_FLASH_CMD = $(ESPTOOLPY_SERIAL) write_flash $(PARTITION_TABLE_OFFSET) $(PARTITION_TABLE_BIN)
+ESPTOOL_ALL_FLASH_ARGS += $(PARTITION_TABLE_OFFSET) $(PARTITION_TABLE_BIN)
+
+partition_table: $(PARTITION_TABLE_BIN)
+	@echo "Partition table binary generated. Contents:"
+	@echo $(SEPARATOR)
+	$(GEN_ESP32PART) $<
+	@echo $(SEPARATOR)
+	@echo "Partition flashing command:"
+	@echo "$(PARTITION_TABLE_FLASH_CMD)"
+
+partition_table-flash: $(PARTITION_TABLE_BIN)
+	@echo "Flashing partition table..."
+	$(PARTITION_TABLE_FLASH_CMD)
+
+partition_table-clean:
+	rm -f $(PARTITION_TABLE_BIN)
+
+clean: partition_table-clean
+
+endif
diff --git a/components/partition_table/component.mk b/components/partition_table/component.mk
new file mode 100644
index 00000000..7d857daa
--- /dev/null
+++ b/components/partition_table/component.mk
@@ -0,0 +1,5 @@
+# partition table component is special, because it doesn't contain any
+# IDF source files. It only adds steps via Makefile.projbuild &
+# Kconfig.projbuild
+COMPONENT_CONFIG_ONLY := 1
+
diff --git a/components/partition_table/gen_esp32part.py b/components/partition_table/gen_esp32part.py
new file mode 100755
index 00000000..897e637d
--- /dev/null
+++ b/components/partition_table/gen_esp32part.py
@@ -0,0 +1,389 @@
+#!/usr/bin/env python
+#
+# ESP32 partition table generation tool
+#
+# Converts partition tables to/from CSV and binary formats.
+#
+# See http://esp-idf.readthedocs.io/en/latest/api-guides/partition-tables.html
+# for explanation of partition table structure and uses.
+#
+# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
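The build hooks above, and the gen_esp32part.py tool whose source follows, can also be exercised by hand; a rough sketch of the equivalent manual steps (paths, port and filenames are placeholders, not mandated by this change):

make partition_table                                                                       # build the binary and print the flashing command
make partition_table-flash                                                                 # flash it to PARTITION_TABLE_OFFSET (0x8000)

python components/partition_table/gen_esp32part.py partitions.csv build/partitions.bin    # CSV -> binary
python components/partition_table/gen_esp32part.py build/partitions.bin                   # binary -> CSV, printed to stdout
esptool.py --port /dev/ttyUSB0 write_flash 0x8000 build/partitions.bin                    # manual equivalent of partition_table-flash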
+# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import print_function, division +import argparse +import os +import re +import struct +import sys + +MAX_PARTITION_LENGTH = 0xC00 # 3K for partition data (96 entries) leaves 1K in a 4K sector for signature + +__version__ = '1.0' + +quiet = False + +def status(msg): + """ Print status message to stderr """ + if not quiet: + critical(msg) + +def critical(msg): + """ Print critical message to stderr """ + if not quiet: + sys.stderr.write(msg) + sys.stderr.write('\n') + +class PartitionTable(list): + def __init__(self): + super(PartitionTable, self).__init__(self) + + @classmethod + def from_csv(cls, csv_contents): + res = PartitionTable() + lines = csv_contents.splitlines() + + def expand_vars(f): + f = os.path.expandvars(f) + m = re.match(r'(?= MAX_PARTITION_LENGTH: + raise InputError("Binary partition table length (%d) longer than max" % len(result)) + result += b"\xFF" * (MAX_PARTITION_LENGTH - len(result)) # pad the sector, for signing + return result + + def to_csv(self, simple_formatting=False): + rows = [ "# Espressif ESP32 Partition Table", + "# Name, Type, SubType, Offset, Size, Flags" ] + rows += [ x.to_csv(simple_formatting) for x in self ] + return "\n".join(rows) + "\n" + +class PartitionDefinition(object): + APP_TYPE = 0x00 + DATA_TYPE = 0x01 + TYPES = { + "app" : APP_TYPE, + "data" : DATA_TYPE, + } + + # Keep this map in sync with esp_partition_subtype_t enum in esp_partition.h + SUBTYPES = { + APP_TYPE : { + "factory" : 0x00, + "test" : 0x20, + }, + DATA_TYPE : { + "ota" : 0x00, + "phy" : 0x01, + "nvs" : 0x02, + "coredump" : 0x03, + "esphttpd" : 0x80, + "fat" : 0x81, + "spiffs" : 0x82, + }, + } + + MAGIC_BYTES = b"\xAA\x50" + + ALIGNMENT = { + APP_TYPE : 0x10000, + DATA_TYPE : 0x04, + } + + # dictionary maps flag name (as used in CSV flags list, property name) + # to bit set in flags words in binary format + FLAGS = { + "encrypted" : 0 + } + + # add subtypes for the 16 OTA slot values ("ota_XXX, etc.") + for ota_slot in range(16): + SUBTYPES[TYPES["app"]]["ota_%d" % ota_slot] = 0x10 + ota_slot + + def __init__(self): + self.name = "" + self.type = None + self.subtype = None + self.offset = None + self.size = None + self.encrypted = False + + @classmethod + def from_csv(cls, line): + """ Parse a line from the CSV """ + line_w_defaults = line + ",,,," # lazy way to support default fields + fields = [ f.strip() for f in line_w_defaults.split(",") ] + + res = PartitionDefinition() + res.name = fields[0] + res.type = res.parse_type(fields[1]) + res.subtype = res.parse_subtype(fields[2]) + res.offset = res.parse_address(fields[3]) + res.size = res.parse_address(fields[4]) + if res.size is None: + raise InputError("Size field can't be empty") + + flags = fields[5].split(":") + for flag in flags: + if flag in cls.FLAGS: + setattr(res, flag, True) + elif len(flag) > 0: + raise InputError("CSV flag column contains unknown flag '%s'" % (flag)) + + return res + + def __eq__(self, other): + return self.name == other.name and self.type == other.type \ + and self.subtype == other.subtype and self.offset == other.offset \ + and self.size == other.size + + def 
__repr__(self): + def maybe_hex(x): + return "0x%x" % x if x is not None else "None" + return "PartitionDefinition('%s', 0x%x, 0x%x, %s, %s)" % (self.name, self.type, self.subtype or 0, + maybe_hex(self.offset), maybe_hex(self.size)) + + def __str__(self): + return "Part '%s' %d/%d @ 0x%x size 0x%x" % (self.name, self.type, self.subtype, self.offset or -1, self.size or -1) + + def __cmp__(self, other): + return self.offset - other.offset + + def parse_type(self, strval): + if strval == "": + raise InputError("Field 'type' can't be left empty.") + return parse_int(strval, self.TYPES) + + def parse_subtype(self, strval): + if strval == "": + return 0 # default + return parse_int(strval, self.SUBTYPES.get(self.type, {})) + + def parse_address(self, strval): + if strval == "": + return None # PartitionTable will fill in default + return parse_int(strval) + + def verify(self): + if self.type is None: + raise ValidationError(self, "Type field is not set") + if self.subtype is None: + raise ValidationError(self, "Subtype field is not set") + if self.offset is None: + raise ValidationError(self, "Offset field is not set") + align = self.ALIGNMENT.get(self.type, 4) + if self.offset % align: + raise ValidationError(self, "Offset 0x%x is not aligned to 0x%x" % (self.offset, align)) + if self.size is None: + raise ValidationError(self, "Size field is not set") + + STRUCT_FORMAT = "<2sBBLL16sL" + + @classmethod + def from_binary(cls, b): + if len(b) != 32: + raise InputError("Partition definition length must be exactly 32 bytes. Got %d bytes." % len(b)) + res = cls() + (magic, res.type, res.subtype, res.offset, + res.size, res.name, flags) = struct.unpack(cls.STRUCT_FORMAT, b) + if b"\x00" in res.name: # strip null byte padding from name string + res.name = res.name[:res.name.index(b"\x00")] + res.name = res.name.decode() + if magic != cls.MAGIC_BYTES: + raise InputError("Invalid magic bytes (%r) for partition definition" % magic) + for flag,bit in cls.FLAGS.items(): + if flags & (1< +#include +#include "unity.h" +#include "test_utils.h" +#include "esp_partition.h" + + +TEST_CASE("Can read partition table", "[partition]") +{ + + const esp_partition_t *p = esp_partition_find_first(ESP_PARTITION_TYPE_APP, ESP_PARTITION_SUBTYPE_ANY, NULL); + TEST_ASSERT_NOT_NULL(p); + TEST_ASSERT_EQUAL(0x10000, p->address); + TEST_ASSERT_EQUAL(ESP_PARTITION_SUBTYPE_APP_FACTORY, p->subtype); + + esp_partition_iterator_t it = esp_partition_find(ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_ANY, NULL); + TEST_ASSERT_NOT_NULL(it); + int count = 0; + const esp_partition_t* prev = NULL; + for (; it != NULL; it = esp_partition_next(it)) { + const esp_partition_t *p = esp_partition_get(it); + TEST_ASSERT_NOT_NULL(p); + if (prev) { + TEST_ASSERT_TRUE_MESSAGE(prev->address < p->address, "incorrect partition order"); + } + prev = p; + ++count; + } + esp_partition_iterator_release(it); + TEST_ASSERT_EQUAL(4, count); +} + +TEST_CASE("Can write, read, mmap partition", "[partition][ignore]") +{ + const esp_partition_t *p = get_test_data_partition(); + printf("Using partition %s at 0x%x, size 0x%x\n", p->label, p->address, p->size); + TEST_ASSERT_NOT_NULL(p); + const size_t max_size = 2 * SPI_FLASH_SEC_SIZE; + uint8_t *data = (uint8_t *) malloc(max_size); + TEST_ASSERT_NOT_NULL(data); + + TEST_ASSERT_EQUAL(ESP_OK, esp_partition_erase_range(p, 0, p->size)); + + srand(0); + size_t block_size; + for (size_t offset = 0; offset < p->size; offset += block_size) { + block_size = ((rand() + 4) % max_size) & (~0x3); + size_t left = p->size - 
offset; + if (block_size > left) { + block_size = left; + } + for (size_t i = 0; i < block_size / 4; ++i) { + ((uint32_t *) (data))[i] = rand(); + } + TEST_ASSERT_EQUAL(ESP_OK, esp_partition_write(p, offset, data, block_size)); + } + + srand(0); + for (size_t offset = 0; offset < p->size; offset += block_size) { + block_size = ((rand() + 4) % max_size) & (~0x3); + size_t left = p->size - offset; + if (block_size > left) { + block_size = left; + } + TEST_ASSERT_EQUAL(ESP_OK, esp_partition_read(p, offset, data, block_size)); + for (size_t i = 0; i < block_size / 4; ++i) { + TEST_ASSERT_EQUAL(rand(), ((uint32_t *) data)[i]); + } + } + + free(data); + + const uint32_t *mmap_data; + spi_flash_mmap_handle_t mmap_handle; + size_t begin = 3000; + size_t size = 64000; //chosen so size is smaller than 64K but the mmap straddles 2 MMU blocks + TEST_ASSERT_EQUAL(ESP_OK, esp_partition_mmap(p, begin, size, SPI_FLASH_MMAP_DATA, + (const void **)&mmap_data, &mmap_handle)); + srand(0); + for (size_t offset = 0; offset < p->size; offset += block_size) { + block_size = ((rand() + 4) % max_size) & (~0x3); + size_t left = p->size - offset; + if (block_size > left) { + block_size = left; + } + for (size_t i = 0; i < block_size / 4; ++i) { + size_t pos = offset + i * 4; + uint32_t expected = rand(); + if (pos < begin || pos >= (begin + size)) { + continue; + } + TEST_ASSERT_EQUAL(expected, mmap_data[(pos - begin) / 4]); + } + } + + spi_flash_munmap(mmap_handle); +} diff --git a/components/partition_table/test_gen_esp32part_host/gen_esp32part_tests.py b/components/partition_table/test_gen_esp32part_host/gen_esp32part_tests.py new file mode 100755 index 00000000..46fe45c2 --- /dev/null +++ b/components/partition_table/test_gen_esp32part_host/gen_esp32part_tests.py @@ -0,0 +1,354 @@ +#!/usr/bin/env python +from __future__ import print_function, division +import unittest +import struct +import csv +import sys +import subprocess +import tempfile +import os +sys.path.append("..") +from gen_esp32part import * + +SIMPLE_CSV = """ +# Name,Type,SubType,Offset,Size,Flags +factory,0,2,65536,1048576, +""" + +LONGER_BINARY_TABLE = b"" +# type 0x00, subtype 0x00, +# offset 64KB, size 1MB +LONGER_BINARY_TABLE += b"\xAA\x50\x00\x00" + \ + b"\x00\x00\x01\x00" + \ + b"\x00\x00\x10\x00" + \ + b"factory\0" + (b"\0"*8) + \ + b"\x00\x00\x00\x00" +# type 0x01, subtype 0x20, +# offset 0x110000, size 128KB +LONGER_BINARY_TABLE += b"\xAA\x50\x01\x20" + \ + b"\x00\x00\x11\x00" + \ + b"\x00\x02\x00\x00" + \ + b"data" + (b"\0"*12) + \ + b"\x00\x00\x00\x00" +# type 0x10, subtype 0x00, +# offset 0x150000, size 1MB +LONGER_BINARY_TABLE += b"\xAA\x50\x10\x00" + \ + b"\x00\x00\x15\x00" + \ + b"\x00\x10\x00\x00" + \ + b"second" + (b"\0"*10) + \ + b"\x00\x00\x00\x00" +LONGER_BINARY_TABLE += b"\xFF" * 32 + + +def _strip_trailing_ffs(binary_table): + """ + Strip all FFs down to the last 32 bytes (terminating entry) + """ + while binary_table.endswith(b"\xFF"*64): + binary_table = binary_table[0:len(binary_table)-32] + return binary_table + + +class CSVParserTests(unittest.TestCase): + + def test_simple_partition(self): + table = PartitionTable.from_csv(SIMPLE_CSV) + self.assertEqual(len(table), 1) + self.assertEqual(table[0].name, "factory") + self.assertEqual(table[0].type, 0) + self.assertEqual(table[0].subtype, 2) + self.assertEqual(table[0].offset, 65536) + self.assertEqual(table[0].size, 1048576) + + + def test_require_type(self): + csv = """ +# Name,Type, SubType,Offset,Size +ihavenotype, +""" + with self.assertRaisesRegexp(InputError, "type"): 
+ PartitionTable.from_csv(csv) + + + def test_type_subtype_names(self): + csv_magicnumbers = """ +# Name, Type, SubType, Offset, Size +myapp, 0, 0,, 0x100000 +myota_0, 0, 0x10,, 0x100000 +myota_1, 0, 0x11,, 0x100000 +myota_15, 0, 0x1f,, 0x100000 +mytest, 0, 0x20,, 0x100000 +myota_status, 1, 0,, 0x100000 + """ + csv_nomagicnumbers = """ +# Name, Type, SubType, Offset, Size +myapp, app, factory,, 0x100000 +myota_0, app, ota_0,, 0x100000 +myota_1, app, ota_1,, 0x100000 +myota_15, app, ota_15,, 0x100000 +mytest, app, test,, 0x100000 +myota_status, data, ota,, 0x100000 +""" + # make two equivalent partition tables, one using + # magic numbers and one using shortcuts. Ensure they match + magic = PartitionTable.from_csv(csv_magicnumbers) + magic.verify() + nomagic = PartitionTable.from_csv(csv_nomagicnumbers) + nomagic.verify() + + self.assertEqual(nomagic["myapp"].type, 0) + self.assertEqual(nomagic["myapp"].subtype, 0) + self.assertEqual(nomagic["myapp"], magic["myapp"]) + self.assertEqual(nomagic["myota_0"].type, 0) + self.assertEqual(nomagic["myota_0"].subtype, 0x10) + self.assertEqual(nomagic["myota_0"], magic["myota_0"]) + self.assertEqual(nomagic["myota_15"], magic["myota_15"]) + self.assertEqual(nomagic["mytest"], magic["mytest"]) + self.assertEqual(nomagic["myota_status"], magic["myota_status"]) + + #self.assertEqual(nomagic.to_binary(), magic.to_binary()) + + def test_unit_suffixes(self): + csv = """ +# Name, Type, Subtype, Offset, Size +one_megabyte, app, factory, 64k, 1M +""" + t = PartitionTable.from_csv(csv) + t.verify() + self.assertEqual(t[0].offset, 64*1024) + self.assertEqual(t[0].size, 1*1024*1024) + + def test_default_offsets(self): + csv = """ +# Name, Type, Subtype, Offset, Size +first, app, factory,, 1M +second, data, 0x15,, 1M +minidata, data, 0x40,, 32K +otherapp, app, factory,, 1M + """ + t = PartitionTable.from_csv(csv) + # 'first' + self.assertEqual(t[0].offset, 0x010000) # 64KB boundary as it's an app image + self.assertEqual(t[0].size, 0x100000) # Size specified in CSV + # 'second' + self.assertEqual(t[1].offset, 0x110000) # prev offset+size + self.assertEqual(t[1].size, 0x100000) # Size specified in CSV + # 'minidata' + self.assertEqual(t[2].offset, 0x210000) + # 'otherapp' + self.assertEqual(t[3].offset, 0x220000) # 64KB boundary as it's an app image + + def test_negative_size_to_offset(self): + csv = """ +# Name, Type, Subtype, Offset, Size +first, app, factory, 0x10000, -2M +second, data, 0x15, , 1M + """ + t = PartitionTable.from_csv(csv) + t.verify() + # 'first' + self.assertEqual(t[0].offset, 0x10000) # in CSV + self.assertEqual(t[0].size, 0x200000 - t[0].offset) # Up to 2M + # 'second' + self.assertEqual(t[1].offset, 0x200000) # prev offset+size + + def test_overlapping_offsets_fail(self): + csv = """ +first, app, factory, 0x100000, 2M +second, app, ota_0, 0x200000, 1M +""" + t = PartitionTable.from_csv(csv) + with self.assertRaisesRegexp(InputError, "overlap"): + t.verify() + +class BinaryOutputTests(unittest.TestCase): + def test_binary_entry(self): + csv = """ +first, 0x30, 0xEE, 0x100400, 0x300000 +""" + t = PartitionTable.from_csv(csv) + tb = _strip_trailing_ffs(t.to_binary()) + self.assertEqual(len(tb), 64) + self.assertEqual(b'\xAA\x50', tb[0:2]) # magic + self.assertEqual(b'\x30\xee', tb[2:4]) # type, subtype + eo, es = struct.unpack("
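For readers following the hand-assembled byte strings in these tests: each partition entry is one 32-byte record with the layout given by PartitionDefinition.STRUCT_FORMAT above. A minimal standalone sketch (illustrative values only) that packs and unpacks one such record:

from __future__ import print_function
import struct

# 32-byte on-flash entry layout, same as PartitionDefinition.STRUCT_FORMAT
ENTRY_FORMAT = "<2sBBLL16sL"

# Illustrative values: a factory app partition at 0x10000, 1 MB, no flags.
# The packed bytes should match the first "factory" entry of LONGER_BINARY_TABLE above.
entry = struct.pack(ENTRY_FORMAT,
                    b"\xAA\x50",   # magic bytes
                    0x00,          # type: app
                    0x00,          # subtype: factory
                    0x10000,       # offset
                    0x100000,      # size
                    b"factory",    # name, null-padded to 16 bytes by struct
                    0)             # flags
assert len(entry) == 32            # the entry size from_binary() expects

magic, ptype, subtype, offset, size, name, flags = struct.unpack(ENTRY_FORMAT, entry)
print(name.rstrip(b"\x00").decode(), hex(offset), hex(size))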