feat(partition_table): Add partition table base file from esp-idf

Commit ID: c2b39f4a
Author: Dong Heng
Date: 2018-06-11 14:06:50 +08:00
parent 1c10af9402
commit 3e11873423
11 changed files with 1012 additions and 0 deletions

View File

@@ -0,0 +1,67 @@
menu "Partition Table"
choice PARTITION_TABLE_TYPE
prompt "Partition Table"
default PARTITION_TABLE_SINGLE_APP
help
The partition table to flash to the ESP32. The partition table
determines where apps, data and other resources are expected to
be found.
The predefined partition table CSV descriptions can be found
in the components/partition_table directory. Otherwise it's
possible to create a new custom partition CSV for your application.
config PARTITION_TABLE_SINGLE_APP
bool "Single factory app, no OTA"
config PARTITION_TABLE_TWO_OTA
bool "Factory app, two OTA definitions"
config PARTITION_TABLE_CUSTOM
bool "Custom partition table CSV"
endchoice
config PARTITION_TABLE_CUSTOM_FILENAME
string "Custom partition CSV file" if PARTITION_TABLE_CUSTOM
default "partitions.csv"
help
Name of the custom partition CSV filename. This path is evaluated
relative to the project root directory.
config PARTITION_TABLE_CUSTOM_APP_BIN_OFFSET
hex "Factory app partition offset" if PARTITION_TABLE_CUSTOM
default 0x10000
help
If using a custom partition table, specify the offset in the flash
where 'make flash' should write the built app.
config PARTITION_TABLE_CUSTOM_PHY_DATA_OFFSET
hex "PHY data partition offset" if PARTITION_TABLE_CUSTOM
depends on ESP32_PHY_INIT_DATA_IN_PARTITION
default 0xf000
help
If using a custom partition table, specify the offset in the flash
where 'make flash' should write the initial PHY data file.
config PARTITION_TABLE_FILENAME
string
default "partitions_singleapp.csv" if PARTITION_TABLE_SINGLE_APP && !ESP32_ENABLE_COREDUMP_TO_FLASH
default "partitions_singleapp_coredump.csv" if PARTITION_TABLE_SINGLE_APP && ESP32_ENABLE_COREDUMP_TO_FLASH
default "partitions_two_ota.csv" if PARTITION_TABLE_TWO_OTA && !ESP32_ENABLE_COREDUMP_TO_FLASH
default "partitions_two_ota_coredump.csv" if PARTITION_TABLE_TWO_OTA && ESP32_ENABLE_COREDUMP_TO_FLASH
default PARTITION_TABLE_CUSTOM_FILENAME if PARTITION_TABLE_CUSTOM
config APP_OFFSET
hex
default PARTITION_TABLE_CUSTOM_APP_BIN_OFFSET if PARTITION_TABLE_CUSTOM
default 0x10000 # this is the factory app offset used by the default tables
config PHY_DATA_OFFSET
depends on ESP32_PHY_INIT_DATA_IN_PARTITION
hex
default PARTITION_TABLE_CUSTOM_PHY_DATA_OFFSET if PARTITION_TABLE_CUSTOM
default 0xf000 # this is the PHY data offset used by the default tables
endmenu
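The choice above selects one of the predefined CSVs in components/partition_table, or a custom CSV resolved relative to the project root with the factory app at 0x10000 and PHY data at 0xf000 by default. As a minimal sketch (not one of the files in this commit), such a custom table can be parsed and checked with the gen_esp32part.py module added below; the CSV content and import path are assumptions for illustration:

from gen_esp32part import PartitionTable

# Hypothetical custom CSV; offsets match the Kconfig defaults above
# (PHY data at 0xf000, factory app at 0x10000).
CUSTOM_CSV = """
# Name,   Type, SubType, Offset,  Size
nvs,      data, nvs,     0x9000,  0x6000
phy_init, data, phy,     0xf000,  0x1000
factory,  app,  factory, 0x10000, 1M
"""

table = PartitionTable.from_csv(CUSTOM_CSV)
table.verify()                       # raises on overlap or misalignment
print(hex(table["factory"].offset))  # 0x10000, the APP_OFFSET default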

View File

@@ -0,0 +1,67 @@
#
# Partition table
#
# The partition table is not a real component that gets linked into
# the project. Instead, it is a standalone project to generate
# the partition table binary as part of the build process. This
# binary is then added to the list of files for esptool.py to flash.
#
.PHONY: partition_table partition_table-flash partition_table-clean
# NB: gen_esp32part.py lives in the component dir and is invoked via $(PYTHON)
GEN_ESP32PART := $(PYTHON) $(COMPONENT_PATH)/gen_esp32part.py -q
# Has a matching value in bootloader_support esp_flash_partitions.h
PARTITION_TABLE_OFFSET := 0x8000
# If CONFIG_PARTITION_TABLE_FILENAME is unset, the config hasn't been regenerated yet...
ifneq ("$(CONFIG_PARTITION_TABLE_FILENAME)","")
ifndef PARTITION_TABLE_CSV_PATH
# Path to partition CSV file is relative to project path for custom
# partition CSV files, but relative to component dir otherwise.
PARTITION_TABLE_ROOT := $(call dequote,$(if $(CONFIG_PARTITION_TABLE_CUSTOM),$(PROJECT_PATH),$(COMPONENT_PATH)))
PARTITION_TABLE_CSV_PATH := $(call dequote,$(abspath $(PARTITION_TABLE_ROOT)/$(call dequote,$(CONFIG_PARTITION_TABLE_FILENAME))))
endif
PARTITION_TABLE_CSV_NAME := $(notdir $(PARTITION_TABLE_CSV_PATH))
PARTITION_TABLE_BIN := $(BUILD_DIR_BASE)/$(PARTITION_TABLE_CSV_NAME:.csv=.bin)
ifdef CONFIG_SECURE_BOOT_BUILD_SIGNED_BINARIES
PARTITION_TABLE_BIN_UNSIGNED := $(PARTITION_TABLE_BIN:.bin=-unsigned.bin)
# add an extra signing step for secure partition table
$(PARTITION_TABLE_BIN): $(PARTITION_TABLE_BIN_UNSIGNED) $(SDKCONFIG_MAKEFILE) $(SECURE_BOOT_SIGNING_KEY)
$(ESPSECUREPY) sign_data --keyfile $(SECURE_BOOT_SIGNING_KEY) -o $@ $<
else
# secure bootloader disabled, both files are the same
PARTITION_TABLE_BIN_UNSIGNED := $(PARTITION_TABLE_BIN)
endif
$(PARTITION_TABLE_BIN_UNSIGNED): $(PARTITION_TABLE_CSV_PATH) $(SDKCONFIG_MAKEFILE)
@echo "Building partitions from $(PARTITION_TABLE_CSV_PATH)..."
$(GEN_ESP32PART) $< $@
all_binaries: $(PARTITION_TABLE_BIN)
PARTITION_TABLE_FLASH_CMD = $(ESPTOOLPY_SERIAL) write_flash $(PARTITION_TABLE_OFFSET) $(PARTITION_TABLE_BIN)
ESPTOOL_ALL_FLASH_ARGS += $(PARTITION_TABLE_OFFSET) $(PARTITION_TABLE_BIN)
partition_table: $(PARTITION_TABLE_BIN)
@echo "Partition table binary generated. Contents:"
@echo $(SEPARATOR)
$(GEN_ESP32PART) $<
@echo $(SEPARATOR)
@echo "Partition flashing command:"
@echo "$(PARTITION_TABLE_FLASH_CMD)"
partition_table-flash: $(PARTITION_TABLE_BIN)
@echo "Flashing partition table..."
$(PARTITION_TABLE_FLASH_CMD)
partition_table-clean:
rm -f $(PARTITION_TABLE_BIN)
clean: partition_table-clean
endif
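As the header comment explains, this component only turns the configured CSV into a binary and adds it to the esptool.py flash arguments at PARTITION_TABLE_OFFSET (0x8000). A rough sketch of the same generation step outside of make, with hypothetical paths:

import subprocess, sys

csv_path = "partitions_singleapp.csv"        # what CONFIG_PARTITION_TABLE_FILENAME selects
bin_path = "build/partitions_singleapp.bin"  # $(PARTITION_TABLE_BIN)

# Same invocation as the $(PARTITION_TABLE_BIN_UNSIGNED) rule: $(GEN_ESP32PART) $< $@
subprocess.check_call([sys.executable, "components/partition_table/gen_esp32part.py",
                       "-q", csv_path, bin_path])

# The build then flashes the result with the equivalent of:
#   esptool.py write_flash 0x8000 build/partitions_singleapp.bin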

View File

@@ -0,0 +1,5 @@
# partition table component is special, because it doesn't contain any
# IDF source files. It only adds steps via Makefile.projbuild &
# Kconfig.projbuild
COMPONENT_CONFIG_ONLY := 1

View File

@@ -0,0 +1,389 @@
#!/usr/bin/env python
#
# ESP32 partition table generation tool
#
# Converts partition tables to/from CSV and binary formats.
#
# See http://esp-idf.readthedocs.io/en/latest/api-guides/partition-tables.html
# for explanation of partition table structure and uses.
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import argparse
import os
import re
import struct
import sys
MAX_PARTITION_LENGTH = 0xC00 # 3K for partition data (96 entries) leaves 1K in a 4K sector for signature
__version__ = '1.0'
quiet = False
def status(msg):
""" Print status message to stderr """
if not quiet:
critical(msg)
def critical(msg):
""" Print critical message to stderr """
if not quiet:
sys.stderr.write(msg)
sys.stderr.write('\n')
class PartitionTable(list):
def __init__(self):
super(PartitionTable, self).__init__()
@classmethod
def from_csv(cls, csv_contents):
res = PartitionTable()
lines = csv_contents.splitlines()
def expand_vars(f):
f = os.path.expandvars(f)
m = re.match(r'(?<!\\)\$([A-Za-z_][A-Za-z0-9_]*)', f)
if m:
raise InputError("unknown variable '%s'" % m.group(1))
return f
for line_no in range(len(lines)):
line = expand_vars(lines[line_no]).strip()
if line.startswith("#") or len(line) == 0:
continue
try:
res.append(PartitionDefinition.from_csv(line))
except InputError as e:
raise InputError("Error at line %d: %s" % (line_no+1, e))
except Exception:
critical("Unexpected error parsing line %d: %s" % (line_no+1, line))
raise
# fix up missing offsets & negative sizes
last_end = 0x5000 # first offset after partition table
for e in res:
if e.offset is None:
pad_to = 0x10000 if e.type == PartitionDefinition.APP_TYPE else 4
if last_end % pad_to != 0:
last_end += pad_to - (last_end % pad_to)
e.offset = last_end
if e.size < 0:
e.size = -e.size - e.offset
last_end = e.offset + e.size
return res
def __getitem__(self, item):
""" Allow partition table access via name as well as by
numeric index. """
if isinstance(item, str):
for x in self:
if x.name == item:
return x
raise ValueError("No partition entry named '%s'" % item)
else:
return super(PartitionTable, self).__getitem__(item)
def verify(self):
# verify each partition individually
for p in self:
p.verify()
# check for overlaps
last = None
for p in sorted(self, key=lambda x:x.offset):
if p.offset < 0x5000:
raise InputError("Partition offset 0x%x is below 0x5000" % p.offset)
if last is not None and p.offset < last.offset + last.size:
raise InputError("Partition at 0x%x overlaps 0x%x-0x%x" % (p.offset, last.offset, last.offset+last.size-1))
last = p
@classmethod
def from_binary(cls, b):
result = cls()
for o in range(0,len(b),32):
data = b[o:o+32]
if len(data) != 32:
raise InputError("Partition table length must be a multiple of 32 bytes")
if data == b'\xFF'*32:
return result # got end marker
result.append(PartitionDefinition.from_binary(data))
raise InputError("Partition table is missing an end-of-table marker")
def to_binary(self):
result = b"".join(e.to_binary() for e in self)
if len(result) >= MAX_PARTITION_LENGTH:
raise InputError("Binary partition table length (%d) longer than max" % len(result))
result += b"\xFF" * (MAX_PARTITION_LENGTH - len(result)) # pad the sector, for signing
return result
def to_csv(self, simple_formatting=False):
rows = [ "# Espressif ESP32 Partition Table",
"# Name, Type, SubType, Offset, Size, Flags" ]
rows += [ x.to_csv(simple_formatting) for x in self ]
return "\n".join(rows) + "\n"
class PartitionDefinition(object):
APP_TYPE = 0x00
DATA_TYPE = 0x01
TYPES = {
"app" : APP_TYPE,
"data" : DATA_TYPE,
}
# Keep this map in sync with esp_partition_subtype_t enum in esp_partition.h
SUBTYPES = {
APP_TYPE : {
"factory" : 0x00,
"test" : 0x20,
},
DATA_TYPE : {
"ota" : 0x00,
"phy" : 0x01,
"nvs" : 0x02,
"coredump" : 0x03,
"esphttpd" : 0x80,
"fat" : 0x81,
"spiffs" : 0x82,
},
}
MAGIC_BYTES = b"\xAA\x50"
ALIGNMENT = {
APP_TYPE : 0x10000,
DATA_TYPE : 0x04,
}
# dictionary maps flag name (as used in CSV flags list, property name)
# to bit set in flags words in binary format
FLAGS = {
"encrypted" : 0
}
# add subtypes for the 16 OTA slot values ("ota_0" through "ota_15")
for ota_slot in range(16):
SUBTYPES[TYPES["app"]]["ota_%d" % ota_slot] = 0x10 + ota_slot
def __init__(self):
self.name = ""
self.type = None
self.subtype = None
self.offset = None
self.size = None
self.encrypted = False
@classmethod
def from_csv(cls, line):
""" Parse a line from the CSV """
line_w_defaults = line + ",,,," # lazy way to support default fields
fields = [ f.strip() for f in line_w_defaults.split(",") ]
res = PartitionDefinition()
res.name = fields[0]
res.type = res.parse_type(fields[1])
res.subtype = res.parse_subtype(fields[2])
res.offset = res.parse_address(fields[3])
res.size = res.parse_address(fields[4])
if res.size is None:
raise InputError("Size field can't be empty")
flags = fields[5].split(":")
for flag in flags:
if flag in cls.FLAGS:
setattr(res, flag, True)
elif len(flag) > 0:
raise InputError("CSV flag column contains unknown flag '%s'" % (flag))
return res
def __eq__(self, other):
return self.name == other.name and self.type == other.type \
and self.subtype == other.subtype and self.offset == other.offset \
and self.size == other.size
def __repr__(self):
def maybe_hex(x):
return "0x%x" % x if x is not None else "None"
return "PartitionDefinition('%s', 0x%x, 0x%x, %s, %s)" % (self.name, self.type, self.subtype or 0,
maybe_hex(self.offset), maybe_hex(self.size))
def __str__(self):
return "Part '%s' %d/%d @ 0x%x size 0x%x" % (self.name, self.type, self.subtype, self.offset or -1, self.size or -1)
def __cmp__(self, other):
return self.offset - other.offset
def parse_type(self, strval):
if strval == "":
raise InputError("Field 'type' can't be left empty.")
return parse_int(strval, self.TYPES)
def parse_subtype(self, strval):
if strval == "":
return 0 # default
return parse_int(strval, self.SUBTYPES.get(self.type, {}))
def parse_address(self, strval):
if strval == "":
return None # PartitionTable will fill in default
return parse_int(strval)
def verify(self):
if self.type is None:
raise ValidationError(self, "Type field is not set")
if self.subtype is None:
raise ValidationError(self, "Subtype field is not set")
if self.offset is None:
raise ValidationError(self, "Offset field is not set")
align = self.ALIGNMENT.get(self.type, 4)
if self.offset % align:
raise ValidationError(self, "Offset 0x%x is not aligned to 0x%x" % (self.offset, align))
if self.size is None:
raise ValidationError(self, "Size field is not set")
STRUCT_FORMAT = "<2sBBLL16sL"
@classmethod
def from_binary(cls, b):
if len(b) != 32:
raise InputError("Partition definition length must be exactly 32 bytes. Got %d bytes." % len(b))
res = cls()
(magic, res.type, res.subtype, res.offset,
res.size, res.name, flags) = struct.unpack(cls.STRUCT_FORMAT, b)
if b"\x00" in res.name: # strip null byte padding from name string
res.name = res.name[:res.name.index(b"\x00")]
res.name = res.name.decode()
if magic != cls.MAGIC_BYTES:
raise InputError("Invalid magic bytes (%r) for partition definition" % magic)
for flag,bit in cls.FLAGS.items():
if flags & (1<<bit):
setattr(res, flag, True)
flags &= ~(1<<bit)
if flags != 0:
critical("WARNING: Partition definition had unknown flag(s) 0x%08x. Newer binary format?" % flags)
return res
def get_flags_list(self):
return [ flag for flag in self.FLAGS.keys() if getattr(self, flag) ]
def to_binary(self):
flags = sum((1 << self.FLAGS[flag]) for flag in self.get_flags_list())
return struct.pack(self.STRUCT_FORMAT,
self.MAGIC_BYTES,
self.type, self.subtype,
self.offset, self.size,
self.name.encode(),
flags)
def to_csv(self, simple_formatting=False):
def addr_format(a, include_sizes):
if not simple_formatting and include_sizes:
for (val, suffix) in [ (0x100000, "M"), (0x400, "K") ]:
if a % val == 0:
return "%d%s" % (a // val, suffix)
return "0x%x" % a
def lookup_keyword(t, keywords):
for k,v in keywords.items():
if not simple_formatting and t == v:
return k
return "%d" % t
def generate_text_flags():
""" colon-delimited list of flags """
return ":".join(self.get_flags_list())
return ",".join([ self.name,
lookup_keyword(self.type, self.TYPES),
lookup_keyword(self.subtype, self.SUBTYPES.get(self.type, {})),
addr_format(self.offset, False),
addr_format(self.size, True),
generate_text_flags()])
def parse_int(v, keywords={}):
"""Generic parser for integer fields - int(x,0) with provision for
k/m/K/M suffixes and 'keyword' value lookup.
"""
try:
for letter, multiplier in [ ("k",1024), ("m",1024*1024) ]:
if v.lower().endswith(letter):
return parse_int(v[:-1], keywords) * multiplier
return int(v, 0)
except ValueError:
if len(keywords) == 0:
raise InputError("Invalid field value %s" % v)
try:
return keywords[v.lower()]
except KeyError:
raise InputError("Value '%s' is not valid. Known keywords: %s" % (v, ", ".join(keywords)))
def main():
global quiet
parser = argparse.ArgumentParser(description='ESP32 partition table utility')
parser.add_argument('--verify', '-v', help="Don't verify partition table fields (verification is enabled by default)", default=True, action='store_false')
parser.add_argument('--quiet', '-q', help="Don't print status messages to stderr", action='store_true')
parser.add_argument('input', help='Path to CSV or binary file to parse. Will use stdin if omitted.', type=argparse.FileType('rb'), default=sys.stdin)
parser.add_argument('output', help='Path to output converted binary or CSV file. Will use stdout if omitted.',
nargs='?',
default='-')
args = parser.parse_args()
quiet = args.quiet
input = args.input.read()
input_is_binary = input[0:2] == PartitionDefinition.MAGIC_BYTES
if input_is_binary:
status("Parsing binary partition input...")
table = PartitionTable.from_binary(input)
else:
input = input.decode()
status("Parsing CSV input...")
table = PartitionTable.from_csv(input)
if args.verify:
status("Verifying table...")
table.verify()
if input_is_binary:
output = table.to_csv()
with sys.stdout if args.output == '-' else open(args.output, 'w') as f:
f.write(output)
else:
output = table.to_binary()
with sys.stdout.buffer if args.output == '-' else open(args.output, 'wb') as f:
f.write(output)
class InputError(RuntimeError):
def __init__(self, e):
super(InputError, self).__init__(e)
class ValidationError(InputError):
def __init__(self, partition, message):
super(ValidationError, self).__init__(
"Partition %s invalid: %s" % (partition.name, message))
if __name__ == '__main__':
try:
main()
except InputError as e:
print(e, file=sys.stderr)
sys.exit(2)
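Because all the conversion logic lives in importable classes (the host tests below rely on this), here is a short round-trip sketch of the API above, assuming gen_esp32part.py is on the Python path:

from gen_esp32part import PartitionTable

CSV = """
# Name,  Type, SubType, Offset, Size, Flags
nvs,     data, nvs,     0x9000, 24K,
factory, app,  factory, ,       1M,
"""

table = PartitionTable.from_csv(CSV)     # blank app offset is rounded up to 0x10000
table.verify()                           # alignment and overlap checks
blob = table.to_binary()                 # padded with 0xFF to MAX_PARTITION_LENGTH
assert len(blob) == 0xC00
back = PartitionTable.from_binary(blob)  # stops at the all-0xFF end marker
print(back.to_csv())                     # offsets rendered as hex, sizes as 24K / 1M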

View File

@@ -0,0 +1,5 @@
# Name, Type, SubType, Offset, Size, Flags
# Note: if you change the phy_init or app partition offset, make sure to change the offset in Kconfig.projbuild
nvs, data, nvs, 0x9000, 0x6000,
phy_init, data, phy, 0xf000, 0x1000,
factory, app, factory, 0x10000, 1M,

View File

@@ -0,0 +1,6 @@
# Name, Type, SubType, Offset, Size
# Note: if you change the phy_init or app partition offset, make sure to change the offset in Kconfig.projbuild
nvs, data, nvs, 0x9000, 0x6000
phy_init, data, phy, 0xf000, 0x1000
factory, app, factory, 0x10000, 1M
coredump, data, coredump,, 64K

View File

@@ -0,0 +1,8 @@
# Name, Type, SubType, Offset, Size, Flags
# Note: if you change the phy_init or app partition offset, make sure to change the offset in Kconfig.projbuild
nvs, data, nvs, 0x9000, 0x4000
otadata, data, ota, 0xd000, 0x2000
phy_init, data, phy, 0xf000, 0x1000
factory, 0, 0, 0x10000, 1M
ota_0, 0, ota_0, , 1M
ota_1, 0, ota_1, , 1M
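In the table above, ota_0 and ota_1 leave the Offset column blank, so the generator places them automatically on 64 KB boundaries after the factory slot. A hedged check of the resulting layout via the module API (the file path is an assumption, relative to the component directory):

from gen_esp32part import PartitionTable

with open("partitions_two_ota.csv") as f:
    table = PartitionTable.from_csv(f.read())
table.verify()
for name in ("factory", "ota_0", "ota_1"):
    print(name, hex(table[name].offset), hex(table[name].size))
# Expected: factory 0x10000, ota_0 0x110000, ota_1 0x210000, each 0x100000 (1M).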

View File

@@ -0,0 +1,9 @@
# Name, Type, SubType, Offset, Size
# Note: if you change the phy_init or app partition offset, make sure to change the offset in Kconfig.projbuild
nvs, data, nvs, 0x9000, 0x4000
otadata, data, ota, 0xd000, 0x2000
phy_init, data, phy, 0xf000, 0x1000
factory, 0, 0, 0x10000, 1M
coredump, data, coredump,, 64K
ota_0, 0, ota_0, , 1M
ota_1, 0, ota_1, , 1M

View File

@@ -0,0 +1,5 @@
#
# Component Makefile
#
COMPONENT_ADD_LDFLAGS = -Wl,--whole-archive -l$(COMPONENT_NAME) -Wl,--no-whole-archive

View File

@@ -0,0 +1,97 @@
#include <stdio.h>
#include <stdlib.h>
#include "unity.h"
#include "test_utils.h"
#include "esp_partition.h"
TEST_CASE("Can read partition table", "[partition]")
{
const esp_partition_t *p = esp_partition_find_first(ESP_PARTITION_TYPE_APP, ESP_PARTITION_SUBTYPE_ANY, NULL);
TEST_ASSERT_NOT_NULL(p);
TEST_ASSERT_EQUAL(0x10000, p->address);
TEST_ASSERT_EQUAL(ESP_PARTITION_SUBTYPE_APP_FACTORY, p->subtype);
esp_partition_iterator_t it = esp_partition_find(ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_ANY, NULL);
TEST_ASSERT_NOT_NULL(it);
int count = 0;
const esp_partition_t* prev = NULL;
for (; it != NULL; it = esp_partition_next(it)) {
const esp_partition_t *p = esp_partition_get(it);
TEST_ASSERT_NOT_NULL(p);
if (prev) {
TEST_ASSERT_TRUE_MESSAGE(prev->address < p->address, "incorrect partition order");
}
prev = p;
++count;
}
esp_partition_iterator_release(it);
TEST_ASSERT_EQUAL(4, count);
}
TEST_CASE("Can write, read, mmap partition", "[partition][ignore]")
{
const esp_partition_t *p = get_test_data_partition();
printf("Using partition %s at 0x%x, size 0x%x\n", p->label, p->address, p->size);
TEST_ASSERT_NOT_NULL(p);
const size_t max_size = 2 * SPI_FLASH_SEC_SIZE;
uint8_t *data = (uint8_t *) malloc(max_size);
TEST_ASSERT_NOT_NULL(data);
TEST_ASSERT_EQUAL(ESP_OK, esp_partition_erase_range(p, 0, p->size));
srand(0);
size_t block_size;
for (size_t offset = 0; offset < p->size; offset += block_size) {
block_size = ((rand() + 4) % max_size) & (~0x3);
size_t left = p->size - offset;
if (block_size > left) {
block_size = left;
}
for (size_t i = 0; i < block_size / 4; ++i) {
((uint32_t *) (data))[i] = rand();
}
TEST_ASSERT_EQUAL(ESP_OK, esp_partition_write(p, offset, data, block_size));
}
srand(0);
for (size_t offset = 0; offset < p->size; offset += block_size) {
block_size = ((rand() + 4) % max_size) & (~0x3);
size_t left = p->size - offset;
if (block_size > left) {
block_size = left;
}
TEST_ASSERT_EQUAL(ESP_OK, esp_partition_read(p, offset, data, block_size));
for (size_t i = 0; i < block_size / 4; ++i) {
TEST_ASSERT_EQUAL(rand(), ((uint32_t *) data)[i]);
}
}
free(data);
const uint32_t *mmap_data;
spi_flash_mmap_handle_t mmap_handle;
size_t begin = 3000;
size_t size = 64000; //chosen so size is smaller than 64K but the mmap straddles 2 MMU blocks
TEST_ASSERT_EQUAL(ESP_OK, esp_partition_mmap(p, begin, size, SPI_FLASH_MMAP_DATA,
(const void **)&mmap_data, &mmap_handle));
srand(0);
for (size_t offset = 0; offset < p->size; offset += block_size) {
block_size = ((rand() + 4) % max_size) & (~0x3);
size_t left = p->size - offset;
if (block_size > left) {
block_size = left;
}
for (size_t i = 0; i < block_size / 4; ++i) {
size_t pos = offset + i * 4;
uint32_t expected = rand();
if (pos < begin || pos >= (begin + size)) {
continue;
}
TEST_ASSERT_EQUAL(expected, mmap_data[(pos - begin) / 4]);
}
}
spi_flash_munmap(mmap_handle);
}

View File

@@ -0,0 +1,354 @@
#!/usr/bin/env python
from __future__ import print_function, division
import unittest
import struct
import csv
import sys
import subprocess
import tempfile
import os
sys.path.append("..")
from gen_esp32part import *
SIMPLE_CSV = """
# Name,Type,SubType,Offset,Size,Flags
factory,0,2,65536,1048576,
"""
LONGER_BINARY_TABLE = b""
# type 0x00, subtype 0x00,
# offset 64KB, size 1MB
LONGER_BINARY_TABLE += b"\xAA\x50\x00\x00" + \
b"\x00\x00\x01\x00" + \
b"\x00\x00\x10\x00" + \
b"factory\0" + (b"\0"*8) + \
b"\x00\x00\x00\x00"
# type 0x01, subtype 0x20,
# offset 0x110000, size 128KB
LONGER_BINARY_TABLE += b"\xAA\x50\x01\x20" + \
b"\x00\x00\x11\x00" + \
b"\x00\x02\x00\x00" + \
b"data" + (b"\0"*12) + \
b"\x00\x00\x00\x00"
# type 0x10, subtype 0x00,
# offset 0x150000, size 1MB
LONGER_BINARY_TABLE += b"\xAA\x50\x10\x00" + \
b"\x00\x00\x15\x00" + \
b"\x00\x10\x00\x00" + \
b"second" + (b"\0"*10) + \
b"\x00\x00\x00\x00"
LONGER_BINARY_TABLE += b"\xFF" * 32
def _strip_trailing_ffs(binary_table):
"""
Strip all FFs down to the last 32 bytes (terminating entry)
"""
while binary_table.endswith(b"\xFF"*64):
binary_table = binary_table[0:len(binary_table)-32]
return binary_table
class CSVParserTests(unittest.TestCase):
def test_simple_partition(self):
table = PartitionTable.from_csv(SIMPLE_CSV)
self.assertEqual(len(table), 1)
self.assertEqual(table[0].name, "factory")
self.assertEqual(table[0].type, 0)
self.assertEqual(table[0].subtype, 2)
self.assertEqual(table[0].offset, 65536)
self.assertEqual(table[0].size, 1048576)
def test_require_type(self):
csv = """
# Name,Type, SubType,Offset,Size
ihavenotype,
"""
with self.assertRaisesRegexp(InputError, "type"):
PartitionTable.from_csv(csv)
def test_type_subtype_names(self):
csv_magicnumbers = """
# Name, Type, SubType, Offset, Size
myapp, 0, 0,, 0x100000
myota_0, 0, 0x10,, 0x100000
myota_1, 0, 0x11,, 0x100000
myota_15, 0, 0x1f,, 0x100000
mytest, 0, 0x20,, 0x100000
myota_status, 1, 0,, 0x100000
"""
csv_nomagicnumbers = """
# Name, Type, SubType, Offset, Size
myapp, app, factory,, 0x100000
myota_0, app, ota_0,, 0x100000
myota_1, app, ota_1,, 0x100000
myota_15, app, ota_15,, 0x100000
mytest, app, test,, 0x100000
myota_status, data, ota,, 0x100000
"""
# make two equivalent partition tables, one using
# magic numbers and one using shortcuts. Ensure they match
magic = PartitionTable.from_csv(csv_magicnumbers)
magic.verify()
nomagic = PartitionTable.from_csv(csv_nomagicnumbers)
nomagic.verify()
self.assertEqual(nomagic["myapp"].type, 0)
self.assertEqual(nomagic["myapp"].subtype, 0)
self.assertEqual(nomagic["myapp"], magic["myapp"])
self.assertEqual(nomagic["myota_0"].type, 0)
self.assertEqual(nomagic["myota_0"].subtype, 0x10)
self.assertEqual(nomagic["myota_0"], magic["myota_0"])
self.assertEqual(nomagic["myota_15"], magic["myota_15"])
self.assertEqual(nomagic["mytest"], magic["mytest"])
self.assertEqual(nomagic["myota_status"], magic["myota_status"])
#self.assertEqual(nomagic.to_binary(), magic.to_binary())
def test_unit_suffixes(self):
csv = """
# Name, Type, Subtype, Offset, Size
one_megabyte, app, factory, 64k, 1M
"""
t = PartitionTable.from_csv(csv)
t.verify()
self.assertEqual(t[0].offset, 64*1024)
self.assertEqual(t[0].size, 1*1024*1024)
def test_default_offsets(self):
csv = """
# Name, Type, Subtype, Offset, Size
first, app, factory,, 1M
second, data, 0x15,, 1M
minidata, data, 0x40,, 32K
otherapp, app, factory,, 1M
"""
t = PartitionTable.from_csv(csv)
# 'first'
self.assertEqual(t[0].offset, 0x010000) # 64KB boundary as it's an app image
self.assertEqual(t[0].size, 0x100000) # Size specified in CSV
# 'second'
self.assertEqual(t[1].offset, 0x110000) # prev offset+size
self.assertEqual(t[1].size, 0x100000) # Size specified in CSV
# 'minidata'
self.assertEqual(t[2].offset, 0x210000)
# 'otherapp'
self.assertEqual(t[3].offset, 0x220000) # 64KB boundary as it's an app image
def test_negative_size_to_offset(self):
csv = """
# Name, Type, Subtype, Offset, Size
first, app, factory, 0x10000, -2M
second, data, 0x15, , 1M
"""
t = PartitionTable.from_csv(csv)
t.verify()
# 'first'
self.assertEqual(t[0].offset, 0x10000) # in CSV
self.assertEqual(t[0].size, 0x200000 - t[0].offset) # Up to 2M
# 'second'
self.assertEqual(t[1].offset, 0x200000) # prev offset+size
def test_overlapping_offsets_fail(self):
csv = """
first, app, factory, 0x100000, 2M
second, app, ota_0, 0x200000, 1M
"""
t = PartitionTable.from_csv(csv)
with self.assertRaisesRegexp(InputError, "overlap"):
t.verify()
class BinaryOutputTests(unittest.TestCase):
def test_binary_entry(self):
csv = """
first, 0x30, 0xEE, 0x100400, 0x300000
"""
t = PartitionTable.from_csv(csv)
tb = _strip_trailing_ffs(t.to_binary())
self.assertEqual(len(tb), 64)
self.assertEqual(b'\xAA\x50', tb[0:2]) # magic
self.assertEqual(b'\x30\xee', tb[2:4]) # type, subtype
eo, es = struct.unpack("<LL", tb[4:12])
self.assertEqual(eo, 0x100400) # offset
self.assertEqual(es, 0x300000) # size
def test_multiple_entries(self):
csv = """
first, 0x30, 0xEE, 0x100400, 0x300000
second,0x31, 0xEF, , 0x100000
"""
t = PartitionTable.from_csv(csv)
tb = _strip_trailing_ffs(t.to_binary())
self.assertEqual(len(tb), 96)
self.assertEqual(b'\xAA\x50', tb[0:2])
self.assertEqual(b'\xAA\x50', tb[32:34])
def test_encrypted_flag(self):
csv = """
# Name, Type, Subtype, Offset, Size, Flags
first, app, factory,, 1M, encrypted
"""
t = PartitionTable.from_csv(csv)
self.assertTrue(t[0].encrypted)
tb = _strip_trailing_ffs(t.to_binary())
tr = PartitionTable.from_binary(tb)
self.assertTrue(tr[0].encrypted)
class BinaryParserTests(unittest.TestCase):
def test_parse_one_entry(self):
# type 0x30, subtype 0xee,
# offset 1MB, size 2MB
entry = b"\xAA\x50\x30\xee" + \
b"\x00\x00\x10\x00" + \
b"\x00\x00\x20\x00" + \
b"0123456789abc\0\0\0" + \
b"\x00\x00\x00\x00" + \
b"\xFF" * 32
# verify that parsing 32 bytes as a table
# or as a single Definition are the same thing
t = PartitionTable.from_binary(entry)
self.assertEqual(len(t), 1)
t[0].verify()
e = PartitionDefinition.from_binary(entry[:32])
self.assertEqual(t[0], e)
e.verify()
self.assertEqual(e.type, 0x30)
self.assertEqual(e.subtype, 0xEE)
self.assertEqual(e.offset, 0x100000)
self.assertEqual(e.size, 0x200000)
self.assertEqual(e.name, "0123456789abc")
def test_multiple_entries(self):
t = PartitionTable.from_binary(LONGER_BINARY_TABLE)
t.verify()
self.assertEqual(3, len(t))
self.assertEqual(t[0].type, PartitionDefinition.APP_TYPE)
self.assertEqual(t[0].name, "factory")
self.assertEqual(t[1].type, PartitionDefinition.DATA_TYPE)
self.assertEqual(t[1].name, "data")
self.assertEqual(t[2].type, 0x10)
self.assertEqual(t[2].name, "second")
round_trip = _strip_trailing_ffs(t.to_binary())
self.assertEqual(round_trip, LONGER_BINARY_TABLE)
def test_bad_magic(self):
bad_magic = b"OHAI" + \
b"\x00\x00\x10\x00" + \
b"\x00\x00\x20\x00" + \
b"0123456789abc\0\0\0" + \
b"\x00\x00\x00\x00"
with self.assertRaisesRegexp(InputError, "Invalid magic bytes"):
PartitionTable.from_binary(bad_magic)
def test_bad_length(self):
bad_length = b"OHAI" + \
b"\x00\x00\x10\x00" + \
b"\x00\x00\x20\x00" + \
b"0123456789"
with self.assertRaisesRegexp(InputError, "32 bytes"):
PartitionTable.from_binary(bad_length)
class CSVOutputTests(unittest.TestCase):
def _readcsv(self, source_str):
return list(csv.reader(source_str.split("\n")))
def test_output_simple_formatting(self):
table = PartitionTable.from_csv(SIMPLE_CSV)
as_csv = table.to_csv(True)
c = self._readcsv(as_csv)
# first two lines should start with comments
self.assertEqual(c[0][0][0], "#")
self.assertEqual(c[1][0][0], "#")
row = c[2]
self.assertEqual(row[0], "factory")
self.assertEqual(row[1], "0")
self.assertEqual(row[2], "2")
self.assertEqual(row[3], "0x10000") # reformatted as hex
self.assertEqual(row[4], "0x100000") # also hex
# round trip back to a PartitionTable and check is identical
roundtrip = PartitionTable.from_csv(as_csv)
self.assertEqual(roundtrip, table)
def test_output_smart_formatting(self):
table = PartitionTable.from_csv(SIMPLE_CSV)
as_csv = table.to_csv(False)
c = self._readcsv(as_csv)
# first two lines should start with comments
self.assertEqual(c[0][0][0], "#")
self.assertEqual(c[1][0][0], "#")
row = c[2]
self.assertEqual(row[0], "factory")
self.assertEqual(row[1], "app")
self.assertEqual(row[2], "2")
self.assertEqual(row[3], "0x10000")
self.assertEqual(row[4], "1M")
# round trip back to a PartitionTable and check is identical
roundtrip = PartitionTable.from_csv(as_csv)
self.assertEqual(roundtrip, table)
class CommandLineTests(unittest.TestCase):
def test_basic_cmdline(self):
try:
binpath = tempfile.mktemp()
csvpath = tempfile.mktemp()
# copy binary contents to temp file
with open(binpath, 'wb') as f:
f.write(LONGER_BINARY_TABLE)
# run gen_esp32part.py to convert binary file to CSV
subprocess.check_call([sys.executable, "../gen_esp32part.py",
binpath, csvpath])
# reopen the CSV and check the generated binary is identical
with open(csvpath, 'r') as f:
from_csv = PartitionTable.from_csv(f.read())
self.assertEqual(_strip_trailing_ffs(from_csv.to_binary()), LONGER_BINARY_TABLE)
# run gen_esp32part.py to convert the CSV to binary again
subprocess.check_call([sys.executable, "../gen_esp32part.py",
csvpath, binpath])
# assert that file reads back as identical
with open(binpath, 'rb') as f:
binary_readback = f.read()
binary_readback = _strip_trailing_ffs(binary_readback)
self.assertEqual(binary_readback, LONGER_BINARY_TABLE)
finally:
for path in binpath, csvpath:
try:
os.remove(path)
except OSError:
pass
class VerificationTests(unittest.TestCase):
def test_bad_alignment(self):
csv = """
# Name,Type, SubType,Offset,Size
app,app, factory, 32K, 1M
"""
with self.assertRaisesRegexp(ValidationError,
r"Offset.+not aligned"):
t = PartitionTable.from_csv(csv)
t.verify()
if __name__ == "__main__":
unittest.main()