Merge tag 'dm-pull-24jul19-take3' of https://gitlab.denx.de/u-boot/custodians/u-boot-dm

Minor driver-model fixes and tweaks
A few device-tree fixes
Binman support for extracting files from an image
Commit f9b65c76b4 by Tom Rini, 2019-07-24 16:24:50 -04:00
87 changed files with 8162 additions and 927 deletions


@@ -176,7 +176,7 @@ Run binman and dtoc testsuite:
./tools/buildman/buildman -P sandbox_spl &&
export PYTHONPATH="${UBOOT_TRAVIS_BUILD_DIR}/scripts/dtc/pylibfdt";
export PATH="${UBOOT_TRAVIS_BUILD_DIR}/scripts/dtc:${PATH}";
-./tools/binman/binman -t &&
+./tools/binman/binman --toolpath ${UBOOT_TRAVIS_BUILD_DIR}/tools test &&
./tools/dtoc/dtoc -t
# Test sandbox with test.py


@@ -32,6 +32,7 @@ addons:
- device-tree-compiler
- lzop
- liblz4-tool
+- lzma-alone
- libisl15
- clang-7
- srecord
@@ -146,7 +147,7 @@ script:
if [[ -n "${TEST_PY_TOOLS}" ]]; then
PYTHONPATH="${UBOOT_TRAVIS_BUILD_DIR}/scripts/dtc/pylibfdt"
PATH="${UBOOT_TRAVIS_BUILD_DIR}/scripts/dtc:${PATH}"
-./tools/binman/binman -t &&
+./tools/binman/binman --toolpath ${UBOOT_TRAVIS_BUILD_DIR}/tools test &&
./tools/patman/patman --test &&
./tools/buildman/buildman -t &&
PYTHONPATH="${UBOOT_TRAVIS_BUILD_DIR}/scripts/dtc/pylibfdt"


@@ -1196,9 +1196,9 @@ u-boot.ldr: u-boot
# ---------------------------------------------------------------------------
# Use 'make BINMAN_DEBUG=1' to enable debugging
quiet_cmd_binman = BINMAN $@
-cmd_binman = $(srctree)/tools/binman/binman -u -d u-boot.dtb -O . -m \
+cmd_binman = $(srctree)/tools/binman/binman build -u -d u-boot.dtb -O . -m \
-I . -I $(srctree) -I $(srctree)/board/$(BOARDDIR) \
-$(if $(BINMAN_DEBUG),-D) $(BINMAN_$(@F)) $<
+$(if $(BINMAN_DEBUG),-D) $(BINMAN_$(@F))
OBJCOPYFLAGS_u-boot.ldr.hex := -I binary -O ihex


@@ -671,30 +671,33 @@ int fdt_pci_dma_ranges(void *blob, int phb_off, struct pci_controller *hose) {
dma_range[0] = 0;
if (size >= 0x100000000ull)
-dma_range[0] |= FDT_PCI_MEM64;
+dma_range[0] |= cpu_to_fdt32(FDT_PCI_MEM64);
else
-dma_range[0] |= FDT_PCI_MEM32;
+dma_range[0] |= cpu_to_fdt32(FDT_PCI_MEM32);
if (hose->regions[r].flags & PCI_REGION_PREFETCH)
-dma_range[0] |= FDT_PCI_PREFETCH;
+dma_range[0] |= cpu_to_fdt32(FDT_PCI_PREFETCH);
#ifdef CONFIG_SYS_PCI_64BIT
-dma_range[1] = bus_start >> 32;
+dma_range[1] = cpu_to_fdt32(bus_start >> 32);
#else
dma_range[1] = 0;
#endif
-dma_range[2] = bus_start & 0xffffffff;
+dma_range[2] = cpu_to_fdt32(bus_start & 0xffffffff);
if (addrcell == 2) {
-dma_range[3] = phys_start >> 32;
-dma_range[4] = phys_start & 0xffffffff;
+dma_range[3] = cpu_to_fdt32(phys_start >> 32);
+dma_range[4] = cpu_to_fdt32(phys_start & 0xffffffff);
} else {
-dma_range[3] = phys_start & 0xffffffff;
+dma_range[3] = cpu_to_fdt32(phys_start & 0xffffffff);
}
if (sizecell == 2) {
-dma_range[3 + addrcell + 0] = size >> 32;
-dma_range[3 + addrcell + 1] = size & 0xffffffff;
+dma_range[3 + addrcell + 0] =
+	cpu_to_fdt32(size >> 32);
+dma_range[3 + addrcell + 1] =
+	cpu_to_fdt32(size & 0xffffffff);
} else {
-dma_range[3 + addrcell + 0] = size & 0xffffffff;
+dma_range[3 + addrcell + 0] =
+	cpu_to_fdt32(size & 0xffffffff);
}
dma_range += (3 + addrcell + sizecell);
@@ -1552,7 +1555,7 @@ u64 fdt_get_base_address(const void *fdt, int node)
prop = fdt_getprop(fdt, node, "reg", &size);
-return prop ? fdt_translate_address(fdt, node, prop) : 0;
+return prop ? fdt_translate_address(fdt, node, prop) : OF_BAD_ADDR;
}
/*
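
For context on the endianness fix above: FDT property cells are big-endian
regardless of host endianness, so every 32-bit cell must pass through
cpu_to_fdt32() when written (and fdt32_to_cpu() when read). A minimal sketch of
the same pattern when setting a property with libfdt; write_addr_cells() is an
illustrative helper, not part of this patch:

	#include <libfdt.h>

	/* Illustrative only: store a 64-bit address as two big-endian cells */
	static int write_addr_cells(void *blob, int node, uint64_t addr)
	{
		fdt32_t cells[2];

		cells[0] = cpu_to_fdt32(addr >> 32);		/* high cell */
		cells[1] = cpu_to_fdt32(addr & 0xffffffff);	/* low cell */

		return fdt_setprop(blob, node, "reg", cells, sizeof(cells));
	}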


@@ -51,6 +51,8 @@ static int clk_of_xlate_default(struct clk *clk,
else
clk->id = 0;
+clk->data = 0;
return 0;
}


@@ -388,7 +388,8 @@ int device_probe(struct udevice *dev)
if (dev->parent && device_get_uclass_id(dev) != UCLASS_PINCTRL)
pinctrl_select_state(dev, "default");
-if (dev->parent && device_get_uclass_id(dev) != UCLASS_POWER_DOMAIN) {
+if (CONFIG_IS_ENABLED(POWER_DOMAIN) && dev->parent &&
+    device_get_uclass_id(dev) != UCLASS_POWER_DOMAIN) {
if (!power_domain_get(dev, &pd))
power_domain_on(&pd);
}
@@ -409,10 +410,16 @@ int device_probe(struct udevice *dev)
goto fail;
}
-/* Process 'assigned-{clocks/clock-parents/clock-rates}' properties */
-ret = clk_set_defaults(dev);
-if (ret)
-goto fail;
+/* Only handle devices that have a valid ofnode */
+if (dev_of_valid(dev)) {
+/*
+ * Process 'assigned-{clocks/clock-parents/clock-rates}'
+ * properties
+ */
+ret = clk_set_defaults(dev);
+if (ret)
+goto fail;
+}
if (drv->probe) {
ret = drv->probe(dev);


@@ -884,5 +884,5 @@ int ofnode_set_enabled(ofnode node, bool value)
if (value)
return ofnode_write_string(node, "status", "okay");
else
return ofnode_write_string(node, "status", "disable");
return ofnode_write_string(node, "status", "disabled");
}


@@ -48,6 +48,10 @@ static int timer_pre_probe(struct udevice *dev)
int err;
ulong ret;
+/* It is possible that a timer device has a null ofnode */
+if (!dev_of_valid(dev))
+	return 0;
err = clk_get_by_index(dev, 0, &timer_clk);
if (!err) {
ret = clk_get_rate(&timer_clk);


@@ -55,7 +55,7 @@ static void swap_file_header(struct cbfs_fileheader *dest,
memcpy(&dest->magic, &src->magic, sizeof(dest->magic));
dest->len = be32_to_cpu(src->len);
dest->type = be32_to_cpu(src->type);
-dest->checksum = be32_to_cpu(src->checksum);
+dest->attributes_offset = be32_to_cpu(src->attributes_offset);
dest->offset = be32_to_cpu(src->offset);
}
@@ -108,7 +108,7 @@ static int file_cbfs_next_file(u8 *start, u32 size, u32 align,
newNode->name = (char *)fileHeader +
sizeof(struct cbfs_fileheader);
newNode->name_length = name_len;
-newNode->checksum = header.checksum;
+newNode->attributes_offset = header.attributes_offset;
step = header.len;
if (step % align)


@@ -40,6 +40,17 @@ enum cbfs_filetype {
CBFS_TYPE_CMOS_LAYOUT = 0x01aa
};
+enum {
+	CBFS_HEADER_MAGIC = 0x4f524243,
+};
+
+/**
+ * struct cbfs_header - header at the start of a CBFS region
+ *
+ * All fields use big-endian format.
+ *
+ * @magic: Magic number (CBFS_HEADER_MAGIC)
+ */
struct cbfs_header {
u32 magic;
u32 version;
@@ -54,7 +65,8 @@ struct cbfs_fileheader {
u8 magic[8];
u32 len;
u32 type;
-u32 checksum;
+/* offset to struct cbfs_file_attribute or 0 */
+u32 attributes_offset;
u32 offset;
} __packed;
@@ -65,7 +77,7 @@ struct cbfs_cachenode {
u32 data_length;
char *name;
u32 name_length;
-u32 checksum;
+u32 attributes_offset;
} __packed;
extern enum cbfs_result file_cbfs_result;


@@ -227,7 +227,7 @@ fdt_addr_t dev_read_addr_size(struct udevice *dev, const char *propname,
/**
* dev_read_name() - get the name of a device's node
*
-* @node: valid node to look up
+* @dev: Device to read from
* @return name of node
*/
const char *dev_read_name(struct udevice *dev);


@@ -297,7 +297,7 @@ int uclass_first_device_err(enum uclass_id id, struct udevice **devp);
*
* The device returned is probed if necessary, and ready for use
*
-* This function is useful to start iterating through a list of devices which
+* This function is useful to iterate through a list of devices which
* are functioning correctly and can be probed.
*
* @devp: On entry, pointer to device to lookup. On exit, returns pointer


@@ -33,12 +33,14 @@ run_test "sandbox_flattree" ./test/py/test.py --bd sandbox_flattree --build \
-k test_ut
# Set up a path to dtc (device-tree compiler) and libfdt.py, a library it
-# provides and which is built by the sandbox_spl config.
+# provides and which is built by the sandbox_spl config. Also set up the path
+# to tools built by the build.
DTC_DIR=build-sandbox_spl/scripts/dtc
export PYTHONPATH=${DTC_DIR}/pylibfdt
export DTC=${DTC_DIR}/dtc
+TOOLS_DIR=build-sandbox_spl/tools
run_test "binman" ./tools/binman/binman -t
run_test "binman" ./tools/binman/binman --toolpath ${TOOLS_DIR} test
run_test "patman" ./tools/patman/patman --test
[ "$1" == "quick" ] && skip=--skip-net-tests
@@ -49,7 +51,8 @@ run_test "dtoc" ./tools/dtoc/dtoc -t
# This needs you to set up Python test coverage tools.
# To enable Python test coverage on Debian-type distributions (e.g. Ubuntu):
# $ sudo apt-get install python-pytest python-coverage
run_test "binman code coverage" ./tools/binman/binman -T
export PATH=$PATH:${TOOLS_DIR}
run_test "binman code coverage" ./tools/binman/binman test -T
run_test "dtoc code coverage" ./tools/dtoc/dtoc -T
run_test "fdt code coverage" ./tools/dtoc/test_fdt -T


@@ -175,6 +175,9 @@ HOSTCFLAGS_mkexynosspl.o := -pedantic
ifdtool-objs := $(LIBFDT_OBJS) ifdtool.o
hostprogs-$(CONFIG_X86) += ifdtool
+ifwitool-objs := ifwitool.o
+hostprogs-$(CONFIG_X86)$(CONFIG_SANDBOX) += ifwitool
hostprogs-$(CONFIG_MX23) += mxsboot
hostprogs-$(CONFIG_MX28) += mxsboot
HOSTCFLAGS_mxsboot.o := -pedantic


@@ -36,10 +36,9 @@ suitable padding and alignment. It provides a way to process binaries before
they are included, by adding a Python plug-in. The device tree is available
to U-Boot at run-time so that the images can be interpreted.
-Binman does not yet update the device tree with the final location of
-everything when it is done. A simple C structure could be generated for
-constrained environments like SPL (using dtoc) but this is also not
-implemented.
+Binman can update the device tree with the final location of everything when it
+is done. Entry positions can be provided to U-Boot SPL as run-time symbols,
+avoiding device-tree code overhead.
Binman can also support incorporating filesystems in the image if required.
For example x86 platforms may use CBFS in some cases.
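
The run-time symbols mentioned above are read in SPL via the binman_sym helpers
in U-Boot. A minimal sketch, assuming the SPL build writes symbols for the
standard 'u_boot_any' entry:

	#include <binman_sym.h>

	binman_sym_declare(ulong, u_boot_any, image_pos);

	static ulong get_u_boot_pos(void)
	{
		/* Filled in by binman's WriteSymbols() step at image-build time */
		return binman_sym(ulong, u_boot_any, image_pos);
	}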
@@ -181,9 +180,14 @@ the configuration of the Intel-format descriptor.
Running binman
--------------
+First install prerequisites, e.g.
+
+	sudo apt-get install python-pyelftools python3-pyelftools lzma-alone \
+		liblz4-tool
Type:
-	binman -b <board_name>
+	binman build -b <board_name>
to build an image for a board. The board name is the same name used when
configuring U-Boot (e.g. for sandbox_defconfig the board name is 'sandbox').
@@ -191,7 +195,7 @@ Binman assumes that the input files for the build are in ../b/<board_name>.
Or you can specify this explicitly:
-	binman -I <build_path>
+	binman build -I <build_path>
where <build_path> is the build directory containing the output of the U-Boot
build.
@@ -335,6 +339,10 @@ expand-size:
limited by the size of the image/section and the position of the next
entry.
+compress:
+	Sets the compression algorithm to use (for blobs only). See the entry
+	documentation for details.
The attributes supported for images and sections are described below. Several
are similar to those for entries.
@@ -479,7 +487,92 @@ Entry Documentation
For details on the various entry types supported by binman and how to use them,
see README.entries. This is generated from the source code using:
-	binman -E >tools/binman/README.entries
+	binman entry-docs >tools/binman/README.entries
Listing images
--------------
It is possible to list the entries in an existing firmware image created by
binman, provided that there is an 'fdtmap' entry in the image. For example:
$ binman ls -i image.bin
Name                 Image-pos   Size  Entry-type     Offset  Uncomp-size
--------------------------------------------------------------------------
main-section                      c00  section             0
  u-boot                     0      4  u-boot              0
  section                         5fc  section             4
    cbfs                   100    400  cbfs                0
      u-boot               138      4  u-boot             38
      u-boot-dtb           180    108  u-boot-dtb         80          3b5
    u-boot-dtb             500    1ff  u-boot-dtb        400          3b5
  fdtmap                   6fc    381  fdtmap            6fc
  image-header             bf8      8  image-header      bf8
This shows the hierarchy of the image, the position, size and type of each
entry, the offset of each entry within its parent and the uncompressed size if
the entry is compressed.
It is also possible to list just some files in an image, e.g.
$ binman ls -i image.bin section/cbfs
Name                 Image-pos   Size  Entry-type     Offset  Uncomp-size
--------------------------------------------------------------------------
    cbfs                   100    400  cbfs                0
      u-boot               138      4  u-boot             38
      u-boot-dtb           180    108  u-boot-dtb         80          3b5
or with wildcards:
$ binman ls -i image.bin "*cb*" "*head*"
Name                 Image-pos   Size  Entry-type     Offset  Uncomp-size
--------------------------------------------------------------------------
    cbfs                   100    400  cbfs                0
      u-boot               138      4  u-boot             38
      u-boot-dtb           180    108  u-boot-dtb         80          3b5
  image-header             bf8      8  image-header      bf8
Extracting files from images
----------------------------
You can extract files from an existing firmware image created by binman,
provided that there is an 'fdtmap' entry in the image. For example:
$ binman extract -i image.bin section/cbfs/u-boot
which will write the uncompressed contents of that entry to the file 'u-boot' in
the current directory. You can also extract to a particular file, in this case
u-boot.bin:
$ binman extract -i image.bin section/cbfs/u-boot -f u-boot.bin
It is possible to extract all files into a destination directory, which will
put files in subdirectories matching the entry hierarchy:
$ binman extract -i image.bin -O outdir
or just a selection:
$ binman extract -i image.bin "*u-boot*" -O outdir
Logging
-------
Binman normally operates silently unless there is an error, in which case it
just displays the error. The -D/--debug option can be used to create a full
backtrace when errors occur.
Internally binman logs some output while it is running. This can be displayed
by increasing the -v/--verbosity from the default of 1:
0: silent
1: warnings only
2: notices (important messages)
3: info about major operations
4: detailed information about each operation
5: debug (all output)
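
For example, to see detailed logging while building a board image (a sketch,
assuming the global -v option precedes the sub-command):

	binman -v4 build -b sandbox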
Hashing Entries
@@ -558,7 +651,8 @@ tree. This sets the correct 'offset' and 'size' values, for example.
The default implementation does nothing. This can be overridden to adjust the
contents of an entry in some way. For example, it would be possible to create
an entry containing a hash of the contents of some other entries. At this
-stage the offset and size of entries should not be adjusted.
+stage the offset and size of entries should not be adjusted unless absolutely
+necessary, since it requires a repack (going back to PackEntries()).
10. WriteSymbols() - write the value of symbols into the U-Boot SPL binary.
See 'Access to binman entry offsets at run time' below for a description of
@@ -634,20 +728,27 @@ the image definition, binman calculates the final values and writes these to
the device tree. These can be used by U-Boot at run-time to find the location
of each entry.
+Alternatively, an FDT map entry can be used to add a special FDT containing
+just the information about the image. This is preceded by a magic string so it
+can be located anywhere in the image. An image header (typically at the start
+or end of the image) can be used to point to the FDT map. See fdtmap and
+image-header entries for more information.
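
A minimal image description using this feature might look like the following
sketch (sizes and positions are computed by binman; compare the fdtmap example
output in README.entries):

	binman {
		u-boot {
		};
		fdtmap {
		};
	};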
Compression
-----------
Binman supports compression for 'blob' entries (those of type 'blob' and
-derivatives). To enable this for an entry, add a 'compression' property:
+derivatives). To enable this for an entry, add a 'compress' property:
blob {
filename = "datafile";
compression = "lz4";
compress = "lz4";
};
The entry will then contain the compressed data, using the 'lz4' compression
-algorithm. Currently this is the only one that is supported.
+algorithm. Currently this is the only one that is supported. The uncompressed
+size is written to the node in an 'uncomp-size' property, if -u is used.
@@ -691,15 +792,25 @@ Not all properties can be provided this way. Only some entries support it,
typically for filenames.
+External tools
+--------------
+
+Binman can make use of external command-line tools to handle processing of
+entry contents or to generate entry contents. These tools are executed using
+the 'tools' module's Run() method. The tools generally must exist on the PATH,
+but the --toolpath option can be used to specify additional search paths to
+use. This option can be specified multiple times to add more than one path.
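
For example (the extra search path here is purely illustrative):

	binman build -b sandbox --toolpath /path/to/extra/tools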
Code coverage
-------------
Binman is a critical tool and is designed to be very testable. Entry
-implementations target 100% test coverage. Run 'binman -T' to check this.
+implementations target 100% test coverage. Run 'binman test -T' to check this.
To enable Python test coverage on Debian-type distributions (e.g. Ubuntu):
-   $ sudo apt-get install python-coverage python-pytest
+   $ sudo apt-get install python-coverage python3-coverage python-pytest
Concurrent tests
@@ -716,6 +827,14 @@ Use '-P 1' to disable this. It is automatically disabled when code coverage is
being used (-T) since they are incompatible.
+Debugging tests
+---------------
+
+Sometimes when debugging tests it is useful to keep the input and output
+directories so they can be examined later. Use -X or --test-preserve-dirs for
+this.
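
For example, to run a single test and keep its directories for later
inspection (testSections is a test name used elsewhere in this series):

	binman test -X testSections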
Advanced Features / Technical docs
----------------------------------
@@ -788,13 +907,12 @@ Some ideas:
- Use of-platdata to make the information available to code that is unable
to use device tree (such as a very small SPL image)
- Allow easy building of images by specifying just the board name
-- Produce a full Python binding for libfdt (for upstream). This is nearing
-  completion but some work remains
-- Add an option to decode an image into the constituent binaries
- Support building an image for a board (-b) more completely, with a
configurable build directory
- Consider making binman work with buildman, although if it is used in the
Makefile, this will be automatic
-- Support updating binaries in an image (with no size change / repacking)
+- Support updating binaries in an image (with repacking)
+- Support adding FITs to an image
+- Support for ARM Trusted Firmware (ATF)
--
Simon Glass <sjg@chromium.org>


@@ -60,6 +60,158 @@ See cros_ec_rw for an example of this.
Entry: cbfs: Entry containing a Coreboot Filesystem (CBFS)
----------------------------------------------------------
A CBFS provides a way to group files into a group. It has a simple directory
structure and allows the position of individual files to be set, since it is
designed to support execute-in-place in an x86 SPI-flash device. Where XIP
is not used, it supports compression and storing ELF files.
CBFS is used by coreboot as its way of organising SPI-flash contents.
The contents of the CBFS are defined by subnodes of the cbfs entry, e.g.:
cbfs {
size = <0x100000>;
u-boot {
cbfs-type = "raw";
};
u-boot-dtb {
cbfs-type = "raw";
};
};
This creates a CBFS 1MB in size with two files in it: u-boot.bin and u-boot.dtb.
Note that the size is required since binman does not support calculating it.
The contents of each entry is just what binman would normally provide if it
were not a CBFS node. A blob type can be used to import arbitrary files as
with the second subnode below:
cbfs {
size = <0x100000>;
u-boot {
cbfs-name = "BOOT";
cbfs-type = "raw";
};
dtb {
type = "blob";
filename = "u-boot.dtb";
cbfs-type = "raw";
cbfs-compress = "lz4";
cbfs-offset = <0x100000>;
};
};
This creates a CBFS 1MB in size with u-boot.bin (named "BOOT") and
u-boot.dtb (named "dtb", the latter compressed with the lz4 algorithm).
Properties supported in the top-level CBFS node:
cbfs-arch:
Defaults to "x86", but you can specify the architecture if needed.
Properties supported in the CBFS entry subnodes:
cbfs-name:
This is the name of the file created in CBFS. It defaults to the entry
name (which is the node name), but you can override it with this
property.
cbfs-type:
This is the CBFS file type. The following are supported:
raw:
This is a 'raw' file, although compression is supported. It can be
used to store any file in CBFS.
stage:
This is an ELF file that has been loaded (i.e. mapped to memory), so
appears in the CBFS as a flat binary. The input file must be an ELF
image, for example this puts "u-boot" (the ELF image) into a 'stage'
entry:
cbfs {
size = <0x100000>;
u-boot-elf {
cbfs-name = "BOOT";
cbfs-type = "stage";
};
};
You can use your own ELF file with something like:
cbfs {
size = <0x100000>;
something {
type = "blob";
filename = "cbfs-stage.elf";
cbfs-type = "stage";
};
};
As mentioned, the file is converted to a flat binary, so it is
equivalent to adding "u-boot.bin", for example, but with the load and
start addresses specified by the ELF. At present there is no option
to add a flat binary with a load/start address, similar to the
'add-flat-binary' option in cbfstool.
cbfs-offset:
This is the offset of the file's data within the CBFS. It is used to
specify where the file should be placed in cases where a fixed position
is needed. Typical uses are for code which is not relocatable and must
execute in-place from a particular address. This works because SPI flash
is generally mapped into memory on x86 devices. The file header is
placed before this offset so that the data start lines up exactly with
the chosen offset. If this property is not provided, then the file is
placed in the next available spot.
The current implementation supports only a subset of CBFS features. It does
not support other file types (e.g. payload), adding multiple files (like the
'files' entry with a pattern supported by binman), putting files at a
particular offset in the CBFS and a few other things.
Of course binman can create images containing multiple CBFSs, simply by
defining these in the binman config:
binman {
size = <0x800000>;
cbfs {
offset = <0x100000>;
size = <0x100000>;
u-boot {
cbfs-type = "raw";
};
u-boot-dtb {
cbfs-type = "raw";
};
};
cbfs2 {
offset = <0x700000>;
size = <0x100000>;
u-boot {
cbfs-type = "raw";
};
u-boot-dtb {
cbfs-type = "raw";
};
image {
type = "blob";
filename = "image.jpg";
};
};
};
This creates an 8MB image with two CBFSs, one at offset 1MB, one at 7MB,
both of size 1MB.
Entry: cros-ec-rw: A blob entry which contains a Chromium OS read-write EC image
--------------------------------------------------------------------------------
@@ -71,6 +223,44 @@ updating the EC on startup via software sync.
Entry: fdtmap: An entry which contains an FDT map
-------------------------------------------------
Properties / Entry arguments:
None
An FDT map is just a header followed by an FDT containing a list of all the
entries in the image.
The header is the string _FDTMAP_ followed by 8 unused bytes.
When used, this entry will be populated with an FDT map which reflects the
entries in the current image. Hierarchy is preserved, and all offsets and
sizes are included.
Note that the -u option must be provided to ensure that binman updates the
FDT with the position of each entry.
Example output for a simple image with U-Boot and an FDT map:
/ {
size = <0x00000112>;
image-pos = <0x00000000>;
offset = <0x00000000>;
u-boot {
size = <0x00000004>;
image-pos = <0x00000000>;
offset = <0x00000000>;
};
fdtmap {
size = <0x0000010e>;
image-pos = <0x00000004>;
offset = <0x00000004>;
};
};
Entry: files: Entry containing a set of files
---------------------------------------------
@@ -141,6 +331,25 @@ README.chromium for how to obtain the required keys and tools.
Entry: image-header: An entry which contains a pointer to the FDT map
---------------------------------------------------------------------
Properties / Entry arguments:
location: Location of header ("start" or "end" of image). This is
optional. If omitted then the entry must have an offset property.
This adds an 8-byte entry to the start or end of the image, pointing to the
location of the FDT map. The format is a magic number followed by an offset
from the start or end of the image, in two's-complement format.
This entry must be in the top-level part of the image.
NOTE: If the location is at the start/end, you will probably need to specify
sort-by-offset for the image, unless you actually put the image header
first/last in the entry list.
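
A sketch of an image definition using this entry; per the note above,
sort-by-offset is enabled, and the entry names are illustrative:

	binman {
		sort-by-offset;
		u-boot {
		};
		fdtmap {
		};
		image-header {
			location = "end";
		};
	};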
Entry: intel-cmc: Entry containing an Intel Chipset Micro Code (CMC) file
-------------------------------------------------------------------------
@@ -192,6 +401,34 @@ See README.x86 for information about x86 binary blobs.
Entry: intel-ifwi: Entry containing an Intel Integrated Firmware Image (IFWI) file
----------------------------------------------------------------------------------
Properties / Entry arguments:
- filename: Filename of file to read into entry. This is either the
IFWI file itself, or a file that can be converted into one using a
tool
- convert-fit: If present this indicates that the ifwitool should be
used to convert the provided file into a IFWI.
This file contains code and data used by the SoC that is required to make
it work. It includes U-Boot TPL, microcode, things related to the CSE
(Converged Security Engine, the microcontroller that loads all the firmware)
and other items beyond the wit of man.
A typical filename is 'ifwi.bin' for an IFWI file, or 'fitimage.bin' for a
file that will be converted to an IFWI.
The position of this entry is generally set by the intel-descriptor entry.
The contents of the IFWI are specified by the subnodes of the IFWI node.
Each subnode describes an entry which is placed into the IFWI with a given
sub-partition (and optional entry name).
See README.x86 for information about x86 binary blobs.
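
A minimal node based on the properties above ('fitimage.bin' follows the
typical filename mentioned; this is an illustrative sketch, not a tested
configuration):

	intel-ifwi {
		filename = "fitimage.bin";
		convert-fit;
	};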
Entry: intel-me: Entry containing an Intel Management Engine (ME) file
----------------------------------------------------------------------
@@ -206,6 +443,8 @@ does not directly execute code in the ME binary.
A typical filename is 'me.bin'.
The position of this entry is generally set by the intel-descriptor entry.
See README.x86 for information about x86 binary blobs.
@@ -282,16 +521,21 @@ Entry: section: Entry that contains other entries
-------------------------------------------------
Properties / Entry arguments: (see binman README for more information)
-- size: Size of section in bytes
-- align-size: Align size to a particular power of two
-- pad-before: Add padding before the entry
-- pad-after: Add padding after the entry
-- pad-byte: Pad byte to use when padding
-- sort-by-offset: Reorder the entries by offset
-- end-at-4gb: Used to build an x86 ROM which ends at 4GB (2^32)
-- name-prefix: Adds a prefix to the name of every entry in the section
pad-byte: Pad byte to use when padding
sort-by-offset: True if entries should be sorted by offset, False if
they must be in-order in the device tree description
end-at-4gb: Used to build an x86 ROM which ends at 4GB (2^32)
skip-at-start: Number of bytes before the first entry starts. These
effectively adjust the starting offset of entries. For example,
if this is 16, then the first entry would start at 16. An entry
with offset = 20 would in fact be written at offset 4 in the image
file, since the first 16 bytes are skipped when writing.
name-prefix: Adds a prefix to the name of every entry in the section
when writing out the map
Since a section is also an entry, it inherits all the properties of entries
too.
A section is an entry which can contain other entries, thus allowing
hierarchical images to be created. See 'Sections and hierarchical images'
in the binman README for more information.
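
A sketch of a section using several of these properties (all values are
illustrative):

	section {
		size = <0x10000>;
		pad-byte = <0xff>;
		name-prefix = "ro-";

		u-boot {
		};
	};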
@@ -310,6 +554,8 @@ Properties / Entry arguments:
that contains the string to place in the entry
<xxx> (actual name is the value of text-label): contains the string to
place in the entry.
+<text>: The text to place in the entry (overrides the above mechanism).
+	This is useful when the text is constant.
Example node:
@@ -332,6 +578,13 @@ It is also possible to put the string directly in the node:
message = "a message directly in the node"
};
+or just:
+
+	text {
+		size = <8>;
+		text = "some text directly in the node"
+	};
The text is not itself nul-terminated. This can be achieved, if required,
by setting the size of the entry to something larger than the text.
@@ -485,7 +738,7 @@ Entry: u-boot-spl-elf: U-Boot SPL ELF image
-------------------------------------------
Properties / Entry arguments:
-	- filename: Filename of SPL u-boot (default 'spl/u-boot')
+	- filename: Filename of SPL u-boot (default 'spl/u-boot-spl')
This is the U-Boot SPL ELF image. It does not include a device tree but can
be relocated to any address for execution.
@@ -563,6 +816,17 @@ process.
+Entry: u-boot-tpl-elf: U-Boot TPL ELF image
+-------------------------------------------
+
+Properties / Entry arguments:
+	- filename: Filename of TPL u-boot (default 'tpl/u-boot-tpl')
+
+This is the U-Boot TPL ELF image. It does not include a device tree but can
+be relocated to any address for execution.
Entry: u-boot-tpl-with-ucode-ptr: U-Boot TPL with embedded microcode pointer
----------------------------------------------------------------------------


@@ -11,23 +11,32 @@
from __future__ import print_function
from distutils.sysconfig import get_python_lib
import glob
import multiprocessing
import os
import site
import sys
import traceback
import unittest
-# Bring in the patman and dtoc libraries
+# Bring in the patman and dtoc libraries (but don't override the first path
+# in PYTHONPATH)
our_path = os.path.dirname(os.path.realpath(__file__))
for dirname in ['../patman', '../dtoc', '..', '../concurrencytest']:
-sys.path.insert(0, os.path.join(our_path, dirname))
+sys.path.insert(2, os.path.join(our_path, dirname))
# Bring in the libfdt module
-sys.path.insert(0, 'scripts/dtc/pylibfdt')
-sys.path.insert(0, os.path.join(our_path,
+sys.path.insert(2, 'scripts/dtc/pylibfdt')
+sys.path.insert(2, os.path.join(our_path,
'../../build-sandbox_spl/scripts/dtc/pylibfdt'))
+# When running under python-coverage on Ubuntu 16.04, the dist-packages
+# directories are dropped from the python path. Add them in so that we can find
+# the elffile module. We could use site.getsitepackages() here but unfortunately
+# that is not available in a virtualenv.
+sys.path.append(get_python_lib())
import cmdline
import command
use_concurrent = True
@@ -38,15 +47,23 @@ except:
import control
import test_util
-def RunTests(debug, processes, args):
+def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
"""Run the functional tests and any embedded doctests
Args:
debug: True to enable debugging, which shows a full stack trace on error
-args: List of positional args provided to binman. This can hold a test
-name to execute (as in 'binman -t testSections', for example)
+verbosity: Verbosity level to use
+test_preserve_dirs: True to preserve the input directory used by tests
+so that it can be examined afterwards (only useful for debugging
+tests). If a single test is selected (in args[0]) it also preserves
+the output directory for this test. Both directories are displayed
+on the command line.
processes: Number of processes to use to run tests (None=same as #CPUs)
+args: List of positional args provided to binman. This can hold a test
+name to execute (as in 'binman test testSections', for example)
+toolpath: List of paths to use for tools
"""
+import cbfs_util_test
import elf_test
import entry_test
import fdt_test
@@ -63,8 +80,11 @@ def RunTests(debug, processes, args):
sys.argv = [sys.argv[0]]
if debug:
sys.argv.append('-D')
if debug:
sys.argv.append('-D')
if verbosity:
sys.argv.append('-v%d' % verbosity)
if toolpath:
for path in toolpath:
sys.argv += ['--toolpath', path]
# Run the entry tests first, since these need to be the first to import the
# 'entry' module.
@@ -72,7 +92,14 @@ def RunTests(debug, processes, args):
suite = unittest.TestSuite()
loader = unittest.TestLoader()
for module in (entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
-elf_test.TestElf, image_test.TestImage):
+elf_test.TestElf, image_test.TestImage,
+cbfs_util_test.TestCbfs):
# Tell the test module about our arguments, if it is interested
if hasattr(module, 'setup_test_args'):
setup_test_args = getattr(module, 'setup_test_args')
setup_test_args(preserve_indir=test_preserve_dirs,
preserve_outdirs=test_preserve_dirs and test_name is not None,
toolpath=toolpath, verbosity=verbosity)
if test_name:
try:
suite.addTests(loader.loadTestsFromName(test_name, module))
@@ -104,9 +131,14 @@ def RunTests(debug, processes, args):
print(test.id(), err)
for test, err in result.failures:
print(err, result.failures)
+if result.skipped:
+print('%d binman test%s SKIPPED:' %
+(len(result.skipped), 's' if len(result.skipped) > 1 else ''))
+for skip_info in result.skipped:
+print('%s: %s' % (skip_info[0], skip_info[1]))
if result.errors or result.failures:
print('binman tests FAILED')
return 1
print('binman tests FAILED')
return 1
return 0
def GetEntryModules(include_testing=True):
@@ -127,38 +159,36 @@ def RunTestCoverage():
for item in glob_list if '_testing' not in item])
test_util.RunTestCoverage('tools/binman/binman.py', None,
['*test*', '*binman.py', 'tools/patman/*', 'tools/dtoc/*'],
-options.build_dir, all_set)
+args.build_dir, all_set)
-def RunBinman(options, args):
+def RunBinman(args):
"""Main entry point to binman once arguments are parsed
Args:
-options: Command-line options
-args: Non-option arguments
+args: Command line arguments Namespace object
"""
ret_code = 0
# For testing: This enables full exception traces.
#options.debug = True
-if not options.debug:
+if not args.debug:
sys.tracebacklimit = 0
-if options.test:
-ret_code = RunTests(options.debug, options.processes, args[1:])
+if args.cmd == 'test':
+if args.test_coverage:
+RunTestCoverage()
+else:
+ret_code = RunTests(args.debug, args.verbosity, args.processes,
+args.test_preserve_dirs, args.tests,
+args.toolpath)
-elif options.test_coverage:
-RunTestCoverage()
-elif options.entry_docs:
+elif args.cmd == 'entry-docs':
control.WriteEntryDocs(GetEntryModules())
else:
try:
-ret_code = control.Binman(options, args)
+ret_code = control.Binman(args)
except Exception as e:
print('binman: %s' % e)
-if options.debug:
+if args.debug:
print()
traceback.print_exc()
ret_code = 1
@@ -166,6 +196,7 @@ def RunBinman(options, args):
if __name__ == "__main__":
-(options, args) = cmdline.ParseArgs(sys.argv)
-ret_code = RunBinman(options, args)
+args = cmdline.ParseArgs(sys.argv[1:])
+ret_code = RunBinman(args)
sys.exit(ret_code)
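
Taken together, the move from option flags to sub-commands gives invocations
like these, all of which appear in the README changes above:

	binman build -b sandbox
	binman test
	binman test -T
	binman entry-docs >tools/binman/README.entries
	binman ls -i image.bin
	binman extract -i image.bin section/cbfs/u-boot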


@@ -1,464 +0,0 @@
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2018 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# Base class for sections (collections of entries)
#
from __future__ import print_function
from collections import OrderedDict
import sys
import fdt_util
import re
import state
import tools
class Section(object):
"""A section which contains multiple entries
A section represents a collection of entries. There must be one or more
sections in an image. Sections are used to group entries together.
Attributes:
_node: Node object that contains the section definition in device tree
_parent_section: Parent Section object which created this Section
_size: Section size in bytes, or None if not known yet
_align_size: Section size alignment, or None
_pad_before: Number of bytes before the first entry starts. This
effectively changes the place where entry offset 0 starts
_pad_after: Number of bytes after the last entry ends. The last
entry will finish on or before this boundary
_pad_byte: Byte to use to pad the section where there is no entry
_sort: True if entries should be sorted by offset, False if they
must be in-order in the device tree description
_skip_at_start: Number of bytes before the first entry starts. These
effectively adjust the starting offset of entries. For example,
if _pad_before is 16, then the first entry would start at 16.
An entry with offset = 20 would in fact be written at offset 4
in the image file.
_end_4gb: Indicates that the section ends at the 4GB boundary. This is
used for x86 images, which want to use offsets such that a memory
address (like 0xff800000) is the first entry offset. This causes
_skip_at_start to be set to the starting memory address.
_name_prefix: Prefix to add to the name of all entries within this
section
_entries: OrderedDict() of entries
"""
def __init__(self, name, parent_section, node, image, test=False):
global entry
global Entry
import entry
from entry import Entry
self._parent_section = parent_section
self._name = name
self._node = node
self._image = image
self._offset = None
self._size = None
self._align_size = None
self._pad_before = 0
self._pad_after = 0
self._pad_byte = 0
self._sort = False
self._skip_at_start = None
self._end_4gb = False
self._name_prefix = ''
self._entries = OrderedDict()
self._image_pos = None
if not test:
self._ReadNode()
self._ReadEntries()
def _ReadNode(self):
"""Read properties from the section node"""
self._offset = fdt_util.GetInt(self._node, 'offset')
self._size = fdt_util.GetInt(self._node, 'size')
self._align_size = fdt_util.GetInt(self._node, 'align-size')
if tools.NotPowerOfTwo(self._align_size):
self._Raise("Alignment size %s must be a power of two" %
self._align_size)
self._pad_before = fdt_util.GetInt(self._node, 'pad-before', 0)
self._pad_after = fdt_util.GetInt(self._node, 'pad-after', 0)
self._pad_byte = fdt_util.GetInt(self._node, 'pad-byte', 0)
self._sort = fdt_util.GetBool(self._node, 'sort-by-offset')
self._end_4gb = fdt_util.GetBool(self._node, 'end-at-4gb')
self._skip_at_start = fdt_util.GetInt(self._node, 'skip-at-start')
if self._end_4gb:
if not self._size:
self._Raise("Section size must be provided when using end-at-4gb")
if self._skip_at_start is not None:
self._Raise("Provide either 'end-at-4gb' or 'skip-at-start'")
else:
self._skip_at_start = 0x100000000 - self._size
else:
if self._skip_at_start is None:
self._skip_at_start = 0
self._name_prefix = fdt_util.GetString(self._node, 'name-prefix')
def _ReadEntries(self):
for node in self._node.subnodes:
if node.name == 'hash':
continue
entry = Entry.Create(self, node)
entry.SetPrefix(self._name_prefix)
self._entries[node.name] = entry
def GetFdtSet(self):
"""Get the set of device tree files used by this image"""
fdt_set = set()
for entry in self._entries.values():
fdt_set.update(entry.GetFdtSet())
return fdt_set
def SetOffset(self, offset):
self._offset = offset
def ExpandEntries(self):
for entry in self._entries.values():
entry.ExpandEntries()
def AddMissingProperties(self):
"""Add new properties to the device tree as needed for this entry"""
for prop in ['offset', 'size', 'image-pos']:
if not prop in self._node.props:
state.AddZeroProp(self._node, prop)
state.CheckAddHashProp(self._node)
for entry in self._entries.values():
entry.AddMissingProperties()
def SetCalculatedProperties(self):
state.SetInt(self._node, 'offset', self._offset or 0)
state.SetInt(self._node, 'size', self._size)
image_pos = self._image_pos
if self._parent_section:
image_pos -= self._parent_section.GetRootSkipAtStart()
state.SetInt(self._node, 'image-pos', image_pos)
for entry in self._entries.values():
entry.SetCalculatedProperties()
def ProcessFdt(self, fdt):
todo = self._entries.values()
for passnum in range(3):
next_todo = []
for entry in todo:
if not entry.ProcessFdt(fdt):
next_todo.append(entry)
todo = next_todo
if not todo:
break
if todo:
self._Raise('Internal error: Could not complete processing of Fdt: '
'remaining %s' % todo)
return True
def CheckSize(self):
"""Check that the section contents does not exceed its size, etc."""
contents_size = 0
for entry in self._entries.values():
contents_size = max(contents_size, entry.offset + entry.size)
contents_size -= self._skip_at_start
size = self._size
if not size:
size = self._pad_before + contents_size + self._pad_after
size = tools.Align(size, self._align_size)
if self._size and contents_size > self._size:
self._Raise("contents size %#x (%d) exceeds section size %#x (%d)" %
(contents_size, contents_size, self._size, self._size))
if not self._size:
self._size = size
if self._size != tools.Align(self._size, self._align_size):
self._Raise("Size %#x (%d) does not match align-size %#x (%d)" %
(self._size, self._size, self._align_size, self._align_size))
return size
def _Raise(self, msg):
"""Raises an error for this section
Args:
msg: Error message to use in the raise string
Raises:
ValueError()
"""
raise ValueError("Section '%s': %s" % (self._node.path, msg))
def GetPath(self):
"""Get the path of an image (in the FDT)
Returns:
Full path of the node for this image
"""
return self._node.path
def FindEntryType(self, etype):
"""Find an entry type in the section
Args:
etype: Entry type to find
Returns:
entry matching that type, or None if not found
"""
for entry in self._entries.values():
if entry.etype == etype:
return entry
return None
def GetEntryContents(self):
"""Call ObtainContents() for each entry
This calls each entry's ObtainContents() a few times until they all
return True. We stop calling an entry's function once it returns
True. This allows the contents of one entry to depend on another.
After 3 rounds we give up since it's likely an error.
"""
todo = self._entries.values()
for passnum in range(3):
next_todo = []
for entry in todo:
if not entry.ObtainContents():
next_todo.append(entry)
todo = next_todo
if not todo:
break
if todo:
self._Raise('Internal error: Could not complete processing of '
'contents: remaining %s' % todo)
return True
def _SetEntryOffsetSize(self, name, offset, size):
"""Set the offset and size of an entry
Args:
name: Entry name to update
offset: New offset
size: New size
"""
entry = self._entries.get(name)
if not entry:
self._Raise("Unable to set offset/size for unknown entry '%s'" %
name)
entry.SetOffsetSize(self._skip_at_start + offset, size)
def GetEntryOffsets(self):
"""Handle entries that want to set the offset/size of other entries
This calls each entry's GetOffsets() method. If it returns a list
of entries to update, it updates them.
"""
for entry in self._entries.values():
offset_dict = entry.GetOffsets()
for name, info in offset_dict.items():
self._SetEntryOffsetSize(name, *info)
def PackEntries(self):
"""Pack all entries into the section"""
offset = self._skip_at_start
for entry in self._entries.values():
offset = entry.Pack(offset)
self._size = self.CheckSize()
def _SortEntries(self):
"""Sort entries by offset"""
entries = sorted(self._entries.values(), key=lambda entry: entry.offset)
self._entries.clear()
for entry in entries:
self._entries[entry._node.name] = entry
def _ExpandEntries(self):
"""Expand any entries that are permitted to"""
exp_entry = None
for entry in self._entries.values():
if exp_entry:
exp_entry.ExpandToLimit(entry.offset)
exp_entry = None
if entry.expand_size:
exp_entry = entry
if exp_entry:
exp_entry.ExpandToLimit(self._size)
def CheckEntries(self):
"""Check that entries do not overlap or extend outside the section
This also sorts entries, if needed and expands
"""
if self._sort:
self._SortEntries()
self._ExpandEntries()
offset = 0
prev_name = 'None'
for entry in self._entries.values():
entry.CheckOffset()
if (entry.offset < self._skip_at_start or
entry.offset + entry.size > self._skip_at_start + self._size):
entry.Raise("Offset %#x (%d) is outside the section starting "
"at %#x (%d)" %
(entry.offset, entry.offset, self._skip_at_start,
self._skip_at_start))
if entry.offset < offset:
entry.Raise("Offset %#x (%d) overlaps with previous entry '%s' "
"ending at %#x (%d)" %
(entry.offset, entry.offset, prev_name, offset, offset))
offset = entry.offset + entry.size
prev_name = entry.GetPath()
def SetImagePos(self, image_pos):
self._image_pos = image_pos
for entry in self._entries.values():
entry.SetImagePos(image_pos)
def ProcessEntryContents(self):
"""Call the ProcessContents() method for each entry
This is intended to adjust the contents as needed by the entry type.
"""
for entry in self._entries.values():
entry.ProcessContents()
def WriteSymbols(self):
"""Write symbol values into binary files for access at run time"""
for entry in self._entries.values():
entry.WriteSymbols(self)
def BuildSection(self, fd, base_offset):
"""Write the section to a file"""
fd.seek(base_offset)
fd.write(self.GetData())
def GetData(self):
"""Get the contents of the section"""
section_data = tools.GetBytes(self._pad_byte, self._size)
for entry in self._entries.values():
data = entry.GetData()
base = self._pad_before + entry.offset - self._skip_at_start
section_data = (section_data[:base] + data +
section_data[base + len(data):])
return section_data
def LookupSymbol(self, sym_name, optional, msg):
"""Look up a symbol in an ELF file
Looks up a symbol in an ELF file. Only entry types which come from an
ELF image can be used by this function.
At present the only entry property supported is offset.
Args:
sym_name: Symbol name in the ELF file to look up in the format
_binman_<entry>_prop_<property> where <entry> is the name of
the entry and <property> is the property to find (e.g.
_binman_u_boot_prop_offset). As a special case, you can append
_any to <entry> to have it search for any matching entry. E.g.
_binman_u_boot_any_prop_offset will match entries called u-boot,
u-boot-img and u-boot-nodtb)
optional: True if the symbol is optional. If False this function
will raise if the symbol is not found
msg: Message to display if an error occurs
Returns:
Value that should be assigned to that symbol, or None if it was
optional and not found
Raises:
ValueError if the symbol is invalid or not found, or references a
property which is not supported
"""
m = re.match(r'^_binman_(\w+)_prop_(\w+)$', sym_name)
if not m:
raise ValueError("%s: Symbol '%s' has invalid format" %
(msg, sym_name))
entry_name, prop_name = m.groups()
entry_name = entry_name.replace('_', '-')
entry = self._entries.get(entry_name)
if not entry:
if entry_name.endswith('-any'):
root = entry_name[:-4]
for name in self._entries:
if name.startswith(root):
rest = name[len(root):]
if rest in ['', '-img', '-nodtb']:
entry = self._entries[name]
if not entry:
err = ("%s: Entry '%s' not found in list (%s)" %
(msg, entry_name, ','.join(self._entries.keys())))
if optional:
print('Warning: %s' % err, file=sys.stderr)
return None
raise ValueError(err)
if prop_name == 'offset':
return entry.offset
elif prop_name == 'image_pos':
return entry.image_pos
else:
raise ValueError("%s: No such property '%s'" % (msg, prop_name))
def GetEntries(self):
"""Get the number of entries in a section
Returns:
Number of entries in a section
"""
return self._entries
def GetSize(self):
"""Get the size of a section in bytes
This is only meaningful if the section has a pre-defined size, or the
entries within it have been packed, so that the size has been
calculated.
Returns:
Entry size in bytes
"""
return self._size
def WriteMap(self, fd, indent):
"""Write a map of the section to a .map file
Args:
fd: File to write the map to
"""
Entry.WriteMapLine(fd, indent, self._name, self._offset or 0,
self._size, self._image_pos)
for entry in self._entries.values():
entry.WriteMap(fd, indent + 1)
def GetContentsByPhandle(self, phandle, source_entry):
"""Get the data contents of an entry specified by a phandle
This uses a phandle to look up a node and find the entry
associated with it. Then it returns the contents of that entry.
Args:
phandle: Phandle to look up (integer)
source_entry: Entry containing that phandle (used for error
reporting)
Returns:
data from associated entry (as a string), or None if not found
"""
node = self._node.GetFdt().LookupPhandle(phandle)
if not node:
source_entry.Raise("Cannot find node for phandle %d" % phandle)
for entry in self._entries.values():
if entry._node == node:
return entry.GetData()
source_entry.Raise("Cannot find entry for node '%s'" % node.name)
def ExpandSize(self, size):
if size != self._size:
self._size = size
def GetRootSkipAtStart(self):
if self._parent_section:
return self._parent_section.GetRootSkipAtStart()
return self._skip_at_start
def GetImageSize(self):
return self._image._size

tools/binman/cbfs_util.py (new file, 887 lines)

@@ -0,0 +1,887 @@
# SPDX-License-Identifier: GPL-2.0+
# Copyright 2019 Google LLC
# Written by Simon Glass <sjg@chromium.org>
"""Support for coreboot's CBFS format
CBFS supports a header followed by a number of files, generally targeted at SPI
flash.
The format is somewhat defined by documentation in the coreboot tree although
it is necessary to rely on the C structures and source code (mostly cbfstool)
to fully understand it.
Currently supported: raw and stage types with compression, padding empty areas
with empty files, fixed-offset files
"""
from __future__ import print_function
from collections import OrderedDict
import io
import struct
import sys
import command
import elf
import tools
# Set to True to enable printing output while working
DEBUG = False
# Set to True to enable output from running cbfstool for debugging
VERBOSE = False
# The master header, at the start of the CBFS
HEADER_FORMAT = '>IIIIIIII'
HEADER_LEN = 0x20
HEADER_MAGIC = 0x4f524243
HEADER_VERSION1 = 0x31313131
HEADER_VERSION2 = 0x31313132
# The file header, at the start of each file in the CBFS
FILE_HEADER_FORMAT = b'>8sIIII'
FILE_HEADER_LEN = 0x18
FILE_MAGIC = b'LARCHIVE'
FILENAME_ALIGN = 16 # Filename lengths are aligned to this
# A stage header containing information about 'stage' files
# Yes this is correct: this header is in little-endian format
STAGE_FORMAT = '<IQQII'
STAGE_LEN = 0x1c
# An attribute describing the compression used in a file
ATTR_COMPRESSION_FORMAT = '>IIII'
ATTR_COMPRESSION_LEN = 0x10
# Attribute tags
# Depending on how the header was initialised, it may be backed with 0x00 or
# 0xff. Support both.
FILE_ATTR_TAG_UNUSED = 0
FILE_ATTR_TAG_UNUSED2 = 0xffffffff
FILE_ATTR_TAG_COMPRESSION = 0x42435a4c
FILE_ATTR_TAG_HASH = 0x68736148
FILE_ATTR_TAG_POSITION = 0x42435350 # PSCB
FILE_ATTR_TAG_ALIGNMENT = 0x42434c41 # ALCB
FILE_ATTR_TAG_PADDING = 0x47444150 # PDNG
# This is 'the size of bootblock reserved in firmware image (cbfs.txt)'
# Not much more info is available, but we set it to 4, due to this comment in
# cbfstool.c:
# This causes 4 bytes to be left out at the end of the image, for two reasons:
# 1. The cbfs master header pointer resides there
# 2. Some cbfs implementations assume that an image that resides below 4GB has
# a bootblock and get confused when the end of the image is at 4GB == 0.
MIN_BOOTBLOCK_SIZE = 4
# Files start aligned to this boundary in the CBFS
ENTRY_ALIGN = 0x40
# CBFSs must declare an architecture since much of the logic is designed with
# x86 in mind. The effect of setting this value is not well documented, but in
# general x86 is used and this makes use of a boot block and an image that ends
# at the end of 32-bit address space.
ARCHITECTURE_UNKNOWN = 0xffffffff
ARCHITECTURE_X86 = 0x00000001
ARCHITECTURE_ARM = 0x00000010
ARCHITECTURE_AARCH64 = 0x0000aa64
ARCHITECTURE_MIPS = 0x00000100
ARCHITECTURE_RISCV = 0xc001d0de
ARCHITECTURE_PPC64 = 0x407570ff
ARCH_NAMES = {
ARCHITECTURE_UNKNOWN : 'unknown',
ARCHITECTURE_X86 : 'x86',
ARCHITECTURE_ARM : 'arm',
ARCHITECTURE_AARCH64 : 'arm64',
ARCHITECTURE_MIPS : 'mips',
ARCHITECTURE_RISCV : 'riscv',
ARCHITECTURE_PPC64 : 'ppc64',
}
# File types. Only supported ones are included here
TYPE_CBFSHEADER = 0x02 # Master header, HEADER_FORMAT
TYPE_STAGE = 0x10 # Stage, holding an executable, see STAGE_FORMAT
TYPE_RAW = 0x50 # Raw file, possibly compressed
TYPE_EMPTY = 0xffffffff # Empty data
# Compression types
COMPRESS_NONE, COMPRESS_LZMA, COMPRESS_LZ4 = range(3)
COMPRESS_NAMES = {
COMPRESS_NONE : 'none',
COMPRESS_LZMA : 'lzma',
COMPRESS_LZ4 : 'lz4',
}
def find_arch(find_name):
"""Look up an architecture name
Args:
find_name: Architecture name to find
Returns:
ARCHITECTURE_... value or None if not found
"""
for arch, name in ARCH_NAMES.items():
if name == find_name:
return arch
return None
def find_compress(find_name):
"""Look up a compression algorithm name
Args:
find_name: Compression algorithm name to find
Returns:
COMPRESS_... value or None if not found
"""
for compress, name in COMPRESS_NAMES.items():
if name == find_name:
return compress
return None
def compress_name(compress):
"""Look up the name of a compression algorithm
Args:
compress: Compression algorithm number to find (COMPRESS_...)
Returns:
Compression algorithm name (string)
Raises:
KeyError if the algorithm number is invalid
"""
return COMPRESS_NAMES[compress]
def align_int(val, align):
"""Align a value up to the given alignment
Args:
val: Integer value to align
align: Integer alignment value (e.g. 4 to align to 4-byte boundary)
Returns:
integer value aligned to the required boundary, rounding up if necessary
"""
return int((val + align - 1) / align) * align
def align_int_down(val, align):
"""Align a value down to the given alignment
Args:
val: Integer value to align
align: Integer alignment value (e.g. 4 to align to 4-byte boundary)
Returns:
integer value aligned to the required boundary, rounding down if
necessary
"""
return int(val / align) * align
def _pack_string(instr):
"""Pack a string to the required aligned size by adding padding
Args:
instr: String to process
Returns:
String with required padding (at least one 0x00 byte) at the end
"""
val = tools.ToBytes(instr)
pad_len = align_int(len(val) + 1, FILENAME_ALIGN)
return val + tools.GetBytes(0, pad_len - len(val))
class CbfsFile(object):
"""Class to represent a single CBFS file
This is used to hold the information about a file, including its contents.
Use the get_data_and_offset() method to obtain the raw output for writing to
CBFS.
Properties:
name: Name of file
offset: Offset of file data from start of file header
cbfs_offset: Offset of file data in bytes from start of CBFS, or None to
place this file anywhere
data: Contents of file, uncompressed
data_len: Length of (possibly compressed) data in bytes
ftype: File type (TYPE_...)
compression: Compression type (COMPRESS_...)
memlen: Length of data in memory, i.e. the uncompressed length, None if
no compression algorithm is selected
load: Load address in memory if known, else None
entry: Entry address in memory if known, else None. This is where
execution starts after the file is loaded
base_address: Base address to use for 'stage' files
erase_byte: Erase byte to use for padding between the file header and
contents (used for empty files)
size: Size of the file in bytes (used for empty files)
"""
def __init__(self, name, ftype, data, cbfs_offset, compress=COMPRESS_NONE):
self.name = name
self.offset = None
self.cbfs_offset = cbfs_offset
self.data = data
self.ftype = ftype
self.compress = compress
self.memlen = None
self.load = None
self.entry = None
self.base_address = None
self.data_len = len(data)
self.erase_byte = None
self.size = None
def decompress(self):
"""Handle decompressing data if necessary"""
indata = self.data
if self.compress == COMPRESS_LZ4:
data = tools.Decompress(indata, 'lz4')
elif self.compress == COMPRESS_LZMA:
data = tools.Decompress(indata, 'lzma')
else:
data = indata
self.memlen = len(data)
self.data = data
self.data_len = len(indata)
@classmethod
def stage(cls, base_address, name, data, cbfs_offset):
"""Create a new stage file
Args:
base_address: Int base address for memory-mapping of ELF file
name: String file name to put in CBFS (does not need to correspond
to the name that the file originally came from)
data: Contents of file
cbfs_offset: Offset of file data in bytes from start of CBFS, or
None to place this file anywhere
Returns:
CbfsFile object containing the file information
"""
cfile = CbfsFile(name, TYPE_STAGE, data, cbfs_offset)
cfile.base_address = base_address
return cfile
@classmethod
def raw(cls, name, data, cbfs_offset, compress):
"""Create a new raw file
Args:
name: String file name to put in CBFS (does not need to correspond
to the name that the file originally came from)
data: Contents of file
cbfs_offset: Offset of file data in bytes from start of CBFS, or
None to place this file anywhere
compress: Compression algorithm to use (COMPRESS_...)
Returns:
CbfsFile object containing the file information
"""
return CbfsFile(name, TYPE_RAW, data, cbfs_offset, compress)
@classmethod
def empty(cls, space_to_use, erase_byte):
"""Create a new empty file of a given size
Args:
space_to_use: Size of available space, which must be at least as
large as the alignment size for this CBFS
erase_byte: Byte to use for contents of file (repeated through the
whole file)
Returns:
CbfsFile object containing the file information
"""
cfile = CbfsFile('', TYPE_EMPTY, b'', None)
cfile.size = space_to_use - FILE_HEADER_LEN - FILENAME_ALIGN
cfile.erase_byte = erase_byte
return cfile
def calc_start_offset(self):
"""Check if this file needs to start at a particular offset in CBFS
Returns:
None if the file can be placed anywhere, or
the largest offset where the file could start (integer)
"""
if self.cbfs_offset is None:
return None
return self.cbfs_offset - self.get_header_len()
def get_header_len(self):
"""Get the length of headers required for a file
This is the minimum length required before the actual data for this file
could start. It might start later if there is padding.
Returns:
Total length of all non-data fields, in bytes
"""
name = _pack_string(self.name)
hdr_len = len(name) + FILE_HEADER_LEN
if self.ftype == TYPE_STAGE:
pass
elif self.ftype == TYPE_RAW:
hdr_len += ATTR_COMPRESSION_LEN
elif self.ftype == TYPE_EMPTY:
pass
else:
raise ValueError('Unknown file type %#x\n' % self.ftype)
return hdr_len
def get_data_and_offset(self, offset=None, pad_byte=None):
"""Obtain the contents of the file, in CBFS format, and the offset of
the data within the file
Args:
offset: Current output offset in bytes from the start of the CBFS,
used to calculate padding when cbfs_offset requests a fixed position
pad_byte: Byte to use for any padding before the file contents
Returns:
tuple:
bytes representing the contents of this file, packed and aligned
for directly inserting into the final CBFS output
offset to the file data from the start of the returned data.
"""
name = _pack_string(self.name)
hdr_len = len(name) + FILE_HEADER_LEN
attr_pos = 0
content = b''
attr = b''
pad = b''
data = self.data
if self.ftype == TYPE_STAGE:
elf_data = elf.DecodeElf(data, self.base_address)
content = struct.pack(STAGE_FORMAT, self.compress,
elf_data.entry, elf_data.load,
len(elf_data.data), elf_data.memsize)
data = elf_data.data
elif self.ftype == TYPE_RAW:
orig_data = data
if self.compress == COMPRESS_LZ4:
data = tools.Compress(orig_data, 'lz4')
elif self.compress == COMPRESS_LZMA:
data = tools.Compress(orig_data, 'lzma')
self.memlen = len(orig_data)
self.data_len = len(data)
attr = struct.pack(ATTR_COMPRESSION_FORMAT,
FILE_ATTR_TAG_COMPRESSION, ATTR_COMPRESSION_LEN,
self.compress, self.memlen)
elif self.ftype == TYPE_EMPTY:
data = tools.GetBytes(self.erase_byte, self.size)
else:
raise ValueError('Unknown type %#x when writing\n' % self.ftype)
if attr:
attr_pos = hdr_len
hdr_len += len(attr)
if self.cbfs_offset is not None:
pad_len = self.cbfs_offset - offset - hdr_len
if pad_len < 0: # pragma: no cover
# Test coverage of this is not available since this should never
# happen. It indicates that get_header_len() provided an
# incorrect value (too small) so that we decided that we could
# put this file at the requested place, but in fact a previous
# file extends far enough into the CBFS that this is not
# possible.
raise ValueError("Internal error: CBFS file '%s': Requested offset %#x but current output position is %#x" %
(self.name, self.cbfs_offset, offset))
pad = tools.GetBytes(pad_byte, pad_len)
hdr_len += pad_len
# hdr_len is now the offset of the start of the file's data
size = len(content) + len(data)
hdr = struct.pack(FILE_HEADER_FORMAT, FILE_MAGIC, size,
self.ftype, attr_pos, hdr_len)
# Do a sanity check of the get_header_len() function, to ensure that it
# stays in lockstep with this function
expected_len = self.get_header_len()
actual_len = len(hdr + name + attr)
if expected_len != actual_len: # pragma: no cover
# Test coverage of this is not available since this should never
# happen. It probably indicates that get_header_len() is broken.
raise ValueError("Internal error: CBFS file '%s': Expected headers of %#x bytes, got %#d" %
(self.name, expected_len, actual_len))
return hdr + name + attr + pad + content + data, hdr_len
class CbfsWriter(object):
"""Class to handle writing a Coreboot File System (CBFS)
Usage is something like:
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', tools.ReadFile('u-boot.bin'))
...
data = cbw.get_data()
Attributes:
_master_name: Name of the file containing the master header
_size: Size of the filesystem, in bytes
_files: Ordered list of files in the CBFS, each a CbfsFile
_arch: Architecture of the CBFS (ARCHITECTURE_...)
_bootblock_size: Size of the bootblock, typically at the end of the CBFS
_erase_byte: Byte to use for empty space in the CBFS
_align: Alignment to use for files, typically ENTRY_ALIGN
_base_address: Boot block offset in bytes from the start of CBFS.
Typically this is located at the top of the CBFS. It is 0 when there is
no boot block
_header_offset: Offset of master header in bytes from start of CBFS
_contents_offset: Offset of first file header
_hdr_at_start: True if the master header is at the start of the CBFS,
instead of the end as normal for x86
_add_fileheader: True to add a fileheader around the master header
"""
def __init__(self, size, arch=ARCHITECTURE_X86):
"""Set up a new CBFS
This sets up all properties to default values. Files can be added using
add_file_raw(), etc.
Args:
size: Size of CBFS in bytes
arch: Architecture to declare for CBFS
"""
self._master_name = 'cbfs master header'
self._size = size
self._files = OrderedDict()
self._arch = arch
self._bootblock_size = 0
self._erase_byte = 0xff
self._align = ENTRY_ALIGN
self._add_fileheader = False
if self._arch == ARCHITECTURE_X86:
# Allow 4 bytes for the header pointer. That holds the
# two's-complement negative offset of the master header in bytes
# measured from one byte past the end of the CBFS
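# As a worked example with hypothetical numbers: in a 0x1000-byte
# CBFS whose master header sits at offset 0xf90, the pointer word
# holds 0xf90 - 0x1000 = -0x70, stored little-endian as 0xffffff90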
self._base_address = self._size - max(self._bootblock_size,
MIN_BOOTBLOCK_SIZE)
self._header_offset = self._base_address - HEADER_LEN
self._contents_offset = 0
self._hdr_at_start = False
else:
# For non-x86, different rules apply
self._base_address = 0
self._header_offset = align_int(self._base_address +
self._bootblock_size, 4)
self._contents_offset = align_int(self._header_offset +
FILE_HEADER_LEN +
self._bootblock_size, self._align)
self._hdr_at_start = True
def _skip_to(self, fd, offset):
"""Write out pad bytes until a given offset
Args:
fd: File object to write to
offset: Offset to write to
"""
if fd.tell() > offset:
raise ValueError('No space for data before offset %#x (current offset %#x)' %
(offset, fd.tell()))
fd.write(tools.GetBytes(self._erase_byte, offset - fd.tell()))
def _pad_to(self, fd, offset):
"""Write out pad bytes and/or an empty file until a given offset
Args:
fd: File object to write to
offset: Offset to write to
"""
self._align_to(fd, self._align)
upto = fd.tell()
if upto > offset:
raise ValueError('No space for data before pad offset %#x (current offset %#x)' %
(offset, upto))
todo = align_int_down(offset - upto, self._align)
if todo:
cbf = CbfsFile.empty(todo, self._erase_byte)
fd.write(cbf.get_data_and_offset()[0])
self._skip_to(fd, offset)
def _align_to(self, fd, align):
"""Write out pad bytes until a given alignment is reached
This only aligns if the resulting output would not reach the end of the
CBFS, since we want to leave the last 4 bytes for the master-header
pointer.
Args:
fd: File object to write to
align: Alignment to require (e.g. 4 means pad to next 4-byte
boundary)
"""
offset = align_int(fd.tell(), align)
if offset < self._size:
self._skip_to(fd, offset)
def add_file_stage(self, name, data, cbfs_offset=None):
"""Add a new stage file to the CBFS
Args:
name: String file name to put in CBFS (does not need to correspond
to the name that the file originally came from)
data: Contents of file
cbfs_offset: Offset of this file's data within the CBFS, in bytes,
or None to place this file anywhere
Returns:
CbfsFile object created
"""
cfile = CbfsFile.stage(self._base_address, name, data, cbfs_offset)
self._files[name] = cfile
return cfile
def add_file_raw(self, name, data, cbfs_offset=None,
compress=COMPRESS_NONE):
"""Create a new raw file
Args:
name: String file name to put in CBFS (does not need to correspond
to the name that the file originally came from)
data: Contents of file
cbfs_offset: Offset of this file's data within the CBFS, in bytes,
or None to place this file anywhere
compress: Compression algorithm to use (COMPRESS_...)
Returns:
CbfsFile object created
"""
cfile = CbfsFile.raw(name, data, cbfs_offset, compress)
self._files[name] = cfile
return cfile
def _write_header(self, fd, add_fileheader):
"""Write out the master header to a CBFS
Args:
fd: File object
add_fileheader: True to place the master header in a file header
record
"""
if fd.tell() > self._header_offset:
raise ValueError('No space for header at offset %#x (current offset %#x)' %
(self._header_offset, fd.tell()))
if not add_fileheader:
self._pad_to(fd, self._header_offset)
hdr = struct.pack(HEADER_FORMAT, HEADER_MAGIC, HEADER_VERSION2,
self._size, self._bootblock_size, self._align,
self._contents_offset, self._arch, 0xffffffff)
if add_fileheader:
name = _pack_string(self._master_name)
fd.write(struct.pack(FILE_HEADER_FORMAT, FILE_MAGIC, len(hdr),
TYPE_CBFSHEADER, 0,
FILE_HEADER_LEN + len(name)))
fd.write(name)
self._header_offset = fd.tell()
fd.write(hdr)
self._align_to(fd, self._align)
else:
fd.write(hdr)
def get_data(self):
"""Obtain the full contents of the CBFS
This builds the CBFS with headers and all required files.
Returns:
'bytes' type containing the data
"""
fd = io.BytesIO()
# The header can go at the start in some cases
if self._hdr_at_start:
self._write_header(fd, add_fileheader=self._add_fileheader)
self._skip_to(fd, self._contents_offset)
# Write out each file
for cbf in self._files.values():
# Place the file at its requested place, if any
offset = cbf.calc_start_offset()
if offset is not None:
self._pad_to(fd, align_int_down(offset, self._align))
pos = fd.tell()
data, data_offset = cbf.get_data_and_offset(pos, self._erase_byte)
fd.write(data)
self._align_to(fd, self._align)
cbf.calced_cbfs_offset = pos + data_offset
if not self._hdr_at_start:
self._write_header(fd, add_fileheader=self._add_fileheader)
# Pad to the end and write a pointer to the CBFS master header
self._pad_to(fd, self._base_address or self._size - 4)
rel_offset = self._header_offset - self._size
fd.write(struct.pack('<I', rel_offset & 0xffffffff))
return fd.getvalue()
class CbfsReader(object):
"""Class to handle reading a Coreboot File System (CBFS)
Usage is something like:
cbfs = cbfs_util.CbfsReader(data)
cfile = cbfs.files['u-boot']
self.WriteFile('u-boot.bin', cfile.data)
Attributes:
files: Ordered list of CbfsFile objects
align: Alignment to use for files, typically ENTRY_ALIGN
stage_base_address: Base address to use when mapping ELF files into the
CBFS for TYPE_STAGE files. If this is larger than the code address
of the ELF file, then data at the start of the ELF file will not
appear in the CBFS. Currently there are no tests for this behaviour,
as the documentation is sparse
magic: Integer magic number from master header (HEADER_MAGIC)
version: Version number of CBFS (HEADER_VERSION2)
rom_size: Size of CBFS
boot_block_size: Size of boot block
cbfs_offset: Offset of the first file in bytes from start of CBFS
arch: Architecture of CBFS file (ARCHITECTURE_...)
"""
def __init__(self, data, read=True):
self.align = ENTRY_ALIGN
self.arch = None
self.boot_block_size = None
self.cbfs_offset = None
self.files = OrderedDict()
self.magic = None
self.rom_size = None
self.stage_base_address = 0
self.version = None
self.data = data
if read:
self.read()
def read(self):
"""Read all the files in the CBFS and add them to self.files"""
with io.BytesIO(self.data) as fd:
# First, get the master header
if not self._find_and_read_header(fd, len(self.data)):
raise ValueError('Cannot find master header')
fd.seek(self.cbfs_offset)
# Now read in the files one at a time
while True:
cfile = self._read_next_file(fd)
if cfile:
self.files[cfile.name] = cfile
elif cfile is False:
break
def _find_and_read_header(self, fd, size):
"""Find and read the master header in the CBFS
This looks at the pointer word at the very end of the CBFS. This is an
offset to the header relative to the size of the CBFS, which is assumed
to be known. Note that the offset is in *little endian* format.
Args:
fd: File to read from
size: Size of file
Returns:
True if header was found, False if not
"""
orig_pos = fd.tell()
fd.seek(size - 4)
rel_offset, = struct.unpack('<I', fd.read(4))
pos = (size + rel_offset) & 0xffffffff
fd.seek(pos)
found = self._read_header(fd)
if not found:
print('Relative offset seems wrong, scanning whole image')
for pos in range(0, size - HEADER_LEN, 4):
fd.seek(pos)
found = self._read_header(fd)
if found:
break
fd.seek(orig_pos)
return found
def _read_next_file(self, fd):
"""Read the next file from a CBFS
Args:
fd: File to read from
Returns:
CbfsFile object, if found
None if no object found, but data was parsed (e.g. TYPE_CBFSHEADER)
False if at end of CBFS and reading should stop
"""
file_pos = fd.tell()
data = fd.read(FILE_HEADER_LEN)
if len(data) < FILE_HEADER_LEN:
print('File header at %x ran out of data' % file_pos)
return False
magic, size, ftype, attr, offset = struct.unpack(FILE_HEADER_FORMAT,
data)
if magic != FILE_MAGIC:
return False
pos = fd.tell()
name = self._read_string(fd)
if name is None:
print('String at %x ran out of data' % pos)
return False
if DEBUG:
print('name', name)
# If there are attribute headers present, read those
compress = self._read_attr(fd, file_pos, attr, offset)
if compress is None:
return False
# Create the correct CbfsFile object depending on the type
cfile = None
cbfs_offset = file_pos + offset
fd.seek(cbfs_offset, io.SEEK_SET)
if ftype == TYPE_CBFSHEADER:
self._read_header(fd)
elif ftype == TYPE_STAGE:
data = fd.read(STAGE_LEN)
cfile = CbfsFile.stage(self.stage_base_address, name, b'',
cbfs_offset)
(cfile.compress, cfile.entry, cfile.load, cfile.data_len,
cfile.memlen) = struct.unpack(STAGE_FORMAT, data)
cfile.data = fd.read(cfile.data_len)
elif ftype == TYPE_RAW:
data = fd.read(size)
cfile = CbfsFile.raw(name, data, cbfs_offset, compress)
cfile.decompress()
if DEBUG:
print('data', data)
elif ftype == TYPE_EMPTY:
# Just read the data and discard it, since it is only padding
fd.read(size)
cfile = CbfsFile('', TYPE_EMPTY, b'', cbfs_offset)
else:
raise ValueError('Unknown type %#x when reading\n' % ftype)
if cfile:
cfile.offset = offset
# Move past the padding to the start of a possible next file. If we are
# already at an alignment boundary, then there is no padding.
pad = (self.align - fd.tell() % self.align) % self.align
fd.seek(pad, io.SEEK_CUR)
return cfile
@classmethod
def _read_attr(cls, fd, file_pos, attr, offset):
"""Read attributes from the file
CBFS files can have attributes, which hold information that cannot fit
into the header. The only attributes currently supported are compression
and the unused tag.
Args:
fd: File to read from
file_pos: Position of file in fd
attr: Offset of attributes, 0 if none
offset: Offset of file data (used to indicate the end of the
attributes)
Returns:
Compression to use for the file (COMPRESS_...)
"""
compress = COMPRESS_NONE
if not attr:
return compress
attr_size = offset - attr
fd.seek(file_pos + attr, io.SEEK_SET)
while attr_size:
pos = fd.tell()
hdr = fd.read(8)
if len(hdr) < 8:
print('Attribute tag at %x ran out of data' % pos)
return None
atag, alen = struct.unpack(">II", hdr)
data = hdr + fd.read(alen - 8)
if atag == FILE_ATTR_TAG_COMPRESSION:
# We don't currently use this information
atag, alen, compress, _decomp_size = struct.unpack(
ATTR_COMPRESSION_FORMAT, data)
elif atag == FILE_ATTR_TAG_UNUSED2:
break
else:
print('Unknown attribute tag %x' % atag)
attr_size -= len(data)
return compress
def _read_header(self, fd):
"""Read the master header
Reads the header and stores the information obtained into the member
variables.
Args:
fd: File to read from
Returns:
True if header was read OK, False if it is truncated or has the
wrong magic or version
"""
pos = fd.tell()
data = fd.read(HEADER_LEN)
if len(data) < HEADER_LEN:
print('Header at %x ran out of data' % pos)
return False
(self.magic, self.version, self.rom_size, self.boot_block_size,
self.align, self.cbfs_offset, self.arch, _) = struct.unpack(
HEADER_FORMAT, data)
return self.magic == HEADER_MAGIC and (
self.version == HEADER_VERSION1 or
self.version == HEADER_VERSION2)
@classmethod
def _read_string(cls, fd):
"""Read a string from a file
This reads a string and aligns the data to the next alignment boundary
Args:
fd: File to read from
Returns:
string read ('str' type), decoded from UTF-8, or None if we ran out of
data
"""
val = b''
while True:
data = fd.read(FILENAME_ALIGN)
if len(data) < FILENAME_ALIGN:
return None
pos = data.find(b'\0')
if pos == -1:
val += data
else:
val += data[:pos]
break
return val.decode('utf-8')
def cbfstool(fname, *cbfs_args, **kwargs):
"""Run cbfstool with provided arguments
If the tool fails then this function raises an exception and prints out the
output and stderr.
Args:
fname: Filename of CBFS
*cbfs_args: List of arguments to pass to cbfstool
**kwargs: 'base' may be set to an integer file base address, which is
passed to cbfstool with its -b option
Returns:
CommandResult object containing the results
"""
args = ['cbfstool', fname] + list(cbfs_args)
if kwargs.get('base') is not None:
args += ['-b', '%#x' % kwargs['base']]
result = command.RunPipe([args], capture=not VERBOSE,
capture_stderr=not VERBOSE, raise_on_error=False)
if result.return_code:
print(result.stderr, file=sys.stderr)
raise Exception("Failed to run (error %d): '%s'" %
(result.return_code, ' '.join(args)))
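# To illustrate how the pieces above fit together, here is a minimal
# round-trip sketch (hypothetical size and contents, modelled on the tests
# below): write a CBFS containing one raw file, then read it back.
#
#     from cbfs_util import CbfsWriter, CbfsReader
#
#     cbw = CbfsWriter(0x100)
#     cbw.add_file_raw('u-boot', b'data')
#     image = cbw.get_data()
#
#     cbr = CbfsReader(image)
#     assert cbr.files['u-boot'].data == b'data'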

tools/binman/cbfs_util_test.py Executable file
@ -0,0 +1,625 @@
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0+
# Copyright 2019 Google LLC
# Written by Simon Glass <sjg@chromium.org>
"""Tests for cbfs_util
These create and read various CBFSs and compare the results with expected
values and with cbfstool
"""
from __future__ import print_function
import io
import os
import shutil
import struct
import tempfile
import unittest
import cbfs_util
from cbfs_util import CbfsWriter
import elf
import test_util
import tools
U_BOOT_DATA = b'1234'
U_BOOT_DTB_DATA = b'udtb'
COMPRESS_DATA = b'compress xxxxxxxxxxxxxxxxxxxxxx data'
class TestCbfs(unittest.TestCase):
"""Test of cbfs_util classes"""
#pylint: disable=W0212
@classmethod
def setUpClass(cls):
# Create a temporary directory for test files
cls._indir = tempfile.mkdtemp(prefix='cbfs_util.')
tools.SetInputDirs([cls._indir])
# Set up some useful data files
TestCbfs._make_input_file('u-boot.bin', U_BOOT_DATA)
TestCbfs._make_input_file('u-boot.dtb', U_BOOT_DTB_DATA)
TestCbfs._make_input_file('compress', COMPRESS_DATA)
# Set up a temporary output directory, used by the tools library when
# compressing files
tools.PrepareOutputDir(None)
cls.have_cbfstool = True
try:
tools.Run('which', 'cbfstool')
except:
cls.have_cbfstool = False
cls.have_lz4 = True
try:
tools.Run('lz4', '--no-frame-crc', '-c',
tools.GetInputFilename('u-boot.bin'))
except:
cls.have_lz4 = False
@classmethod
def tearDownClass(cls):
"""Remove the temporary input directory and its contents"""
if cls._indir:
shutil.rmtree(cls._indir)
cls._indir = None
tools.FinaliseOutputDir()
@classmethod
def _make_input_file(cls, fname, contents):
"""Create a new test input file, creating directories as needed
Args:
fname: Filename to create
contents: File contents to write in to the file
Returns:
Full pathname of file created
"""
pathname = os.path.join(cls._indir, fname)
tools.WriteFile(pathname, contents)
return pathname
def _check_hdr(self, data, size, offset=0, arch=cbfs_util.ARCHITECTURE_X86):
"""Check that the CBFS has the expected header
Args:
data: Data to check
size: Expected ROM size
offset: Expected offset to first CBFS file
arch: Expected architecture
Returns:
CbfsReader object containing the CBFS
"""
cbfs = cbfs_util.CbfsReader(data)
self.assertEqual(cbfs_util.HEADER_MAGIC, cbfs.magic)
self.assertEqual(cbfs_util.HEADER_VERSION2, cbfs.version)
self.assertEqual(size, cbfs.rom_size)
self.assertEqual(0, cbfs.boot_block_size)
self.assertEqual(cbfs_util.ENTRY_ALIGN, cbfs.align)
self.assertEqual(offset, cbfs.cbfs_offset)
self.assertEqual(arch, cbfs.arch)
return cbfs
def _check_uboot(self, cbfs, ftype=cbfs_util.TYPE_RAW, offset=0x38,
data=U_BOOT_DATA, cbfs_offset=None):
"""Check that the U-Boot file is as expected
Args:
cbfs: CbfsReader object to check
ftype: Expected file type
offset: Expected offset of file
data: Expected data in file
cbfs_offset: Expected CBFS offset for file's data
Returns:
CbfsFile object containing the file
"""
self.assertIn('u-boot', cbfs.files)
cfile = cbfs.files['u-boot']
self.assertEqual('u-boot', cfile.name)
self.assertEqual(offset, cfile.offset)
if cbfs_offset is not None:
self.assertEqual(cbfs_offset, cfile.cbfs_offset)
self.assertEqual(data, cfile.data)
self.assertEqual(ftype, cfile.ftype)
self.assertEqual(cbfs_util.COMPRESS_NONE, cfile.compress)
self.assertEqual(len(data), cfile.memlen)
return cfile
def _check_dtb(self, cbfs, offset=0x38, data=U_BOOT_DTB_DATA,
cbfs_offset=None):
"""Check that the U-Boot dtb file is as expected
Args:
cbfs: CbfsReader object to check
offset: Expected offset of file
data: Expected data in file
cbfs_offset: Expected CBFS offset for file's data
"""
self.assertIn('u-boot-dtb', cbfs.files)
cfile = cbfs.files['u-boot-dtb']
self.assertEqual('u-boot-dtb', cfile.name)
self.assertEqual(offset, cfile.offset)
if cbfs_offset is not None:
self.assertEqual(cbfs_offset, cfile.cbfs_offset)
self.assertEqual(U_BOOT_DTB_DATA, cfile.data)
self.assertEqual(cbfs_util.TYPE_RAW, cfile.ftype)
self.assertEqual(cbfs_util.COMPRESS_NONE, cfile.compress)
self.assertEqual(len(U_BOOT_DTB_DATA), cfile.memlen)
def _check_raw(self, data, size, offset=0, arch=cbfs_util.ARCHITECTURE_X86):
"""Check that two raw files are added as expected
Args:
data: Data to check
size: Expected ROM size
offset: Expected offset to first CBFS file
arch: Expected architecture
"""
cbfs = self._check_hdr(data, size, offset=offset, arch=arch)
self._check_uboot(cbfs)
self._check_dtb(cbfs)
def _get_expected_cbfs(self, size, arch='x86', compress=None, base=None):
"""Get the file created by cbfstool for a particular scenario
Args:
size: Size of the CBFS in bytes
arch: Architecture of the CBFS, as a string
compress: Compression to use, e.g. cbfs_util.COMPRESS_LZMA
base: Base address of file, or None to put it anywhere
Returns:
Resulting CBFS file, or None if cbfstool is not available
"""
if not self.have_cbfstool or not self.have_lz4:
return None
cbfs_fname = os.path.join(self._indir, 'test.cbfs')
cbfs_util.cbfstool(cbfs_fname, 'create', '-m', arch, '-s', '%#x' % size)
if base:
base = [(1 << 32) - size + b for b in base]
cbfs_util.cbfstool(cbfs_fname, 'add', '-n', 'u-boot', '-t', 'raw',
'-c', compress and compress[0] or 'none',
'-f', tools.GetInputFilename(
compress and 'compress' or 'u-boot.bin'),
base=base[0] if base else None)
cbfs_util.cbfstool(cbfs_fname, 'add', '-n', 'u-boot-dtb', '-t', 'raw',
'-c', compress and compress[1] or 'none',
'-f', tools.GetInputFilename(
compress and 'compress' or 'u-boot.dtb'),
base=base[1] if base else None)
return cbfs_fname
def _compare_expected_cbfs(self, data, cbfstool_fname):
"""Compare against what cbfstool creates
This compares what binman creates with what cbfstool creates for what
is purportedly the same thing.
Args:
data: CBFS created by binman
cbfstool_fname: CBFS created by cbfstool
"""
if not self.have_cbfstool or not self.have_lz4:
return
expect = tools.ReadFile(cbfstool_fname)
if expect != data:
tools.WriteFile('/tmp/expect', expect)
tools.WriteFile('/tmp/actual', data)
print('diff -y <(xxd -g1 /tmp/expect) <(xxd -g1 /tmp/actual) | colordiff')
self.fail('cbfstool produced a different result')
def test_cbfs_functions(self):
"""Test global functions of cbfs_util"""
self.assertEqual(cbfs_util.ARCHITECTURE_X86, cbfs_util.find_arch('x86'))
self.assertIsNone(cbfs_util.find_arch('bad-arch'))
self.assertEqual(cbfs_util.COMPRESS_LZMA, cbfs_util.find_compress('lzma'))
self.assertIsNone(cbfs_util.find_compress('bad-comp'))
def test_cbfstool_failure(self):
"""Test failure to run cbfstool"""
if not self.have_cbfstool:
self.skipTest('No cbfstool available')
try:
# In verbose mode this test fails since stderr is not captured. Fix
# this by turning off verbosity.
old_verbose = cbfs_util.VERBOSE
cbfs_util.VERBOSE = False
with test_util.capture_sys_output() as (_stdout, stderr):
with self.assertRaises(Exception) as e:
cbfs_util.cbfstool('missing-file', 'bad-command')
finally:
cbfs_util.VERBOSE = old_verbose
self.assertIn('Unknown command', stderr.getvalue())
self.assertIn('Failed to run', str(e.exception))
def test_cbfs_raw(self):
"""Test base handling of a Coreboot Filesystem (CBFS)"""
size = 0xb0
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', U_BOOT_DATA)
cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA)
data = cbw.get_data()
self._check_raw(data, size)
cbfs_fname = self._get_expected_cbfs(size=size)
self._compare_expected_cbfs(data, cbfs_fname)
def test_cbfs_invalid_file_type(self):
"""Check handling of an invalid file type when outputiing a CBFS"""
size = 0xb0
cbw = CbfsWriter(size)
cfile = cbw.add_file_raw('u-boot', U_BOOT_DATA)
# Change the type manually before generating the CBFS, and make sure
# that the generator complains
cfile.ftype = 0xff
with self.assertRaises(ValueError) as e:
cbw.get_data()
self.assertIn('Unknown type 0xff when writing', str(e.exception))
def test_cbfs_invalid_file_type_on_read(self):
"""Check handling of an invalid file type when reading the CBFS"""
size = 0xb0
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', U_BOOT_DATA)
data = cbw.get_data()
# Read in the first file header
cbr = cbfs_util.CbfsReader(data, read=False)
with io.BytesIO(data) as fd:
self.assertTrue(cbr._find_and_read_header(fd, len(data)))
pos = fd.tell()
hdr_data = fd.read(cbfs_util.FILE_HEADER_LEN)
magic, size, ftype, attr, offset = struct.unpack(
cbfs_util.FILE_HEADER_FORMAT, hdr_data)
# Create a new CBFS with a change to the file type
ftype = 0xff
newdata = data[:pos]
newdata += struct.pack(cbfs_util.FILE_HEADER_FORMAT, magic, size, ftype,
attr, offset)
newdata += data[pos + cbfs_util.FILE_HEADER_LEN:]
# Read in this CBFS and make sure that the reader complains
with self.assertRaises(ValueError) as e:
cbfs_util.CbfsReader(newdata)
self.assertIn('Unknown type 0xff when reading', str(e.exception))
def test_cbfs_no_space(self):
"""Check handling of running out of space in the CBFS"""
size = 0x60
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', U_BOOT_DATA)
with self.assertRaises(ValueError) as e:
cbw.get_data()
self.assertIn('No space for header', str(e.exception))
def test_cbfs_no_space_skip(self):
"""Check handling of running out of space in CBFS with file header"""
size = 0x5c
cbw = CbfsWriter(size, arch=cbfs_util.ARCHITECTURE_PPC64)
cbw._add_fileheader = True
cbw.add_file_raw('u-boot', U_BOOT_DATA)
with self.assertRaises(ValueError) as e:
cbw.get_data()
self.assertIn('No space for data before offset', str(e.exception))
def test_cbfs_no_space_pad(self):
"""Check handling of running out of space in CBFS with file header"""
size = 0x70
cbw = CbfsWriter(size)
cbw._add_fileheader = True
cbw.add_file_raw('u-boot', U_BOOT_DATA)
with self.assertRaises(ValueError) as e:
cbw.get_data()
self.assertIn('No space for data before pad offset', str(e.exception))
def test_cbfs_bad_header_ptr(self):
"""Check handling of a bad master-header pointer"""
size = 0x70
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', U_BOOT_DATA)
data = cbw.get_data()
# Add one to the pointer to make it invalid
newdata = data[:-4] + struct.pack('<I', cbw._header_offset + 1)
# We should still be able to find the master header by searching
with test_util.capture_sys_output() as (stdout, _stderr):
cbfs = cbfs_util.CbfsReader(newdata)
self.assertIn('Relative offset seems wrong', stdout.getvalue())
self.assertIn('u-boot', cbfs.files)
self.assertEqual(size, cbfs.rom_size)
def test_cbfs_bad_header(self):
"""Check handling of a bad master header"""
size = 0x70
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', U_BOOT_DATA)
data = cbw.get_data()
# Drop most of the header and try reading the modified CBFS
newdata = data[:cbw._header_offset + 4]
with test_util.capture_sys_output() as (stdout, _stderr):
with self.assertRaises(ValueError) as e:
cbfs_util.CbfsReader(newdata)
self.assertIn('Relative offset seems wrong', stdout.getvalue())
self.assertIn('Cannot find master header', str(e.exception))
def test_cbfs_bad_file_header(self):
"""Check handling of a bad file header"""
size = 0x70
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', U_BOOT_DATA)
data = cbw.get_data()
# Read in the CBFS master header (only), then stop
cbr = cbfs_util.CbfsReader(data, read=False)
with io.BytesIO(data) as fd:
self.assertTrue(cbr._find_and_read_header(fd, len(data)))
pos = fd.tell()
# Remove all but 4 bytes of the file header, and try to read the file
newdata = data[:pos + 4]
with test_util.capture_sys_output() as (stdout, _stderr):
with io.BytesIO(newdata) as fd:
fd.seek(pos)
self.assertEqual(False, cbr._read_next_file(fd))
self.assertIn('File header at 0 ran out of data', stdout.getvalue())
def test_cbfs_bad_file_string(self):
"""Check handling of an incomplete filename string"""
size = 0x70
cbw = CbfsWriter(size)
cbw.add_file_raw('16-characters xx', U_BOOT_DATA)
data = cbw.get_data()
# Read in the CBFS master header (only), then stop
cbr = cbfs_util.CbfsReader(data, read=False)
with io.BytesIO(data) as fd:
self.assertTrue(cbr._find_and_read_header(fd, len(data)))
pos = fd.tell()
# Create a new CBFS with only the first 16 bytes of the file name, then
# try to read the file
newdata = data[:pos + cbfs_util.FILE_HEADER_LEN + 16]
with test_util.capture_sys_output() as (stdout, _stderr):
with io.BytesIO(newdata) as fd:
fd.seek(pos)
self.assertEqual(False, cbr._read_next_file(fd))
self.assertIn('String at %x ran out of data' %
cbfs_util.FILE_HEADER_LEN, stdout.getvalue())
def test_cbfs_debug(self):
"""Check debug output"""
size = 0x70
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', U_BOOT_DATA)
data = cbw.get_data()
try:
cbfs_util.DEBUG = True
with test_util.capture_sys_output() as (stdout, _stderr):
cbfs_util.CbfsReader(data)
self.assertEqual('name u-boot\ndata %s\n' % U_BOOT_DATA,
stdout.getvalue())
finally:
cbfs_util.DEBUG = False
def test_cbfs_bad_attribute(self):
"""Check handling of bad attribute tag"""
if not self.have_lz4:
self.skipTest('lz4 --no-frame-crc not available')
size = 0x140
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', COMPRESS_DATA, None,
compress=cbfs_util.COMPRESS_LZ4)
data = cbw.get_data()
# Search the CBFS for the expected compression tag
with io.BytesIO(data) as fd:
while True:
pos = fd.tell()
tag, = struct.unpack('>I', fd.read(4))
if tag == cbfs_util.FILE_ATTR_TAG_COMPRESSION:
break
# Create a new CBFS with the tag changed to something invalid
newdata = data[:pos] + struct.pack('>I', 0x123) + data[pos + 4:]
with test_util.capture_sys_output() as (stdout, _stderr):
cbfs_util.CbfsReader(newdata)
self.assertEqual('Unknown attribute tag 123\n', stdout.getvalue())
def test_cbfs_missing_attribute(self):
"""Check handling of an incomplete attribute tag"""
if not self.have_lz4:
self.skipTest('lz4 --no-frame-crc not available')
size = 0x140
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', COMPRESS_DATA, None,
compress=cbfs_util.COMPRESS_LZ4)
data = cbw.get_data()
# Read in the CBFS master header (only), then stop
cbr = cbfs_util.CbfsReader(data, read=False)
with io.BytesIO(data) as fd:
self.assertTrue(cbr._find_and_read_header(fd, len(data)))
pos = fd.tell()
# Create a new CBFS with only the first 4 bytes of the compression tag,
# then try to read the file
tag_pos = pos + cbfs_util.FILE_HEADER_LEN + cbfs_util.FILENAME_ALIGN
newdata = data[:tag_pos + 4]
with test_util.capture_sys_output() as (stdout, _stderr):
with io.BytesIO(newdata) as fd:
fd.seek(pos)
self.assertEqual(False, cbr._read_next_file(fd))
self.assertIn('Attribute tag at %x ran out of data' % tag_pos,
stdout.getvalue())
def test_cbfs_file_master_header(self):
"""Check handling of a file containing a master header"""
size = 0x100
cbw = CbfsWriter(size)
cbw._add_fileheader = True
cbw.add_file_raw('u-boot', U_BOOT_DATA)
data = cbw.get_data()
cbr = cbfs_util.CbfsReader(data)
self.assertIn('u-boot', cbr.files)
self.assertEqual(size, cbr.rom_size)
def test_cbfs_arch(self):
"""Test on non-x86 architecture"""
size = 0x100
cbw = CbfsWriter(size, arch=cbfs_util.ARCHITECTURE_PPC64)
cbw.add_file_raw('u-boot', U_BOOT_DATA)
cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA)
data = cbw.get_data()
self._check_raw(data, size, offset=0x40,
arch=cbfs_util.ARCHITECTURE_PPC64)
# Compare against what cbfstool creates
cbfs_fname = self._get_expected_cbfs(size=size, arch='ppc64')
self._compare_expected_cbfs(data, cbfs_fname)
def test_cbfs_stage(self):
"""Tests handling of a Coreboot Filesystem (CBFS)"""
if not elf.ELF_TOOLS:
self.skipTest('Python elftools not available')
elf_fname = os.path.join(self._indir, 'cbfs-stage.elf')
elf.MakeElf(elf_fname, U_BOOT_DATA, U_BOOT_DTB_DATA)
size = 0xb0
cbw = CbfsWriter(size)
cbw.add_file_stage('u-boot', tools.ReadFile(elf_fname))
data = cbw.get_data()
cbfs = self._check_hdr(data, size)
load = 0xfef20000
entry = load + 2
cfile = self._check_uboot(cbfs, cbfs_util.TYPE_STAGE, offset=0x28,
data=U_BOOT_DATA + U_BOOT_DTB_DATA)
self.assertEqual(entry, cfile.entry)
self.assertEqual(load, cfile.load)
self.assertEqual(len(U_BOOT_DATA) + len(U_BOOT_DTB_DATA),
cfile.data_len)
# Compare against what cbfstool creates
if self.have_cbfstool:
cbfs_fname = os.path.join(self._indir, 'test.cbfs')
cbfs_util.cbfstool(cbfs_fname, 'create', '-m', 'x86', '-s',
'%#x' % size)
cbfs_util.cbfstool(cbfs_fname, 'add-stage', '-n', 'u-boot',
'-f', elf_fname)
self._compare_expected_cbfs(data, cbfs_fname)
def test_cbfs_raw_compress(self):
"""Test base handling of compressing raw files"""
if not self.have_lz4:
self.skipTest('lz4 --no-frame-crc not available')
size = 0x140
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', COMPRESS_DATA, None,
compress=cbfs_util.COMPRESS_LZ4)
cbw.add_file_raw('u-boot-dtb', COMPRESS_DATA, None,
compress=cbfs_util.COMPRESS_LZMA)
data = cbw.get_data()
cbfs = self._check_hdr(data, size)
self.assertIn('u-boot', cbfs.files)
cfile = cbfs.files['u-boot']
self.assertEqual(cfile.name, 'u-boot')
self.assertEqual(cfile.offset, 56)
self.assertEqual(cfile.data, COMPRESS_DATA)
self.assertEqual(cfile.ftype, cbfs_util.TYPE_RAW)
self.assertEqual(cfile.compress, cbfs_util.COMPRESS_LZ4)
self.assertEqual(cfile.memlen, len(COMPRESS_DATA))
self.assertIn('u-boot-dtb', cbfs.files)
cfile = cbfs.files['u-boot-dtb']
self.assertEqual(cfile.name, 'u-boot-dtb')
self.assertEqual(cfile.offset, 56)
self.assertEqual(cfile.data, COMPRESS_DATA)
self.assertEqual(cfile.ftype, cbfs_util.TYPE_RAW)
self.assertEqual(cfile.compress, cbfs_util.COMPRESS_LZMA)
self.assertEqual(cfile.memlen, len(COMPRESS_DATA))
cbfs_fname = self._get_expected_cbfs(size=size, compress=['lz4', 'lzma'])
self._compare_expected_cbfs(data, cbfs_fname)
def test_cbfs_raw_space(self):
"""Test files with unused space in the CBFS"""
size = 0xf0
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', U_BOOT_DATA)
cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA)
data = cbw.get_data()
self._check_raw(data, size)
cbfs_fname = self._get_expected_cbfs(size=size)
self._compare_expected_cbfs(data, cbfs_fname)
def test_cbfs_offset(self):
"""Test a CBFS with files at particular offsets"""
size = 0x200
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', U_BOOT_DATA, 0x40)
cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA, 0x140)
data = cbw.get_data()
cbfs = self._check_hdr(data, size)
self._check_uboot(cbfs, ftype=cbfs_util.TYPE_RAW, offset=0x40,
cbfs_offset=0x40)
self._check_dtb(cbfs, offset=0x40, cbfs_offset=0x140)
cbfs_fname = self._get_expected_cbfs(size=size, base=(0x40, 0x140))
self._compare_expected_cbfs(data, cbfs_fname)
def test_cbfs_invalid_file_type_header(self):
"""Check handling of an invalid file type when outputting a header"""
size = 0xb0
cbw = CbfsWriter(size)
cfile = cbw.add_file_raw('u-boot', U_BOOT_DATA, 0)
# Change the type manually before generating the CBFS, and make sure
# that the generator complains
cfile.ftype = 0xff
with self.assertRaises(ValueError) as e:
cbw.get_data()
self.assertIn('Unknown file type 0xff', str(e.exception))
def test_cbfs_offset_conflict(self):
"""Test a CBFS with files that want to overlap"""
size = 0x200
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', U_BOOT_DATA, 0x40)
cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA, 0x80)
with self.assertRaises(ValueError) as e:
cbw.get_data()
self.assertIn('No space for data before pad offset', str(e.exception))
def test_cbfs_check_offset(self):
"""Test that we can discover the offset of a file after writing it"""
size = 0xb0
cbw = CbfsWriter(size)
cbw.add_file_raw('u-boot', U_BOOT_DATA)
cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA)
data = cbw.get_data()
cbfs = cbfs_util.CbfsReader(data)
self.assertEqual(0x38, cbfs.files['u-boot'].cbfs_offset)
self.assertEqual(0x78, cbfs.files['u-boot-dtb'].cbfs_offset)
if __name__ == '__main__':
unittest.main()


@ -5,7 +5,7 @@
# Command-line parser for binman
#
from optparse import OptionParser
from argparse import ArgumentParser
def ParseArgs(argv):
"""Parse the binman command-line arguments
@ -17,50 +17,83 @@ def ParseArgs(argv):
options provides access to the options (e.g. option.debug)
args is a list of string arguments
"""
parser = OptionParser()
parser.add_option('-a', '--entry-arg', type='string', action='append',
help='Set argument value arg=value')
parser.add_option('-b', '--board', type='string',
help='Board name to build')
parser.add_option('-B', '--build-dir', type='string', default='b',
help='Directory containing the build output')
parser.add_option('-d', '--dt', type='string',
help='Configuration file (.dtb) to use')
parser.add_option('-D', '--debug', action='store_true',
help='Enabling debugging (provides a full traceback on error)')
parser.add_option('-E', '--entry-docs', action='store_true',
help='Write out entry documentation (see README.entries)')
parser.add_option('--fake-dtb', action='store_true',
help='Use fake device tree contents (for testing only)')
parser.add_option('-i', '--image', type='string', action='append',
help='Image filename to build (if not specified, build all)')
parser.add_option('-I', '--indir', action='append',
help='Add a path to a directory to use for input files')
parser.add_option('-H', '--full-help', action='store_true',
if '-H' in argv:
argv.append('build')
epilog = '''Binman creates and manipulates images for a board from a set of binaries. Binman is
controlled by a description in the board device tree.'''
parser = ArgumentParser(epilog=epilog)
parser.add_argument('-B', '--build-dir', type=str, default='b',
help='Directory containing the build output')
parser.add_argument('-D', '--debug', action='store_true',
help='Enabling debugging (provides a full traceback on error)')
parser.add_argument('-H', '--full-help', action='store_true',
default=False, help='Display the README file')
parser.add_option('-m', '--map', action='store_true',
parser.add_argument('--toolpath', type=str, action='append',
help='Add a path to the directories containing tools')
parser.add_argument('-v', '--verbosity', default=1,
type=int, help='Control verbosity: 0=silent, 1=warnings, 2=notices, '
'3=info, 4=detail, 5=debug')
subparsers = parser.add_subparsers(dest='cmd')
build_parser = subparsers.add_parser('build', help='Build firmware image')
build_parser.add_argument('-a', '--entry-arg', type=str, action='append',
help='Set argument value arg=value')
build_parser.add_argument('-b', '--board', type=str,
help='Board name to build')
build_parser.add_argument('-d', '--dt', type=str,
help='Configuration file (.dtb) to use')
build_parser.add_argument('--fake-dtb', action='store_true',
help='Use fake device tree contents (for testing only)')
build_parser.add_argument('-i', '--image', type=str, action='append',
help='Image filename to build (if not specified, build all)')
build_parser.add_argument('-I', '--indir', action='append',
help='Add a path to the list of directories to use for input files')
build_parser.add_argument('-m', '--map', action='store_true',
default=False, help='Output a map file for each image')
parser.add_option('-O', '--outdir', type='string',
build_parser.add_argument('-O', '--outdir', type=str,
action='store', help='Path to directory to use for intermediate and '
'output files')
parser.add_option('-p', '--preserve', action='store_true',\
build_parser.add_argument('-p', '--preserve', action='store_true',\
help='Preserve temporary output directory even if option -O is not '
'given')
parser.add_option('-P', '--processes', type=int,
help='set number of processes to use for running tests')
parser.add_option('-t', '--test', action='store_true',
default=False, help='run tests')
parser.add_option('-T', '--test-coverage', action='store_true',
default=False, help='run tests and check for 100% coverage')
parser.add_option('-u', '--update-fdt', action='store_true',
build_parser.add_argument('-u', '--update-fdt', action='store_true',
default=False, help='Update the binman node with offset/size info')
parser.add_option('-v', '--verbosity', default=1,
type='int', help='Control verbosity: 0=silent, 1=progress, 3=full, '
'4=debug')
parser.usage += """
entry_parser = subparsers.add_parser('entry-docs',
help='Write out entry documentation (see README.entries)')
Create images for a board from a set of binaries. It is controlled by a
description in the board device tree."""
list_parser = subparsers.add_parser('ls', help='List files in an image')
list_parser.add_argument('-i', '--image', type=str, required=True,
help='Image filename to list')
list_parser.add_argument('paths', type=str, nargs='*',
help='Paths within file to list (wildcard)')
extract_parser = subparsers.add_parser('extract',
help='Extract files from an image')
extract_parser.add_argument('-i', '--image', type=str, required=True,
help='Image filename to extract')
extract_parser.add_argument('-f', '--filename', type=str,
help='Output filename to write to')
extract_parser.add_argument('-O', '--outdir', type=str, default='',
help='Path to directory to use for output files')
extract_parser.add_argument('paths', type=str, nargs='*',
help='Paths within file to extract (wildcard)')
extract_parser.add_argument('-U', '--uncompressed', action='store_true',
help='Output raw uncompressed data for compressed entries')
test_parser = subparsers.add_parser('test', help='Run tests')
test_parser.add_argument('-P', '--processes', type=int,
help='set number of processes to use for running tests')
test_parser.add_argument('-T', '--test-coverage', action='store_true',
default=False, help='run tests and check for 100%% coverage')
test_parser.add_argument('-X', '--test-preserve-dirs', action='store_true',
help='Preserve and display test-created input directories; also '
'preserve the output directory if a single test is run (pass test '
'name at the end of the command line')
test_parser.add_argument('tests', nargs='*',
help='Test names to run (omit for all)')
return parser.parse_args(argv)
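# For illustration, the parser above accepts invocations like these (a
# sketch; the image and board names are hypothetical):
#
#     args = ParseArgs(['build', '-b', 'sandbox', '-u'])   # args.cmd == 'build'
#     args = ParseArgs(['ls', '-i', 'image.bin', '*dtb*']) # args.paths == ['*dtb*']
#     args = ParseArgs(['extract', '-i', 'image.bin', '-f', 'out.bin', 'u-boot'])
#     args = ParseArgs(['test', '-P', '4'])                # args.processes == 4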


@ -12,6 +12,7 @@ import os
import sys
import tools
import cbfs_util
import command
import elf
from image import Image
@ -66,19 +67,120 @@ def WriteEntryDocs(modules, test_missing=None):
from entry import Entry
Entry.WriteDocs(modules, test_missing)
def Binman(options, args):
def ListEntries(image_fname, entry_paths):
"""List the entries in an image
This decodes the supplied image and displays a table of entries from that
image, preceded by a header.
Args:
image_fname: Image filename to process
entry_paths: List of wildcarded paths (e.g. ['*dtb*', 'u-boot*',
'section/u-boot'])
"""
image = Image.FromFile(image_fname)
entries, lines, widths = image.GetListEntries(entry_paths)
num_columns = len(widths)
for linenum, line in enumerate(lines):
if linenum == 1:
# Print header line
print('-' * (sum(widths) + num_columns * 2))
out = ''
for i, item in enumerate(line):
width = -widths[i]
if item.startswith('>'):
width = -width
item = item[1:]
txt = '%*s ' % (width, item)
out += txt
print(out.rstrip())
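# A formatting note on the loop above: a negative width in Python's '%*s'
# left-aligns the field (e.g. '%*s' % (-6, 'ab') gives 'ab    '), so items
# are left-aligned by default and a leading '>' flips the field back to
# right alignment.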
def ReadEntry(image_fname, entry_path, decomp=True):
"""Extract an entry from an image
This extracts the data from a particular entry in an image
Args:
image_fname: Image filename to process
entry_path: Path to entry to extract
decomp: True to return uncompressed data if the data is compressed,
False to return the raw data
Returns:
data extracted from the entry
"""
image = Image.FromFile(image_fname)
entry = image.FindEntryPath(entry_path)
return entry.ReadData(decomp)
def ExtractEntries(image_fname, output_fname, outdir, entry_paths,
decomp=True):
"""Extract the data from one or more entries and write it to files
Args:
image_fname: Image filename to process
output_fname: Single output filename to use if extracting one file, None
otherwise
outdir: Output directory to use (for any number of files), else None
entry_paths: List of entry paths to extract
decomp: True to decompress the entry data
Returns:
List of EntryInfo records that were written
"""
image = Image.FromFile(image_fname)
# Output an entry to a single file, as a special case
if output_fname:
if not entry_paths:
raise ValueError('Must specify an entry path to write with -f')
if len(entry_paths) != 1:
raise ValueError('Must specify exactly one entry path to write with -f')
entry = image.FindEntryPath(entry_paths[0])
data = entry.ReadData(decomp)
tools.WriteFile(output_fname, data)
tout.Notice("Wrote %#x bytes to file '%s'" % (len(data), output_fname))
return
# Otherwise we will output to a path given by the entry path of each entry.
# This means that entries will appear in subdirectories if they are part of
# a sub-section.
einfos = image.GetListEntries(entry_paths)[0]
tout.Notice('%d entries match and will be written' % len(einfos))
for einfo in einfos:
entry = einfo.entry
data = entry.ReadData(decomp)
path = entry.GetPath()[1:]
fname = os.path.join(outdir, path)
# If this entry has children, create a directory for it and put its
# data in a file called 'root' in that directory
if entry.GetEntries():
if not os.path.exists(fname):
os.makedirs(fname)
fname = os.path.join(fname, 'root')
tout.Notice("Write entry '%s' to '%s'" % (entry.GetPath(), fname))
tools.WriteFile(fname, data)
return einfos
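# A usage sketch (hypothetical filenames): extract all dtb entries from an
# image into the current directory, or read a single entry's data:
#
#     einfos = ExtractEntries('image.bin', None, '.', ['*dtb*'])
#     data = ReadEntry('image.bin', 'u-boot-dtb', decomp=True)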
def Binman(args):
"""The main control code for binman
This assumes that help and test options have already been dealt with. It
deals with the core task of building images.
Args:
options: Command line options object
args: Command line arguments (list of strings)
args: Command line arguments Namespace object
"""
global images
if options.full_help:
if args.full_help:
pager = os.getenv('PAGER')
if not pager:
pager = 'more'
@ -87,18 +189,31 @@ def Binman(options, args):
command.Run(pager, fname)
return 0
if args.cmd == 'ls':
ListEntries(args.image, args.paths)
return 0
if args.cmd == 'extract':
try:
tools.PrepareOutputDir(None)
ExtractEntries(args.image, args.filename, args.outdir, args.paths,
not args.uncompressed)
finally:
tools.FinaliseOutputDir()
return 0
# Try to figure out which device tree contains our image description
if options.dt:
dtb_fname = options.dt
if args.dt:
dtb_fname = args.dt
else:
board = options.board
board = args.board
if not board:
raise ValueError('Must provide a board to process (use -b <board>)')
board_pathname = os.path.join(options.build_dir, board)
board_pathname = os.path.join(args.build_dir, board)
dtb_fname = os.path.join(board_pathname, 'u-boot.dtb')
if not options.indir:
options.indir = ['.']
options.indir.append(board_pathname)
if not args.indir:
args.indir = ['.']
args.indir.append(board_pathname)
try:
# Import these here in case libfdt.py is not available, in which case
@ -106,13 +221,15 @@ def Binman(options, args):
import fdt
import fdt_util
tout.Init(options.verbosity)
elf.debug = options.debug
state.use_fake_dtb = options.fake_dtb
tout.Init(args.verbosity)
elf.debug = args.debug
cbfs_util.VERBOSE = args.verbosity > 2
state.use_fake_dtb = args.fake_dtb
try:
tools.SetInputDirs(options.indir)
tools.PrepareOutputDir(options.outdir, options.preserve)
state.SetEntryArgs(options.entry_arg)
tools.SetInputDirs(args.indir)
tools.PrepareOutputDir(args.outdir, args.preserve)
tools.SetToolPaths(args.toolpath)
state.SetEntryArgs(args.entry_arg)
# Get the device tree ready by compiling it and copying the compiled
# output into a file in our output directly. Then scan it for use
@ -129,16 +246,16 @@ def Binman(options, args):
images = _ReadImageDesc(node)
if options.image:
if args.image:
skip = []
new_images = OrderedDict()
for name, image in images.items():
if name in options.image:
if name in args.image:
new_images[name] = image
else:
skip.append(name)
images = new_images
if skip and options.verbosity >= 2:
if skip and args.verbosity >= 2:
print('Skipping images: %s' % ', '.join(skip))
state.Prepare(images, dtb)
@ -152,7 +269,7 @@ def Binman(options, args):
# entry offsets remain the same.
for image in images.values():
image.ExpandEntries()
if options.update_fdt:
if args.update_fdt:
image.AddMissingProperties()
image.ProcessFdt(dtb)
@ -168,24 +285,45 @@ def Binman(options, args):
# completed and written, but that does not seem important.
image.GetEntryContents()
image.GetEntryOffsets()
try:
image.PackEntries()
image.CheckSize()
image.CheckEntries()
except Exception as e:
if options.map:
fname = image.WriteMap()
print("Wrote map file '%s' to show errors" % fname)
raise
image.SetImagePos()
if options.update_fdt:
image.SetCalculatedProperties()
for dtb_item in state.GetFdts():
dtb_item.Sync()
image.ProcessEntryContents()
# We need to pack the entries to figure out where everything
# should be placed. This sets the offset/size of each entry.
# However, after packing we call ProcessEntryContents() which
# may result in an entry changing size. In that case we need to
# do another pass. Since the device tree often contains the
# final offset/size information we try to make space for this in
# AddMissingProperties() above. However, if the device is
# compressed we cannot know this compressed size in advance,
# since changing an offset from 0x100 to 0x104 (for example) can
# alter the compressed size of the device tree. So we need a
# third pass for this.
passes = 3
for pack_pass in range(passes):
try:
image.PackEntries()
image.CheckSize()
image.CheckEntries()
except Exception as e:
if args.map:
fname = image.WriteMap()
print("Wrote map file '%s' to show errors" % fname)
raise
image.SetImagePos()
if args.update_fdt:
image.SetCalculatedProperties()
for dtb_item in state.GetFdts():
dtb_item.Sync()
sizes_ok = image.ProcessEntryContents()
if sizes_ok:
break
image.ResetForPack()
if not sizes_ok:
image.Raise('Entries expanded after packing (tried %s passes)' %
passes)
image.WriteSymbols()
image.BuildImage()
if options.map:
if args.map:
image.WriteMap()
# Write the updated FDTs to our output files


@ -5,19 +5,39 @@
# Handle various things related to ELF images
#
from __future__ import print_function
from collections import namedtuple, OrderedDict
import command
import io
import os
import re
import shutil
import struct
import tempfile
import tools
ELF_TOOLS = True
try:
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
except: # pragma: no cover
ELF_TOOLS = False
# This is enabled from control.py
debug = False
Symbol = namedtuple('Symbol', ['section', 'address', 'size', 'weak'])
# Information about an ELF file:
# data: Extracted program contents of ELF file (this would be loaded by an
# ELF loader when reading this file
# load: Load address of code
# entry: Entry address of code
# memsize: Number of bytes in memory occupied by loading this ELF file
ElfInfo = namedtuple('ElfInfo', ['data', 'load', 'entry', 'memsize'])
def GetSymbols(fname, patterns):
"""Get the symbols from an ELF file
@ -128,3 +148,157 @@ def LookupAndWriteSymbols(elf_fname, entry, section):
(msg, name, offset, value, len(value_bytes)))
entry.data = (entry.data[:offset] + value_bytes +
entry.data[offset + sym.size:])
def MakeElf(elf_fname, text, data):
"""Make an elf file with the given data in a single section
The output file has several sections, including '.text' and '.data',
containing the info provided in arguments.
Args:
elf_fname: Output filename
text: Text (code) to put in the file's .text section
data: Data to put in the file's .data section
"""
outdir = tempfile.mkdtemp(prefix='binman.elf.')
s_file = os.path.join(outdir, 'elf.S')
# Split the text into two parts so that we can make the entry point two
# bytes after the start of the text section
text_bytes1 = ['\t.byte\t%#x' % tools.ToByte(byte) for byte in text[:2]]
text_bytes2 = ['\t.byte\t%#x' % tools.ToByte(byte) for byte in text[2:]]
data_bytes = ['\t.byte\t%#x' % tools.ToByte(byte) for byte in data]
with open(s_file, 'w') as fd:
print('''/* Auto-generated assembly program to produce an ELF file for testing */
.section .text
.code32
.globl _start
.type _start, @function
%s
_start:
%s
.ident "comment"
.comm fred,8,4
.section .empty
.globl _empty
_empty:
.byte 1
.globl ernie
.data
.type ernie, @object
.size ernie, 4
ernie:
%s
''' % ('\n'.join(text_bytes1), '\n'.join(text_bytes2), '\n'.join(data_bytes)),
file=fd)
lds_file = os.path.join(outdir, 'elf.lds')
# Use a linker script to set the alignment and text address.
with open(lds_file, 'w') as fd:
print('''/* Auto-generated linker script to produce an ELF file for testing */
PHDRS
{
text PT_LOAD ;
data PT_LOAD ;
empty PT_LOAD FLAGS ( 6 ) ;
note PT_NOTE ;
}
SECTIONS
{
. = 0xfef20000;
ENTRY(_start)
.text . : SUBALIGN(0)
{
*(.text)
} :text
.data : {
*(.data)
} :data
_bss_start = .;
.empty : {
*(.empty)
} :empty
.note : {
*(.comment)
} :note
.bss _bss_start (OVERLAY) : {
*(.bss)
}
}
''', file=fd)
# -static: Avoid requiring any shared libraries
# -nostdlib: Don't link with C library
# -Wl,--build-id=none: Don't generate a build ID, so that we just get the
# text section at the start
# -m32: Build for 32-bit x86
# -T...: Specifies the link script, which sets the start address
stdout = command.Output('cc', '-static', '-nostdlib', '-Wl,--build-id=none',
'-m32', '-T', lds_file, '-o', elf_fname, s_file)
shutil.rmtree(outdir)
def DecodeElf(data, location):
"""Decode an ELF file and return information about it
Args:
data: Data from ELF file
location: Start address of data to return
Returns:
ElfInfo object containing information about the decoded ELF file
"""
file_size = len(data)
with io.BytesIO(data) as fd:
elf = ELFFile(fd)
data_start = 0xffffffff
data_end = 0
mem_end = 0
virt_to_phys = 0
for i in range(elf.num_segments()):
segment = elf.get_segment(i)
if segment['p_type'] != 'PT_LOAD' or not segment['p_memsz']:
skipped = 1 # To make code-coverage see this line
continue
start = segment['p_paddr']
mend = start + segment['p_memsz']
rend = start + segment['p_filesz']
data_start = min(data_start, start)
data_end = max(data_end, rend)
mem_end = max(mem_end, mend)
if not virt_to_phys:
virt_to_phys = segment['p_paddr'] - segment['p_vaddr']
output = bytearray(data_end - data_start)
for i in range(elf.num_segments()):
segment = elf.get_segment(i)
if segment['p_type'] != 'PT_LOAD' or not segment['p_memsz']:
skipped = 1 # To make code-coverage see this line
continue
start = segment['p_paddr']
offset = 0
if start < location:
offset = location - start
start = location
# A legal ELF file can have a program header with non-zero length
# but zero-length file size and a non-zero offset which, added
# together, are greater than input->size (i.e. the total file size).
# So we need to not even test in the case that p_filesz is zero.
# Note: All of this code is commented out since we don't have a test
# case for it.
size = segment['p_filesz']
#if not size:
#continue
#end = segment['p_offset'] + segment['p_filesz']
#if end > file_size:
#raise ValueError('Underflow copying out the segment. File has %#x bytes left, segment end is %#x\n',
#file_size, end)
output[start - data_start:start - data_start + size] = (
segment.data()[offset:])
return ElfInfo(output, data_start, elf.header['e_entry'] + virt_to_phys,
mem_end - data_start)
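# A condensed round trip tying these two functions together (hypothetical
# path; MakeElf needs a 32-bit-capable cc and DecodeElf needs pyelftools):
#
#     MakeElf('/tmp/test.elf', b'1234', b'wxyz')
#     info = DecodeElf(tools.ReadFile('/tmp/test.elf'), 0)
#     assert info.data == b'1234' + b'wxyz'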


@ -5,9 +5,12 @@
# Test for the elf module
import os
import shutil
import sys
import tempfile
import unittest
import command
import elf
import test_util
import tools
@ -136,6 +139,44 @@ class TestElf(unittest.TestCase):
elf.debug = False
self.assertTrue(len(stdout.getvalue()) > 0)
def testMakeElf(self):
"""Test for the MakeElf function"""
outdir = tempfile.mkdtemp(prefix='elf.')
expected_text = b'1234'
expected_data = b'wxyz'
elf_fname = os.path.join(outdir, 'elf')
bin_fname = os.path.join(outdir, 'bin')
# Make an ELF file and then convert it to a flat binary file. This
# should produce the original data.
elf.MakeElf(elf_fname, expected_text, expected_data)
stdout = command.Output('objcopy', '-O', 'binary', elf_fname, bin_fname)
with open(bin_fname, 'rb') as fd:
data = fd.read()
self.assertEqual(expected_text + expected_data, data)
shutil.rmtree(outdir)
def testDecodeElf(self):
"""Test for the MakeElf function"""
if not elf.ELF_TOOLS:
self.skipTest('Python elftools not available')
outdir = tempfile.mkdtemp(prefix='elf.')
expected_text = b'1234'
expected_data = b'wxyz'
elf_fname = os.path.join(outdir, 'elf')
elf.MakeElf(elf_fname, expected_text, expected_data)
data = tools.ReadFile(elf_fname)
load = 0xfef20000
entry = load + 2
expected = expected_text + expected_data
self.assertEqual(elf.ElfInfo(expected, load, entry, len(expected)),
elf.DecodeElf(data, 0))
self.assertEqual(elf.ElfInfo(b'\0\0' + expected[2:],
load, entry, len(expected)),
elf.DecodeElf(data, load + 2))
#shutil.rmtree(outdir)
if __name__ == '__main__':
unittest.main()


@ -23,6 +23,7 @@ import sys
import fdt_util
import state
import tools
import tout
modules = {}
@ -33,6 +34,10 @@ our_path = os.path.dirname(os.path.realpath(__file__))
# device-tree properties.
EntryArg = namedtuple('EntryArg', ['name', 'datatype'])
# Information about an entry for use when displaying summaries
EntryInfo = namedtuple('EntryInfo', ['indent', 'name', 'etype', 'size',
'image_pos', 'uncomp_size', 'offset',
'entry'])
class Entry(object):
"""An Entry in the section
@@ -51,6 +56,8 @@ class Entry(object):
offset: Offset of entry within the section, None if not known yet (in
which case it will be calculated by Pack())
size: Entry size in bytes, None if not known
uncomp_size: Size of uncompressed data in bytes, if the entry is
compressed, else None
contents_size: Size of contents in bytes, 0 by default
align: Entry start offset alignment, or None
align_size: Entry size alignment, or None
@@ -58,6 +65,9 @@
pad_before: Number of pad bytes before the contents, 0 if none
pad_after: Number of pad bytes after the contents, 0 if none
data: Contents of entry (string of bytes)
compress: Compression algorithm used (e.g. 'lz4'), 'none' if none
orig_offset: Original offset value read from node
orig_size: Original size value read from node
"""
def __init__(self, section, etype, node, read_node=True, name_prefix=''):
self.section = section
@@ -66,6 +76,7 @@
self.name = node and (name_prefix + node.name) or 'none'
self.offset = None
self.size = None
self.uncomp_size = None
self.data = None
self.contents_size = 0
self.align = None
@@ -76,15 +87,15 @@
self.offset_unset = False
self.image_pos = None
self._expand_size = False
self.compress = 'none'
if read_node:
self.ReadNode()
@staticmethod
def Lookup(section, node_path, etype):
def Lookup(node_path, etype):
"""Look up the entry class for a node.
Args:
section: Section object containing this node
node_path: Path name of Node object containing information about
the entry to create (used for errors)
etype: Entry type to use
@@ -135,7 +146,7 @@
"""
if not etype:
etype = fdt_util.GetString(node, 'type', node.name)
obj = Entry.Lookup(section, node.path, etype)
obj = Entry.Lookup(node.path, etype)
# Call its constructor to get the object we want.
return obj(section, etype, node)
@@ -149,6 +160,14 @@
self.Raise("Please use 'offset' instead of 'pos'")
self.offset = fdt_util.GetInt(self._node, 'offset')
self.size = fdt_util.GetInt(self._node, 'size')
self.orig_offset = self.offset
self.orig_size = self.size
# These should not be set in input files, but are set in an FDT map,
# which is also read by this code.
self.image_pos = fdt_util.GetInt(self._node, 'image-pos')
self.uncomp_size = fdt_util.GetInt(self._node, 'uncomp-size')
self.align = fdt_util.GetInt(self._node, 'align')
if tools.NotPowerOfTwo(self.align):
raise ValueError("Node '%s': Alignment %s must be a power of two" %
@@ -157,8 +176,8 @@
self.pad_after = fdt_util.GetInt(self._node, 'pad-after', 0)
self.align_size = fdt_util.GetInt(self._node, 'align-size')
if tools.NotPowerOfTwo(self.align_size):
raise ValueError("Node '%s': Alignment size %s must be a power "
"of two" % (self._node.path, self.align_size))
self.Raise("Alignment size %s must be a power of two" %
self.align_size)
self.align_end = fdt_util.GetInt(self._node, 'align-end')
self.offset_unset = fdt_util.GetBool(self._node, 'offset-unset')
self.expand_size = fdt_util.GetBool(self._node, 'expand-size')
@@ -188,6 +207,8 @@
for prop in ['offset', 'size', 'image-pos']:
if not prop in self._node.props:
state.AddZeroProp(self._node, prop)
if self.compress != 'none':
state.AddZeroProp(self._node, 'uncomp-size')
err = state.CheckAddHashProp(self._node)
if err:
self.Raise(err)
@@ -196,8 +217,10 @@
"""Set the value of device-tree properties calculated by binman"""
state.SetInt(self._node, 'offset', self.offset)
state.SetInt(self._node, 'size', self.size)
state.SetInt(self._node, 'image-pos',
self.image_pos - self.section.GetRootSkipAtStart())
base = self.section.GetRootSkipAtStart() if self.section else 0
state.SetInt(self._node, 'image-pos', self.image_pos - base)
if self.uncomp_size is not None:
state.SetInt(self._node, 'uncomp-size', self.uncomp_size)
state.CheckSetHashValue(self._node, self.GetData)
def ProcessFdt(self, fdt):
@@ -229,26 +252,36 @@
This sets both the data and content_size properties
Args:
data: Data to set to the contents (string)
data: Data to set to the contents (bytes)
"""
self.data = data
self.contents_size = len(self.data)
def ProcessContentsUpdate(self, data):
"""Update the contens of an entry, after the size is fixed
"""Update the contents of an entry, after the size is fixed
This checks that the new data is the same size as the old.
This checks that the new data is the same size as the old. If the size
has changed, this triggers a re-run of the packing algorithm.
Args:
data: Data to set to the contents (string)
data: Data to set to the contents (bytes)
Raises:
ValueError if the new data size is not the same as the old
"""
if len(data) != self.contents_size:
size_ok = True
new_size = len(data)
if state.AllowEntryExpansion():
if new_size > self.contents_size:
tout.Debug("Entry '%s' size change from %#x to %#x" % (
self._node.path, self.contents_size, new_size))
# self.data will indicate the new size needed
size_ok = False
elif new_size != self.contents_size:
self.Raise('Cannot update entry size from %d to %d' %
(len(data), self.contents_size))
(self.contents_size, new_size))
self.SetContents(data)
return size_ok
def ObtainContents(self):
"""Figure out the contents of an entry.
@@ -260,6 +293,11 @@
# No contents by default: subclasses can implement this
return True
def ResetForPack(self):
"""Reset offset/size fields so that packing can be done again"""
self.offset = self.orig_offset
self.size = self.orig_size
def Pack(self, offset):
"""Figure out how to pack the entry into the section
@@ -355,11 +393,34 @@
return self.data
def GetOffsets(self):
"""Get the offsets for siblings
Some entry types can contain information about the position or size of
other entries. An example of this is the Intel Flash Descriptor, which
knows where the Intel Management Engine section should go.
If this entry knows about the position of other entries, it can specify
this by returning values here
Returns:
Dict:
key: Entry type
value: List containing position and size of the given entry
type. Either can be None if not known
"""
return {}
def SetOffsetSize(self, pos, size):
self.offset = pos
self.size = size
def SetOffsetSize(self, offset, size):
"""Set the offset and/or size of an entry
Args:
offset: New offset, or None to leave alone
size: New size, or None to leave alone
"""
if offset is not None:
self.offset = offset
if size is not None:
self.size = size
def SetImagePos(self, image_pos):
"""Set the position in the image
@@ -370,7 +431,22 @@
self.image_pos = image_pos + self.offset
def ProcessContents(self):
pass
"""Do any post-packing updates of entry contents
This function should call ProcessContentsUpdate() to update the entry
contents, if necessary, returning its return value here.
Args:
data: Data to set to the contents (bytes)
Returns:
True if the new data size is OK, False if expansion is needed
Raises:
ValueError if the new data size is not the same as the old and
state.AllowEntryExpansion() is False
"""
return True
def WriteSymbols(self, section):
"""Write symbol values into binary files for access at run time
@@ -482,7 +558,9 @@ features to produce new behaviours.
modules.remove('_testing')
missing = []
for name in modules:
module = Entry.Lookup(name, name, name)
if name.startswith('__'):
continue
module = Entry.Lookup(name, name)
docs = getattr(module, '__doc__')
if test_missing == name:
docs = None
@@ -529,3 +607,76 @@ features to produce new behaviours.
# the data grows. This should not fail, but check it to be sure.
if not self.ObtainContents():
self.Raise('Cannot obtain contents when expanding entry')
def HasSibling(self, name):
"""Check if there is a sibling of a given name
Returns:
True if there is an entry with this name in the same section,
else False
"""
return name in self.section.GetEntries()
def GetSiblingImagePos(self, name):
"""Return the image position of the given sibling
Returns:
Image position of sibling, or None if the sibling has no position,
or False if there is no such sibling
"""
if not self.HasSibling(name):
return False
return self.section.GetEntries()[name].image_pos
@staticmethod
def AddEntryInfo(entries, indent, name, etype, size, image_pos,
uncomp_size, offset, entry):
"""Add a new entry to the entries list
Args:
entries: List (of EntryInfo objects) to add to
indent: Current indent level to add to list
name: Entry name (string)
etype: Entry type (string)
size: Entry size in bytes (int)
image_pos: Position within image in bytes (int)
uncomp_size: Uncompressed size if the entry uses compression, else
None
offset: Entry offset within parent in bytes (int)
entry: Entry object
"""
entries.append(EntryInfo(indent, name, etype, size, image_pos,
uncomp_size, offset, entry))
def ListEntries(self, entries, indent):
"""Add files in this entry to the list of entries
This can be overridden by subclasses which need different behaviour.
Args:
entries: List (of EntryInfo objects) to add to
indent: Current indent level to add to list
"""
self.AddEntryInfo(entries, indent, self.name, self.etype, self.size,
self.image_pos, self.uncomp_size, self.offset, self)
def ReadData(self, decomp=True):
"""Read the data for an entry from the image
This is used when the image has been read in and we want to extract the
data for a particular entry from that image.
Args:
decomp: True to decompress any compressed data before returning it;
False to return the raw, uncompressed data
Returns:
Entry data (bytes)
"""
# Use True here so that we get an uncompressed section to work from,
# although compressed sections are currently not supported
data = self.section.ReadData(True)
tout.Info('%s: Reading data from offset %#x-%#x, size %#x (avail %#x)' %
(self.GetPath(), self.offset, self.offset + self.size,
self.size, len(data)))
return data[self.offset:self.offset + self.size]
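The ResetForPack()/ProcessContentsUpdate() pair exists to support an expand-and-repack cycle: if an entry's contents grow after packing, the caller repacks and tries again. Below is a toy model of that handshake, with a hypothetical driver loop standing in for binman's real control flow:

class ToyEntry:
    """Minimal stand-in for an Entry whose contents grow once."""
    def __init__(self):
        self.offset = None
        self.size = None
        self.data = b'ab'
        self.grown = False

    def ResetForPack(self):
        self.offset = self.size = None

    def Pack(self, offset):
        self.offset = offset
        self.size = len(self.data)
        return offset + self.size

    def ProcessContents(self):
        if not self.grown:
            self.grown = True
            self.data = b'abcd'  # larger than the packed size
            return False         # sizes not OK, repack needed
        return True

entry = ToyEntry()
for _ in range(2):          # bounded number of passes
    entry.Pack(0)
    if entry.ProcessContents():
        break
    entry.ResetForPack()
assert entry.size == 4      # second pass packed the expanded contents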

View File

@@ -9,12 +9,11 @@ import os
import sys
import unittest
import entry
import fdt
import fdt_util
import tools
entry = None
class TestEntry(unittest.TestCase):
def setUp(self):
tools.PrepareOutputDir(None)
@@ -29,16 +28,7 @@ class TestEntry(unittest.TestCase):
dtb = fdt.FdtScan(fname)
return dtb.GetNode('/binman/u-boot')
def test1EntryNoImportLib(self):
"""Test that we can import Entry subclassess successfully"""
sys.modules['importlib'] = None
global entry
import entry
entry.Entry.Create(None, self.GetNode(), 'u-boot')
def test2EntryImportLib(self):
del sys.modules['importlib']
def _ReloadEntry(self):
global entry
if entry:
if sys.version_info[0] >= 3:
@@ -48,8 +38,21 @@ class TestEntry(unittest.TestCase):
reload(entry)
else:
import entry
def test1EntryNoImportLib(self):
"""Test that we can import Entry subclassess successfully"""
sys.modules['importlib'] = None
global entry
self._ReloadEntry()
entry.Entry.Create(None, self.GetNode(), 'u-boot')
self.assertFalse(entry.have_importlib)
def test2EntryImportLib(self):
del sys.modules['importlib']
global entry
self._ReloadEntry()
entry.Entry.Create(None, self.GetNode(), 'u-boot-spl')
del entry
self.assertTrue(entry.have_importlib)
def testEntryContents(self):
"""Test the Entry bass class"""
@@ -59,7 +62,6 @@ class TestEntry(unittest.TestCase):
def testUnknownEntry(self):
"""Test that unknown entry types are detected"""
import entry
Node = collections.namedtuple('Node', ['name', 'path'])
node = Node('invalid-name', 'invalid-path')
with self.assertRaises(ValueError) as e:
@@ -69,7 +71,6 @@ class TestEntry(unittest.TestCase):
def testUniqueName(self):
"""Test Entry.GetUniqueName"""
import entry
Node = collections.namedtuple('Node', ['name', 'parent'])
base_node = Node('root', None)
base_entry = entry.Entry(None, None, base_node, read_node=False)
@@ -80,7 +81,6 @@ class TestEntry(unittest.TestCase):
def testGetDefaultFilename(self):
"""Trivial test for this base class function"""
import entry
base_entry = entry.Entry(None, None, None, read_node=False)
self.assertIsNone(base_entry.GetDefaultFilename())

View File


@@ -50,6 +50,8 @@ class Entry__testing(Entry):
'bad-update-contents')
self.return_contents_once = fdt_util.GetBool(self._node,
'return-contents-once')
self.bad_update_contents_twice = fdt_util.GetBool(self._node,
'bad-update-contents-twice')
# Set to True when the entry is ready to process the FDT.
self.process_fdt_ready = False
@@ -71,11 +73,12 @@ class Entry__testing(Entry):
if self.force_bad_datatype:
self.GetEntryArgsOrProps([EntryArg('test-bad-datatype-arg', bool)])
self.return_contents = True
self.contents = b'a'
def ObtainContents(self):
if self.return_unknown_contents or not self.return_contents:
return False
self.data = b'a'
self.data = self.contents
self.contents_size = len(self.data)
if self.return_contents_once:
self.return_contents = False
@@ -88,9 +91,14 @@ class Entry__testing(Entry):
def ProcessContents(self):
if self.bad_update_contents:
# Request to update the conents with something larger, to cause a
# Request to update the contents with something larger, to cause a
# failure.
self.ProcessContentsUpdate('aa')
if self.bad_update_contents_twice:
self.contents += b'a'
else:
self.contents = b'aa'
return self.ProcessContentsUpdate(self.contents)
return True
def ProcessFdt(self, fdt):
"""Force reprocessing the first time"""

View File

@@ -9,6 +9,7 @@ from entry import Entry
import fdt_util
import state
import tools
import tout
class Entry_blob(Entry):
"""Entry containing an arbitrary binary blob
@@ -33,8 +34,7 @@ class Entry_blob(Entry):
def __init__(self, section, etype, node):
Entry.__init__(self, section, etype, node)
self._filename = fdt_util.GetString(self._node, 'filename', self.etype)
self._compress = fdt_util.GetString(self._node, 'compress', 'none')
self._uncompressed_size = None
self.compress = fdt_util.GetString(self._node, 'compress', 'none')
def ObtainContents(self):
self._filename = self.GetDefaultFilename()
@@ -42,37 +42,40 @@ class Entry_blob(Entry):
self.ReadBlobContents()
return True
def ReadBlobContents(self):
# We assume the data is small enough to fit into memory. If this
# is used for large filesystem image that might not be true.
# In that case, Image.BuildImage() could be adjusted to use a
# new Entry method which can read in chunks. Then we could copy
# the data in chunks and avoid reading it all at once. For now
# this seems like an unnecessary complication.
data = tools.ReadFile(self._pathname)
if self._compress == 'lz4':
self._uncompressed_size = len(data)
'''
import lz4 # Import this only if needed (python-lz4 dependency)
def CompressData(self, indata):
if self.compress != 'none':
self.uncomp_size = len(indata)
data = tools.Compress(indata, self.compress)
return data
try:
data = lz4.frame.compress(data)
except AttributeError:
data = lz4.compress(data)
'''
data = tools.Run('lz4', '-c', self._pathname, binary=True)
def ReadBlobContents(self):
"""Read blob contents into memory
This function compresses the data before storing if needed.
We assume the data is small enough to fit into memory. If this
is used for a large filesystem image, that might not be true.
In that case, Image.BuildImage() could be adjusted to use a
new Entry method which can read in chunks. Then we could copy
the data in chunks and avoid reading it all at once. For now
this seems like an unnecessary complication.
"""
indata = tools.ReadFile(self._pathname)
data = self.CompressData(indata)
self.SetContents(data)
return True
def GetDefaultFilename(self):
return self._filename
def AddMissingProperties(self):
Entry.AddMissingProperties(self)
if self._compress != 'none':
state.AddZeroProp(self._node, 'uncomp-size')
def SetCalculatedProperties(self):
Entry.SetCalculatedProperties(self)
if self._uncompressed_size is not None:
state.SetInt(self._node, 'uncomp-size', self._uncompressed_size)
def ReadData(self, decomp=True):
indata = Entry.ReadData(self, decomp)
if decomp:
data = tools.Decompress(indata, self.compress)
if self.uncomp_size:
tout.Info("%s: Decompressing data size %#x with algo '%s' to data size %#x" %
(self.GetPath(), len(indata), self.compress,
len(data)))
else:
data = indata
return data
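tools.Compress()/tools.Decompress() take over from the removed code above, which shelled out to the lz4 binary directly. A self-contained round-trip sketch of that approach, assuming the 'lz4' command-line tool is installed (the helper names here are hypothetical, not binman APIs):

import subprocess

def lz4_compress(data):
    # 'lz4 -c' compresses stdin to stdout
    return subprocess.run(['lz4', '-c'], input=data,
                          stdout=subprocess.PIPE, check=True).stdout

def lz4_decompress(data):
    # 'lz4 -dc' decompresses stdin to stdout
    return subprocess.run(['lz4', '-dc'], input=data,
                          stdout=subprocess.PIPE, check=True).stdout

blob = b'some blob contents' * 100
assert lz4_decompress(lz4_compress(blob)) == blob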

View File

@@ -23,11 +23,11 @@ class Entry_blob_dtb(Entry_blob):
def ObtainContents(self):
"""Get the device-tree from the list held by the 'state' module"""
self._filename = self.GetDefaultFilename()
self._pathname, data = state.GetFdtContents(self._filename)
self.SetContents(data)
return True
self._pathname, _ = state.GetFdtContents(self._filename)
return Entry_blob.ReadBlobContents(self)
def ProcessContents(self):
"""Re-read the DTB contents so that we get any calculated properties"""
_, data = state.GetFdtContents(self._filename)
self.SetContents(data)
_, indata = state.GetFdtContents(self._filename)
data = self.CompressData(indata)
return self.ProcessContentsUpdate(data)

tools/binman/etype/cbfs.py Normal file (263 lines)
View File

@@ -0,0 +1,263 @@
# SPDX-License-Identifier: GPL-2.0+
# Copyright 2019 Google LLC
# Written by Simon Glass <sjg@chromium.org>
#
# Entry-type module for a Coreboot Filesystem (CBFS)
#
from collections import OrderedDict
import cbfs_util
from cbfs_util import CbfsWriter
from entry import Entry
import fdt_util
import state
class Entry_cbfs(Entry):
"""Entry containing a Coreboot Filesystem (CBFS)
A CBFS provides a way to group files into a single image. It has a simple directory
structure and allows the position of individual files to be set, since it is
designed to support execute-in-place in an x86 SPI-flash device. Where XIP
is not used, it supports compression and storing ELF files.
CBFS is used by coreboot as its way of organising SPI-flash contents.
The contents of the CBFS are defined by subnodes of the cbfs entry, e.g.:
cbfs {
size = <0x100000>;
u-boot {
cbfs-type = "raw";
};
u-boot-dtb {
cbfs-type = "raw";
};
};
This creates a CBFS 1MB in size with two files in it: u-boot.bin and u-boot.dtb.
Note that the size is required since binman does not support calculating it.
The contents of each entry is just what binman would normally provide if it
were not a CBFS node. A blob type can be used to import arbitrary files as
with the second subnode below:
cbfs {
size = <0x100000>;
u-boot {
cbfs-name = "BOOT";
cbfs-type = "raw";
};
dtb {
type = "blob";
filename = "u-boot.dtb";
cbfs-type = "raw";
cbfs-compress = "lz4";
cbfs-offset = <0x100000>;
};
};
This creates a CBFS 1MB in size with u-boot.bin (named "BOOT") and
u-boot.dtb (named "dtb"), the latter compressed with the lz4 algorithm.
Properties supported in the top-level CBFS node:
cbfs-arch:
Defaults to "x86", but you can specify the architecture if needed.
Properties supported in the CBFS entry subnodes:
cbfs-name:
This is the name of the file created in CBFS. It defaults to the entry
name (which is the node name), but you can override it with this
property.
cbfs-type:
This is the CBFS file type. The following are supported:
raw:
This is a 'raw' file, although compression is supported. It can be
used to store any file in CBFS.
stage:
This is an ELF file that has been loaded (i.e. mapped to memory), so
appears in the CBFS as a flat binary. The input file must be an ELF
image, for example this puts "u-boot" (the ELF image) into a 'stage'
entry:
cbfs {
size = <0x100000>;
u-boot-elf {
cbfs-name = "BOOT";
cbfs-type = "stage";
};
};
You can use your own ELF file with something like:
cbfs {
size = <0x100000>;
something {
type = "blob";
filename = "cbfs-stage.elf";
cbfs-type = "stage";
};
};
As mentioned, the file is converted to a flat binary, so it is
equivalent to adding "u-boot.bin", for example, but with the load and
start addresses specified by the ELF. At present there is no option
to add a flat binary with a load/start address, similar to the
'add-flat-binary' option in cbfstool.
cbfs-offset:
This is the offset of the file's data within the CBFS. It is used to
specify where the file should be placed in cases where a fixed position
is needed. Typical uses are for code which is not relocatable and must
execute in-place from a particular address. This works because SPI flash
is generally mapped into memory on x86 devices. The file header is
placed before this offset so that the data start lines up exactly with
the chosen offset. If this property is not provided, then the file is
placed in the next available spot.
The current implementation supports only a subset of CBFS features. It does
not support other file types (e.g. payload), adding multiple files (like the
'files' entry with a pattern supported by binman), putting files at a
particular offset in the CBFS and a few other things.
Of course binman can create images containing multiple CBFSs, simply by
defining these in the binman config:
binman {
size = <0x800000>;
cbfs {
offset = <0x100000>;
size = <0x100000>;
u-boot {
cbfs-type = "raw";
};
u-boot-dtb {
cbfs-type = "raw";
};
};
cbfs2 {
offset = <0x700000>;
size = <0x100000>;
u-boot {
cbfs-type = "raw";
};
u-boot-dtb {
cbfs-type = "raw";
};
image {
type = "blob";
filename = "image.jpg";
};
};
};
This creates an 8MB image with two CBFSs, one at offset 1MB, one at 7MB,
both of size 1MB.
"""
def __init__(self, section, etype, node):
Entry.__init__(self, section, etype, node)
self._cbfs_arg = fdt_util.GetString(node, 'cbfs-arch', 'x86')
self._cbfs_entries = OrderedDict()
self._ReadSubnodes()
def ObtainContents(self):
arch = cbfs_util.find_arch(self._cbfs_arg)
if arch is None:
self.Raise("Invalid architecture '%s'" % self._cbfs_arg)
if self.size is None:
self.Raise("'cbfs' entry must have a size property")
cbfs = CbfsWriter(self.size, arch)
for entry in self._cbfs_entries.values():
# First get the input data and put it in a file. If not available,
# try later.
if not entry.ObtainContents():
return False
data = entry.GetData()
cfile = None
if entry._type == 'raw':
cfile = cbfs.add_file_raw(entry._cbfs_name, data,
entry._cbfs_offset,
entry._cbfs_compress)
elif entry._type == 'stage':
cfile = cbfs.add_file_stage(entry._cbfs_name, data,
entry._cbfs_offset)
else:
entry.Raise("Unknown cbfs-type '%s' (use 'raw', 'stage')" %
entry._type)
if cfile:
entry._cbfs_file = cfile
data = cbfs.get_data()
self.SetContents(data)
return True
def _ReadSubnodes(self):
"""Read the subnodes to find out what should go in this IFWI"""
for node in self._node.subnodes:
entry = Entry.Create(self.section, node)
entry._cbfs_name = fdt_util.GetString(node, 'cbfs-name', entry.name)
entry._type = fdt_util.GetString(node, 'cbfs-type')
compress = fdt_util.GetString(node, 'cbfs-compress', 'none')
entry._cbfs_offset = fdt_util.GetInt(node, 'cbfs-offset')
entry._cbfs_compress = cbfs_util.find_compress(compress)
if entry._cbfs_compress is None:
self.Raise("Invalid compression in '%s': '%s'" %
(node.name, compress))
self._cbfs_entries[entry._cbfs_name] = entry
def SetImagePos(self, image_pos):
"""Override this function to set all the entry properties from CBFS
We can only do this once image_pos is known
Args:
image_pos: Position of this entry in the image
"""
Entry.SetImagePos(self, image_pos)
# Now update the entries with info from the CBFS entries
for entry in self._cbfs_entries.values():
cfile = entry._cbfs_file
entry.size = cfile.data_len
entry.offset = cfile.calced_cbfs_offset
entry.image_pos = self.image_pos + entry.offset
if entry._cbfs_compress:
entry.uncomp_size = cfile.memlen
def AddMissingProperties(self):
Entry.AddMissingProperties(self)
for entry in self._cbfs_entries.values():
entry.AddMissingProperties()
if entry._cbfs_compress:
state.AddZeroProp(entry._node, 'uncomp-size')
# Store the 'compress' property, since we don't look at
# 'cbfs-compress' in Entry.ReadData()
state.AddString(entry._node, 'compress',
cbfs_util.compress_name(entry._cbfs_compress))
def SetCalculatedProperties(self):
"""Set the value of device-tree properties calculated by binman"""
Entry.SetCalculatedProperties(self)
for entry in self._cbfs_entries.values():
state.SetInt(entry._node, 'offset', entry.offset)
state.SetInt(entry._node, 'size', entry.size)
state.SetInt(entry._node, 'image-pos', entry.image_pos)
if entry.uncomp_size is not None:
state.SetInt(entry._node, 'uncomp-size', entry.uncomp_size)
def ListEntries(self, entries, indent):
"""Override this method to list all files in the section"""
Entry.ListEntries(self, entries, indent)
for entry in self._cbfs_entries.values():
entry.ListEntries(entries, indent + 1)
def GetEntries(self):
return self._cbfs_entries
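Reduced to its core, the ObtainContents() flow above amounts to the following sketch, which assumes it runs from tools/binman so that cbfs_util is importable (the file name and contents are illustrative):

import cbfs_util
from cbfs_util import CbfsWriter

cbfs = CbfsWriter(0x100000, cbfs_util.find_arch('x86'))   # 1MB CBFS for x86
# Add one raw, uncompressed file at the next available offset (None)
cbfs.add_file_raw('u-boot', b'u-boot contents', None,
                  cbfs_util.find_compress('none'))
image_data = cbfs.get_data()   # the finished CBFS, ready to place in an image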

View File

@@ -0,0 +1,130 @@
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2018 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
"""# Entry-type module for a full map of the firmware image
This handles putting an FDT into the image with just the information about the
image.
"""
import libfdt
from entry import Entry
from fdt import Fdt
import state
import tools
FDTMAP_MAGIC = b'_FDTMAP_'
FDTMAP_HDR_LEN = 16
def LocateFdtmap(data):
"""Search an image for an fdt map
Args:
data: Data to search
Returns:
Position of fdt map in data, or None if not found. Note that the
position returned is of the FDT header, i.e. before the FDT data
"""
hdr_pos = data.find(FDTMAP_MAGIC)
size = len(data)
if hdr_pos != -1:
hdr = data[hdr_pos:hdr_pos + FDTMAP_HDR_LEN]
if len(hdr) == FDTMAP_HDR_LEN:
return hdr_pos
return None
class Entry_fdtmap(Entry):
"""An entry which contains an FDT map
Properties / Entry arguments:
None
An FDT map is just a header followed by an FDT containing a list of all the
entries in the image. The root node corresponds to the image node in the
original FDT, and an image-name property indicates the image name in that
original tree.
The header is the string _FDTMAP_ followed by 8 unused bytes.
When used, this entry will be populated with an FDT map which reflects the
entries in the current image. Hierarchy is preserved, and all offsets and
sizes are included.
Note that the -u option must be provided to ensure that binman updates the
FDT with the position of each entry.
Example output for a simple image with U-Boot and an FDT map:
/ {
size = <0x00000112>;
image-pos = <0x00000000>;
offset = <0x00000000>;
u-boot {
size = <0x00000004>;
image-pos = <0x00000000>;
offset = <0x00000000>;
};
fdtmap {
size = <0x0000010e>;
image-pos = <0x00000004>;
offset = <0x00000004>;
};
};
"""
def __init__(self, section, etype, node):
Entry.__init__(self, section, etype, node)
def _GetFdtmap(self):
"""Build an FDT map from the entries in the current image
Returns:
FDT map binary data
"""
def _AddNode(node):
"""Add a node to the FDT map"""
for pname, prop in node.props.items():
fsw.property(pname, prop.bytes)
for subnode in node.subnodes:
with fsw.add_node(subnode.name):
_AddNode(subnode)
# Get the FDT data into an Fdt object
data = state.GetFdtContents()[1]
infdt = Fdt.FromData(data)
infdt.Scan()
# Find the node for the image containing the Fdt-map entry
path = self.section.GetPath()
node = infdt.GetNode(path)
if not node:
self.Raise("Internal error: Cannot locate node for path '%s'" %
path)
# Build a new tree with all nodes and properties starting from that node
fsw = libfdt.FdtSw()
fsw.finish_reservemap()
with fsw.add_node(''):
_AddNode(node)
fdt = fsw.as_fdt()
# Pack this new FDT and return its contents
fdt.pack()
outfdt = Fdt.FromData(fdt.as_bytearray())
data = FDTMAP_MAGIC + tools.GetBytes(0, 8) + outfdt.GetContents()
return data
def ObtainContents(self):
"""Obtain a placeholder for the fdt-map contents"""
self.SetContents(self._GetFdtmap())
return True
def ProcessContents(self):
"""Write an updated version of the FDT map to this entry
This is necessary since new data may have been written back to it during
processing, e.g. the image-pos properties.
"""
return self.ProcessContentsUpdate(self._GetFdtmap())
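Reading the map back out of a finished image follows directly from the header layout described above (the magic string plus 8 unused bytes, then a normal FDT). A minimal sketch, with a hypothetical helper name, relying on the standard FDT header fields:

import struct

def read_fdtmap(image_data):
    pos = image_data.find(b'_FDTMAP_')
    if pos == -1:
        return None
    fdt_start = pos + 16    # FDTMAP_HDR_LEN: 8-byte magic + 8 unused bytes
    # A flattened device tree starts with a big-endian magic and total size
    magic, totalsize = struct.unpack_from('>II', image_data, fdt_start)
    assert magic == 0xd00dfeed
    return image_data[fdt_start:fdt_start + totalsize]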

View File

@@ -14,7 +14,6 @@ import fdt_util
import state
import tools
import bsection
class Entry_files(Entry_section):
"""Entry containing a set of files
@@ -54,4 +53,4 @@ class Entry_files(Entry_section):
state.AddString(subnode, 'compress', self._compress)
# Read entries again, now that we have some
self._section._ReadEntries()
self._ReadEntries()

View File

@@ -49,7 +49,7 @@ class Entry_fmap(Entry):
areas.append(fmap_util.FmapArea(pos or 0, entry.size or 0,
tools.FromUnicode(entry.name), 0))
entries = self.section._image.GetEntries()
entries = self.section.image.GetEntries()
areas = []
for entry in entries.values():
_AddEntries(areas, entry)
@@ -62,4 +62,4 @@ class Entry_fmap(Entry):
return True
def ProcessContents(self):
self.SetContents(self._GetFmap())
return self.ProcessContentsUpdate(self._GetFmap())

View File

@@ -0,0 +1,99 @@
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2018 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
"""Entry-type module for an image header which points to the FDT map
This creates an 8-byte entry with a magic number and the offset of the FDT map
(which is another entry in the image), relative to the start or end of the
image.
"""
import struct
from entry import Entry
import fdt_util
IMAGE_HEADER_MAGIC = b'BinM'
IMAGE_HEADER_LEN = 8
def LocateHeaderOffset(data):
"""Search an image for an image header
Args:
data: Data to search
Returns:
Offset of image header in the image, or None if not found
"""
hdr_pos = data.find(IMAGE_HEADER_MAGIC)
if hdr_pos != -1:
size = len(data)
hdr = data[hdr_pos:hdr_pos + IMAGE_HEADER_LEN]
if len(hdr) == IMAGE_HEADER_LEN:
offset = struct.unpack('<I', hdr[4:])[0]
if hdr_pos == len(data) - IMAGE_HEADER_LEN:
pos = size + offset - (1 << 32)
else:
pos = offset
return pos
return None
class Entry_image_header(Entry):
"""An entry which contains a pointer to the FDT map
Properties / Entry arguments:
location: Location of header ("start" or "end" of image). This is
optional. If omitted then the entry must have an offset property.
This adds an 8-byte entry to the start or end of the image, pointing to the
location of the FDT map. The format is a magic number followed by an offset
from the start or end of the image, in two's-complement format.
This entry must be in the top-level part of the image.
NOTE: If the location is at the start/end, you will probably need to specify
sort-by-offset for the image, unless you actually put the image header
first/last in the entry list.
"""
def __init__(self, section, etype, node):
Entry.__init__(self, section, etype, node)
self.location = fdt_util.GetString(self._node, 'location')
def _GetHeader(self):
image_pos = self.GetSiblingImagePos('fdtmap')
if image_pos == False:
self.Raise("'image_header' section must have an 'fdtmap' sibling")
elif image_pos is None:
# This will be available when called from ProcessContents(), but not
# when called from ObtainContents()
offset = 0xffffffff
else:
image_size = self.section.GetImageSize() or 0
base = (0 if self.location != 'end' else image_size)
offset = (image_pos - base) & 0xffffffff
data = IMAGE_HEADER_MAGIC + struct.pack('<I', offset)
return data
def ObtainContents(self):
"""Obtain a placeholder for the header contents"""
self.SetContents(self._GetHeader())
return True
def Pack(self, offset):
"""Special pack method to set the offset to start/end of image"""
if not self.offset:
if self.location not in ['start', 'end']:
self.Raise("Invalid location '%s', expected 'start' or 'end'" %
self.location)
image_size = self.section.GetImageSize() or 0
self.offset = (0 if self.location != 'end' else image_size - 8)
return Entry.Pack(self, offset)
def ProcessContents(self):
"""Write an updated version of the FDT map to this entry
This is necessary since image_pos is not available when ObtainContents()
is called, since by then the entries have not been packed in the image.
"""
return self.ProcessContentsUpdate(self._GetHeader())
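The encode/decode round trip for this 8-byte header can be sketched as follows; make_header() is a hypothetical helper mirroring _GetHeader(), and the decode step repeats the two's-complement arithmetic from LocateHeaderOffset() above:

import struct

def make_header(fdtmap_pos, at_end, image_size):
    base = image_size if at_end else 0
    offset = (fdtmap_pos - base) & 0xffffffff
    return b'BinM' + struct.pack('<I', offset)

hdr = make_header(0x100, at_end=True, image_size=0x1000)
offset = struct.unpack('<I', hdr[4:])[0]
pos = 0x1000 + offset - (1 << 32)   # header at end: offset decodes as negative
assert pos == 0x100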

View File

@@ -47,17 +47,25 @@ class Entry_intel_descriptor(Entry_blob):
def __init__(self, section, etype, node):
Entry_blob.__init__(self, section, etype, node)
self._regions = []
if self.offset is None:
self.offset = self.section.GetStartOffset()
def GetOffsets(self):
offset = self.data.find(FD_SIGNATURE)
if offset == -1:
self.Raise('Cannot find FD signature')
self.Raise('Cannot find Intel Flash Descriptor (FD) signature')
flvalsig, flmap0, flmap1, flmap2 = struct.unpack('<LLLL',
self.data[offset:offset + 16])
frba = ((flmap0 >> 16) & 0xff) << 4
for i in range(MAX_REGIONS):
self._regions.append(Region(self.data, frba, i))
# Set the offset for ME only, for now, since the others are not used
return {'intel-me': [self._regions[REGION_ME].base,
self._regions[REGION_ME].size]}
# Set the offset for ME (Management Engine) and IFWI (Integrated
# Firmware Image), for now, since the others are not used.
info = {}
if self.HasSibling('intel-me'):
info['intel-me'] = [self._regions[REGION_ME].base,
self._regions[REGION_ME].size]
if self.HasSibling('intel-ifwi'):
info['intel-ifwi'] = [self._regions[REGION_BIOS].base, None]
return info
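The FRBA (Flash Region Base Address) arithmetic in GetOffsets() above is compact enough to check by hand; the FLMAP0 value below is illustrative, not from real hardware:

flmap0 = 0x00040003                  # illustrative FLMAP0 register value
frba = ((flmap0 >> 16) & 0xff) << 4  # bits 16-23, in units of 16 bytes
assert frba == 0x40                  # region table starts at offset 0x40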

View File

@@ -0,0 +1,100 @@
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2016 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# Entry-type module for Intel Management Engine binary blob
#
from collections import OrderedDict
from entry import Entry
from blob import Entry_blob
import fdt_util
import tools
class Entry_intel_ifwi(Entry_blob):
"""Entry containing an Intel Integrated Firmware Image (IFWI) file
Properties / Entry arguments:
- filename: Filename of file to read into entry. This is either the
IFWI file itself, or a file that can be converted into one using a
tool
- convert-fit: If present this indicates that the ifwitool should be
used to convert the provided file into a IFWI.
This file contains code and data used by the SoC that is required to make
it work. It includes U-Boot TPL, microcode, things related to the CSE
(Converged Security Engine, the microcontroller that loads all the firmware)
and other items beyond the wit of man.
A typical filename is 'ifwi.bin' for an IFWI file, or 'fitimage.bin' for a
file that will be converted to an IFWI.
The position of this entry is generally set by the intel-descriptor entry.
The contents of the IFWI are specified by the subnodes of the IFWI node.
Each subnode describes an entry which is placed into the IFWI with a given
sub-partition (and optional entry name).
See README.x86 for information about x86 binary blobs.
"""
def __init__(self, section, etype, node):
Entry_blob.__init__(self, section, etype, node)
self._convert_fit = fdt_util.GetBool(self._node, 'convert-fit')
self._ifwi_entries = OrderedDict()
self._ReadSubnodes()
def ObtainContents(self):
"""Get the contects for the IFWI
Unfortunately we cannot create anything from scratch here, as Intel has
tools which create precursor binaries with lots of data and settings,
and these are not incorporated into binman.
The first step is to get a file in the IFWI format. This is either
supplied directly or is extracted from a fitimage using the 'create'
subcommand.
After that we delete the OBBP sub-partition and add each of the files
that we want in the IFWI file, one for each sub-entry of the IFWI node.
"""
self._pathname = tools.GetInputFilename(self._filename)
# Create the IFWI file if needed
if self._convert_fit:
inname = self._pathname
outname = tools.GetOutputFilename('ifwi.bin')
tools.RunIfwiTool(inname, tools.CMD_CREATE, outname)
self._filename = 'ifwi.bin'
self._pathname = outname
else:
# Provide a different code path here to ensure we have test coverage
inname = self._pathname
# Delete OBBP if it is there, then add the required new items.
tools.RunIfwiTool(inname, tools.CMD_DELETE, subpart='OBBP')
for entry in self._ifwi_entries.values():
# First get the input data and put it in a file
if not entry.ObtainContents():
return False
data = entry.GetData()
uniq = self.GetUniqueName()
input_fname = tools.GetOutputFilename('input.%s' % uniq)
tools.WriteFile(input_fname, data)
tools.RunIfwiTool(inname,
tools.CMD_REPLACE if entry._ifwi_replace else tools.CMD_ADD,
input_fname, entry._ifwi_subpart, entry._ifwi_entry_name)
self.ReadBlobContents()
return True
def _ReadSubnodes(self):
"""Read the subnodes to find out what should go in this IFWI"""
for node in self._node.subnodes:
entry = Entry.Create(self.section, node)
entry._ifwi_replace = fdt_util.GetBool(node, 'replace')
entry._ifwi_subpart = fdt_util.GetString(node, 'ifwi-subpart')
entry._ifwi_entry_name = fdt_util.GetString(node, 'ifwi-entry')
self._ifwi_entries[entry._ifwi_subpart] = entry

View File

@@ -22,6 +22,8 @@ class Entry_intel_me(Entry_blob):
A typical filename is 'me.bin'.
The position of this entry is generally set by the intel-descriptor entry.
See README.x86 for information about x86 binary blobs.
"""
def __init__(self, section, etype, node):

View File

@@ -1,59 +1,155 @@
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2018 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# Entry-type module for sections, which are entries which can contain other
# entries.
#
"""Entry-type module for sections (groups of entries)
Sections are entries which can contain other entries. This allows hierarchical
images to be created.
"""
from __future__ import print_function
from collections import OrderedDict
import re
import sys
from entry import Entry
import fdt_util
import tools
import bsection
class Entry_section(Entry):
"""Entry that contains other entries
Properties / Entry arguments: (see binman README for more information)
- size: Size of section in bytes
- align-size: Align size to a particular power of two
- pad-before: Add padding before the entry
- pad-after: Add padding after the entry
- pad-byte: Pad byte to use when padding
- sort-by-offset: Reorder the entries by offset
- end-at-4gb: Used to build an x86 ROM which ends at 4GB (2^32)
- name-prefix: Adds a prefix to the name of every entry in the section
pad-byte: Pad byte to use when padding
sort-by-offset: True if entries should be sorted by offset, False if
they must be in-order in the device tree description
end-at-4gb: Used to build an x86 ROM which ends at 4GB (2^32)
skip-at-start: Number of bytes before the first entry starts. These
effectively adjust the starting offset of entries. For example,
if this is 16, then the first entry would start at 16. An entry
with offset = 20 would in fact be written at offset 4 in the image
file, since the first 16 bytes are skipped when writing.
name-prefix: Adds a prefix to the name of every entry in the section
when writing out the map
Since a section is also an entry, it inherits all the properties of entries
too.
A section is an entry which can contain other entries, thus allowing
hierarchical images to be created. See 'Sections and hierarchical images'
in the binman README for more information.
"""
def __init__(self, section, etype, node):
Entry.__init__(self, section, etype, node)
self._section = bsection.Section(node.name, section, node,
section._image)
def __init__(self, section, etype, node, test=False):
if not test:
Entry.__init__(self, section, etype, node)
if section:
self.image = section.image
self._entries = OrderedDict()
self._pad_byte = 0
self._sort = False
self._skip_at_start = None
self._end_4gb = False
if not test:
self._ReadNode()
self._ReadEntries()
def _Raise(self, msg):
"""Raises an error for this section
Args:
msg: Error message to use in the raise string
Raises:
ValueError()
"""
raise ValueError("Section '%s': %s" % (self._node.path, msg))
def _ReadNode(self):
"""Read properties from the image node"""
self._pad_byte = fdt_util.GetInt(self._node, 'pad-byte', 0)
self._sort = fdt_util.GetBool(self._node, 'sort-by-offset')
self._end_4gb = fdt_util.GetBool(self._node, 'end-at-4gb')
self._skip_at_start = fdt_util.GetInt(self._node, 'skip-at-start')
if self._end_4gb:
if not self.size:
self.Raise("Section size must be provided when using end-at-4gb")
if self._skip_at_start is not None:
self.Raise("Provide either 'end-at-4gb' or 'skip-at-start'")
else:
self._skip_at_start = 0x100000000 - self.size
else:
if self._skip_at_start is None:
self._skip_at_start = 0
self._name_prefix = fdt_util.GetString(self._node, 'name-prefix')
filename = fdt_util.GetString(self._node, 'filename')
if filename:
self._filename = filename
def _ReadEntries(self):
for node in self._node.subnodes:
if node.name == 'hash':
continue
entry = Entry.Create(self, node)
entry.SetPrefix(self._name_prefix)
self._entries[node.name] = entry
def GetFdtSet(self):
return self._section.GetFdtSet()
fdt_set = set()
for entry in self._entries.values():
fdt_set.update(entry.GetFdtSet())
return fdt_set
def ProcessFdt(self, fdt):
return self._section.ProcessFdt(fdt)
"""Allow entries to adjust the device tree
Some entries need to adjust the device tree for their purposes. This
may involve adding or deleting properties.
"""
todo = self._entries.values()
for passnum in range(3):
next_todo = []
for entry in todo:
if not entry.ProcessFdt(fdt):
next_todo.append(entry)
todo = next_todo
if not todo:
break
if todo:
self.Raise('Internal error: Could not complete processing of Fdt: remaining %s' %
todo)
return True
def ExpandEntries(self):
"""Expand out any entries which have calculated sub-entries
Some entries are expanded out at runtime, e.g. 'files', which produces
a section containing a list of files. Process these entries so that
this information is added to the device tree.
"""
Entry.ExpandEntries(self)
self._section.ExpandEntries()
for entry in self._entries.values():
entry.ExpandEntries()
def AddMissingProperties(self):
"""Add new properties to the device tree as needed for this entry"""
Entry.AddMissingProperties(self)
self._section.AddMissingProperties()
for entry in self._entries.values():
entry.AddMissingProperties()
def ObtainContents(self):
return self._section.GetEntryContents()
return self.GetEntryContents()
def GetData(self):
return self._section.GetData()
section_data = tools.GetBytes(self._pad_byte, self.size)
for entry in self._entries.values():
data = entry.GetData()
base = self.pad_before + entry.offset - self._skip_at_start
section_data = (section_data[:base] + data +
section_data[base + len(data):])
return section_data
def GetOffsets(self):
"""Handle entries that want to set the offset/size of other entries
@@ -61,35 +157,94 @@ class Entry_section(Entry):
This calls each entry's GetOffsets() method. If it returns a list
of entries to update, it updates them.
"""
self._section.GetEntryOffsets()
self.GetEntryOffsets()
return {}
def ResetForPack(self):
"""Reset offset/size fields so that packing can be done again"""
Entry.ResetForPack(self)
for entry in self._entries.values():
entry.ResetForPack()
def Pack(self, offset):
"""Pack all entries into the section"""
self._section.PackEntries()
if self._section._offset is None:
self._section.SetOffset(offset)
self.size = self._section.GetSize()
return super(Entry_section, self).Pack(offset)
self._PackEntries()
return Entry.Pack(self, offset)
def SetImagePos(self, image_pos):
Entry.SetImagePos(self, image_pos)
self._section.SetImagePos(image_pos + self.offset)
def _PackEntries(self):
"""Pack all entries into the image"""
offset = self._skip_at_start
for entry in self._entries.values():
offset = entry.Pack(offset)
self.size = self.CheckSize()
def _ExpandEntries(self):
"""Expand any entries that are permitted to"""
exp_entry = None
for entry in self._entries.values():
if exp_entry:
exp_entry.ExpandToLimit(entry.offset)
exp_entry = None
if entry.expand_size:
exp_entry = entry
if exp_entry:
exp_entry.ExpandToLimit(self.size)
def _SortEntries(self):
"""Sort entries by offset"""
entries = sorted(self._entries.values(), key=lambda entry: entry.offset)
self._entries.clear()
for entry in entries:
self._entries[entry._node.name] = entry
def CheckEntries(self):
"""Check that entries do not overlap or extend outside the image"""
if self._sort:
self._SortEntries()
self._ExpandEntries()
offset = 0
prev_name = 'None'
for entry in self._entries.values():
entry.CheckOffset()
if (entry.offset < self._skip_at_start or
entry.offset + entry.size > self._skip_at_start +
self.size):
entry.Raise("Offset %#x (%d) is outside the section starting "
"at %#x (%d)" %
(entry.offset, entry.offset, self._skip_at_start,
self._skip_at_start))
if entry.offset < offset:
entry.Raise("Offset %#x (%d) overlaps with previous entry '%s' "
"ending at %#x (%d)" %
(entry.offset, entry.offset, prev_name, offset, offset))
offset = entry.offset + entry.size
prev_name = entry.GetPath()
def WriteSymbols(self, section):
"""Write symbol values into binary files for access at run time"""
self._section.WriteSymbols()
for entry in self._entries.values():
entry.WriteSymbols(self)
def SetCalculatedProperties(self):
Entry.SetCalculatedProperties(self)
self._section.SetCalculatedProperties()
for entry in self._entries.values():
entry.SetCalculatedProperties()
def SetImagePos(self, image_pos):
Entry.SetImagePos(self, image_pos)
for entry in self._entries.values():
entry.SetImagePos(image_pos + self.offset)
def ProcessContents(self):
self._section.ProcessEntryContents()
super(Entry_section, self).ProcessContents()
sizes_ok_base = super(Entry_section, self).ProcessContents()
sizes_ok = True
for entry in self._entries.values():
if not entry.ProcessContents():
sizes_ok = False
return sizes_ok and sizes_ok_base
def CheckOffset(self):
self._section.CheckEntries()
self.CheckEntries()
def WriteMap(self, fd, indent):
"""Write a map of the section to a .map file
@@ -97,11 +252,211 @@ class Entry_section(Entry):
Args:
fd: File to write the map to
"""
self._section.WriteMap(fd, indent)
Entry.WriteMapLine(fd, indent, self.name, self.offset or 0,
self.size, self.image_pos)
for entry in self._entries.values():
entry.WriteMap(fd, indent + 1)
def GetEntries(self):
return self._section.GetEntries()
return self._entries
def ExpandToLimit(self, limit):
super(Entry_section, self).ExpandToLimit(limit)
self._section.ExpandSize(self.size)
def GetContentsByPhandle(self, phandle, source_entry):
"""Get the data contents of an entry specified by a phandle
This uses a phandle to look up a node and find the entry
associated with it. Then it returns the contents of that entry.
Args:
phandle: Phandle to look up (integer)
source_entry: Entry containing that phandle (used for error
reporting)
Returns:
data from associated entry (as a string), or None if not found
"""
node = self._node.GetFdt().LookupPhandle(phandle)
if not node:
source_entry.Raise("Cannot find node for phandle %d" % phandle)
for entry in self._entries.values():
if entry._node == node:
return entry.GetData()
source_entry.Raise("Cannot find entry for node '%s'" % node.name)
def LookupSymbol(self, sym_name, optional, msg):
"""Look up a symbol in an ELF file
Looks up a symbol in an ELF file. Only entry types which come from an
ELF image can be used by this function.
At present the only entry property supported is offset.
Args:
sym_name: Symbol name in the ELF file to look up in the format
_binman_<entry>_prop_<property> where <entry> is the name of
the entry and <property> is the property to find (e.g.
_binman_u_boot_prop_offset). As a special case, you can append
_any to <entry> to have it search for any matching entry. E.g.
_binman_u_boot_any_prop_offset will match entries called u-boot,
u-boot-img and u-boot-nodtb)
optional: True if the symbol is optional. If False this function
will raise if the symbol is not found
msg: Message to display if an error occurs
Returns:
Value that should be assigned to that symbol, or None if it was
optional and not found
Raises:
ValueError if the symbol is invalid or not found, or references a
property which is not supported
"""
m = re.match(r'^_binman_(\w+)_prop_(\w+)$', sym_name)
if not m:
raise ValueError("%s: Symbol '%s' has invalid format" %
(msg, sym_name))
entry_name, prop_name = m.groups()
entry_name = entry_name.replace('_', '-')
entry = self._entries.get(entry_name)
if not entry:
if entry_name.endswith('-any'):
root = entry_name[:-4]
for name in self._entries:
if name.startswith(root):
rest = name[len(root):]
if rest in ['', '-img', '-nodtb']:
entry = self._entries[name]
if not entry:
err = ("%s: Entry '%s' not found in list (%s)" %
(msg, entry_name, ','.join(self._entries.keys())))
if optional:
print('Warning: %s' % err, file=sys.stderr)
return None
raise ValueError(err)
if prop_name == 'offset':
return entry.offset
elif prop_name == 'image_pos':
return entry.image_pos
else:
raise ValueError("%s: No such property '%s'" % (msg, prop_name))
def GetRootSkipAtStart(self):
"""Get the skip-at-start value for the top-level section
This is used to find out the starting offset for the root section that
contains this section. If this is a top-level section then it returns
the skip-at-start offset for this section.
This is used to get the absolute position of a section within the image.
Returns:
Integer skip-at-start value for the root section containing this
section
"""
if self.section:
return self.section.GetRootSkipAtStart()
return self._skip_at_start
def GetStartOffset(self):
"""Get the start offset for this section
Returns:
The first available offset in this section (typically 0)
"""
return self._skip_at_start
def GetImageSize(self):
"""Get the size of the image containing this section
Returns:
Image size as an integer number of bytes, which may be None if the
image size is dynamic and its sections have not yet been packed
"""
return self.image.size
def FindEntryType(self, etype):
"""Find an entry type in the section
Args:
etype: Entry type to find
Returns:
entry matching that type, or None if not found
"""
for entry in self._entries.values():
if entry.etype == etype:
return entry
return None
def GetEntryContents(self):
"""Call ObtainContents() for the section
"""
todo = self._entries.values()
for passnum in range(3):
next_todo = []
for entry in todo:
if not entry.ObtainContents():
next_todo.append(entry)
todo = next_todo
if not todo:
break
if todo:
self.Raise('Internal error: Could not complete processing of contents: remaining %s' %
todo)
return True
def _SetEntryOffsetSize(self, name, offset, size):
"""Set the offset and size of an entry
Args:
name: Entry name to update
offset: New offset, or None to leave alone
size: New size, or None to leave alone
"""
entry = self._entries.get(name)
if not entry:
self._Raise("Unable to set offset/size for unknown entry '%s'" %
name)
entry.SetOffsetSize(self._skip_at_start + offset if offset else None,
size)
def GetEntryOffsets(self):
"""Handle entries that want to set the offset/size of other entries
This calls each entry's GetOffsets() method. If it returns a list
of entries to update, it updates them.
"""
for entry in self._entries.values():
offset_dict = entry.GetOffsets()
for name, info in offset_dict.items():
self._SetEntryOffsetSize(name, *info)
def CheckSize(self):
"""Check that the image contents does not exceed its size, etc."""
contents_size = 0
for entry in self._entries.values():
contents_size = max(contents_size, entry.offset + entry.size)
contents_size -= self._skip_at_start
size = self.size
if not size:
size = self.pad_before + contents_size + self.pad_after
size = tools.Align(size, self.align_size)
if self.size and contents_size > self.size:
self._Raise("contents size %#x (%d) exceeds section size %#x (%d)" %
(contents_size, contents_size, self.size, self.size))
if not self.size:
self.size = size
if self.size != tools.Align(self.size, self.align_size):
self._Raise("Size %#x (%d) does not match align-size %#x (%d)" %
(self.size, self.size, self.align_size,
self.align_size))
return size
def ListEntries(self, entries, indent):
"""List the files in the section"""
Entry.AddEntryInfo(entries, indent, self.name, 'section', self.size,
self.image_pos, None, self.offset, self)
for entry in self._entries.values():
entry.ListEntries(entries, indent + 1)
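A toy version of the sequential packing and overlap check done by _PackEntries() and CheckEntries() above, using (name, size) pairs in place of Entry objects (the pack() helper is hypothetical):

def pack(entries, skip_at_start=0):
    # Assign each entry the next available offset, as _PackEntries() does
    offset = skip_at_start
    packed = []
    for name, size in entries:
        packed.append((name, offset, size))
        offset += size
    return packed

packed = pack([('u-boot', 4), ('u-boot-dtb', 0x10e)])
# Overlap check in the style of CheckEntries()
for (_, offset, size), (name, next_offset, _) in zip(packed, packed[1:]):
    if next_offset < offset + size:
        raise ValueError("Offset %#x overlaps previous entry" % next_offset)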

View File

@@ -22,6 +22,8 @@ class Entry_text(Entry):
that contains the string to place in the entry
<xxx> (actual name is the value of text-label): contains the string to
place in the entry.
<text>: The text to place in the entry (overrides the above mechanism).
This is useful when the text is constant.
Example node:
@@ -44,15 +46,28 @@ class Entry_text(Entry):
message = "a message directly in the node"
};
or just:
text {
size = <8>;
text = "some text directly in the node"
};
The text is not itself nul-terminated. This can be achieved, if required,
by setting the size of the entry to something larger than the text.
"""
def __init__(self, section, etype, node):
Entry.__init__(self, section, etype, node)
label, = self.GetEntryArgsOrProps([EntryArg('text-label', str)])
self.text_label = tools.ToStr(label) if type(label) != str else label
value, = self.GetEntryArgsOrProps([EntryArg(self.text_label, str)])
value = tools.ToBytes(value) if value is not None else value
value = fdt_util.GetString(self._node, 'text')
if value:
value = tools.ToBytes(value)
else:
label, = self.GetEntryArgsOrProps([EntryArg('text-label', str)])
self.text_label = label
if self.text_label:
value, = self.GetEntryArgsOrProps([EntryArg(self.text_label,
str)])
value = tools.ToBytes(value) if value is not None else value
self.value = value
def ObtainContents(self):

View File

@@ -12,7 +12,7 @@ class Entry_u_boot_spl_elf(Entry_blob):
"""U-Boot SPL ELF image
Properties / Entry arguments:
- filename: Filename of SPL u-boot (default 'spl/u-boot')
- filename: Filename of SPL u-boot (default 'spl/u-boot-spl')
This is the U-Boot SPL ELF image. It does not include a device tree but can
be relocated to any address for execution.

View File

@@ -0,0 +1,24 @@
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2018 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# Entry-type module for U-Boot TPL ELF image
#
from entry import Entry
from blob import Entry_blob
class Entry_u_boot_tpl_elf(Entry_blob):
"""U-Boot TPL ELF image
Properties / Entry arguments:
- filename: Filename of TPL u-boot (default 'tpl/u-boot-tpl')
This is the U-Boot TPL ELF image. It does not include a device tree but can
be relocated to any address for execution.
"""
def __init__(self, section, etype, node):
Entry_blob.__init__(self, section, etype, node)
def GetDefaultFilename(self):
return 'tpl/u-boot-tpl'

View File

@@ -49,7 +49,7 @@ class Entry_u_boot_with_ucode_ptr(Entry_blob):
def ProcessContents(self):
# If the image does not need microcode, there is nothing to do
if not self.target_offset:
return
return True
# Get the offset of the microcode
ucode_entry = self.section.FindEntryType('u-boot-ucode')
@@ -91,6 +91,6 @@ class Entry_u_boot_with_ucode_ptr(Entry_blob):
# Write the microcode offset and size into the entry
offset_and_size = struct.pack('<2L', offset, size)
self.target_offset -= self.image_pos
self.ProcessContentsUpdate(self.data[:self.target_offset] +
offset_and_size +
self.data[self.target_offset + 8:])
return self.ProcessContentsUpdate(self.data[:self.target_offset] +
offset_and_size +
self.data[self.target_offset + 8:])
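The patch performed above splices a packed (offset, size) pair over 8 bytes of the entry data; in isolation the operation looks like this (values are illustrative):

import struct

data = bytearray(16)                  # stand-in for the entry contents
target_offset = 4                     # where the microcode pointer lives
patch = struct.pack('<2L', 0x1000, 0x200)   # offset and size, little-endian
data = data[:target_offset] + patch + data[target_offset + 8:]
assert struct.unpack_from('<2L', data, target_offset) == (0x1000, 0x200)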

File diff suppressed because it is too large

View File

@@ -8,15 +8,21 @@
from __future__ import print_function
from collections import OrderedDict
import fnmatch
from operator import attrgetter
import re
import sys
from entry import Entry
from etype import fdtmap
from etype import image_header
from etype import section
import fdt
import fdt_util
import bsection
import tools
import tout
class Image:
class Image(section.Entry_section):
"""A Image, representing an output from binman
An image is comprised of a collection of entries each containing binary
@@ -24,12 +30,8 @@ class Image:
This class implements the various operations needed for images.
Atrtributes:
_node: Node object that contains the image definition in device tree
_name: Image name
_size: Image size in bytes, or None if not known yet
_filename: Output filename for image
_sections: Sections present in this image (may be one or more)
Attributes:
filename: Output filename for image
Args:
test: True if this is being called from a test of Images. In this case
@@ -37,106 +39,94 @@
we create a section manually.
"""
def __init__(self, name, node, test=False):
self._node = node
self._name = name
self._size = None
self._filename = '%s.bin' % self._name
if test:
self._section = bsection.Section('main-section', None, self._node,
self, True)
else:
self._ReadNode()
self.image = self
section.Entry_section.__init__(self, None, 'section', node, test)
self.name = 'main-section'
self.image_name = name
self._filename = '%s.bin' % self.image_name
if not test:
filename = fdt_util.GetString(self._node, 'filename')
if filename:
self._filename = filename
def _ReadNode(self):
"""Read properties from the image node"""
self._size = fdt_util.GetInt(self._node, 'size')
filename = fdt_util.GetString(self._node, 'filename')
if filename:
self._filename = filename
self._section = bsection.Section('main-section', None, self._node, self)
@classmethod
def FromFile(cls, fname):
"""Convert an image file into an Image for use in binman
def GetFdtSet(self):
"""Get the set of device tree files used by this image"""
return self._section.GetFdtSet()
Args:
fname: Filename of image file to read
def ExpandEntries(self):
"""Expand out any entries which have calculated sub-entries
Returns:
Image object on success
Some entries are expanded out at runtime, e.g. 'files', which produces
a section containing a list of files. Process these entries so that
this information is added to the device tree.
Raises:
ValueError if something goes wrong
"""
self._section.ExpandEntries()
data = tools.ReadFile(fname)
size = len(data)
def AddMissingProperties(self):
"""Add properties that are not present in the device tree
# First look for an image header
pos = image_header.LocateHeaderOffset(data)
if pos is None:
# Look for the FDT map
pos = fdtmap.LocateFdtmap(data)
if pos is None:
raise ValueError('Cannot find FDT map in image')
When binman has completed packing the entries the offset and size of
each entry are known. But before this the device tree may not specify
these. Add any missing properties, with a dummy value, so that the
size of the entry is correct. That way we can insert the correct values
later.
"""
self._section.AddMissingProperties()
# We don't know the FDT size, so check its header first
probe_dtb = fdt.Fdt.FromData(
data[pos + fdtmap.FDTMAP_HDR_LEN:pos + 256])
dtb_size = probe_dtb.GetFdtObj().totalsize()
fdtmap_data = data[pos:pos + dtb_size + fdtmap.FDTMAP_HDR_LEN]
dtb = fdt.Fdt.FromData(fdtmap_data[fdtmap.FDTMAP_HDR_LEN:])
dtb.Scan()
def ProcessFdt(self, fdt):
"""Allow entries to adjust the device tree
# Return an Image with the associated nodes
image = Image('image', dtb.GetRoot())
image._data = data
return image
Some entries need to adjust the device tree for their purposes. This
may involve adding or deleting properties.
"""
return self._section.ProcessFdt(fdt)
def GetEntryContents(self):
"""Call ObtainContents() for the section
"""
self._section.GetEntryContents()
def GetEntryOffsets(self):
"""Handle entries that want to set the offset/size of other entries
This calls each entry's GetOffsets() method. If it returns a list
of entries to update, it updates them.
"""
self._section.GetEntryOffsets()
def Raise(self, msg):
"""Convenience function to raise an error referencing an image"""
raise ValueError("Image '%s': %s" % (self._node.path, msg))
def PackEntries(self):
"""Pack all entries into the image"""
self._section.PackEntries()
def CheckSize(self):
"""Check that the image contents does not exceed its size, etc."""
self._size = self._section.CheckSize()
def CheckEntries(self):
"""Check that entries do not overlap or extend outside the image"""
self._section.CheckEntries()
def SetCalculatedProperties(self):
self._section.SetCalculatedProperties()
section.Entry_section.Pack(self, 0)
def SetImagePos(self):
self._section.SetImagePos(0)
# This first section in the image so it starts at 0
section.Entry_section.SetImagePos(self, 0)
def ProcessEntryContents(self):
"""Call the ProcessContents() method for each entry
This is intended to adjust the contents as needed by the entry type.
Returns:
True if the new data size is OK, False if expansion is needed
"""
self._section.ProcessEntryContents()
sizes_ok = True
for entry in self._entries.values():
if not entry.ProcessContents():
sizes_ok = False
tout.Debug("Entry '%s' size change" % self._node.path)
return sizes_ok
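
The caller can use this return value to trigger another packing pass when an entry grows. A minimal sketch of such a retry loop, assuming a two-pass limit (hypothetical driver code, not binman's actual control flow):

    # Hedged sketch: re-pack until no entry reports a size change
    for pack_pass in range(2):
        image.PackEntries()
        if image.ProcessEntryContents():
            break           # every entry still fits; we are done
    else:
        image.Raise('Entries expanded even after re-packing')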
def WriteSymbols(self):
"""Write symbol values into binary files for access at run time"""
self._section.WriteSymbols()
section.Entry_section.WriteSymbols(self, self)
def BuildSection(self, fd, base_offset):
"""Write the section to a file"""
fd.seek(base_offset)
fd.write(self.GetData())
def BuildImage(self):
"""Write the image to a file"""
fname = tools.GetOutputFilename(self._filename)
with open(fname, 'wb') as fd:
self._section.BuildSection(fd, 0)
def GetEntries(self):
return self._section.GetEntries()
self.BuildSection(fd, 0)
def WriteMap(self):
"""Write a map of the image to a .map file
@ -144,10 +134,169 @@ class Image:
Returns:
Filename of map file written
"""
filename = '%s.map' % self._name
filename = '%s.map' % self.image_name
fname = tools.GetOutputFilename(filename)
with open(fname, 'w') as fd:
print('%8s %8s %8s %s' % ('ImagePos', 'Offset', 'Size', 'Name'),
file=fd)
self._section.WriteMap(fd, 0)
section.Entry_section.WriteMap(self, fd, 0)
return fname
def BuildEntryList(self):
"""List the files in an image
Returns:
List of entry.EntryInfo objects describing all entries in the image
"""
entries = []
self.ListEntries(entries, 0)
return entries
def FindEntryPath(self, entry_path):
"""Find an entry at a given path in the image
Args:
entry_path: Path to entry (e.g. '/ro-section/u-boot')
Returns:
Entry object corresponding to that path
Raises:
ValueError if no entry found
"""
parts = entry_path.split('/')
entries = self.GetEntries()
parent = '/'
for part in parts:
entry = entries.get(part)
if not entry:
raise ValueError("Entry '%s' not found in '%s'" %
(part, parent))
parent = entry.GetPath()
entries = entry.GetEntries()
return entry
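
For illustration, a hypothetical lookup using this method (the entry names are assumed, not taken from a real image):

    # Hedged usage: descend through nested entries by path
    entry = image.FindEntryPath('section/cbfs/u-boot')
    print(entry.GetPath())  # full device-tree path of the entry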
def ReadData(self, decomp=True):
return self._data
def GetListEntries(self, entry_paths):
"""List the entries in an image
This decodes the supplied image and returns a list of entries from that
image, preceded by a header.
Args:
entry_paths: List of paths to match (each can have wildcards). Only
entries whose names match one of these paths will be printed
Returns:
String error message if something went wrong, otherwise
3-Tuple:
List of EntryInfo objects
List of lines, each
List of text columns, each a string
List of widths of each column
"""
def _EntryToStrings(entry):
"""Convert an entry to a list of strings, one for each column
Args:
entry: EntryInfo object containing information to output
Returns:
List of strings, one for each field in entry
"""
def _AppendHex(val):
"""Append a hex value, or an empty string if val is None
Args:
val: Integer value, or None if none
"""
args.append('' if val is None else '>%x' % val)
args = [' ' * entry.indent + entry.name]
_AppendHex(entry.image_pos)
_AppendHex(entry.size)
args.append(entry.etype)
_AppendHex(entry.offset)
_AppendHex(entry.uncomp_size)
return args
def _DoLine(lines, line):
"""Add a line to the output list
This adds a line (a list of columns) to the output list. It also updates
the widths[] array with the maximum width of each column
Args:
lines: List of lines to add to
line: List of strings, one for each column
"""
for i, item in enumerate(line):
widths[i] = max(widths[i], len(item))
lines.append(line)
def _NameInPaths(fname, entry_paths):
"""Check if a filename is in a list of wildcarded paths
Args:
fname: Filename to check
entry_paths: List of wildcarded paths (e.g. ['*dtb*', 'u-boot*',
'section/u-boot'])
Returns:
True if any wildcard matches the filename (using Unix filename
pattern matching, not regular expressions)
False if not
"""
for path in entry_paths:
if fnmatch.fnmatch(fname, path):
return True
return False
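
Because this uses Unix filename patterns rather than regular expressions, '*' matches any run of characters, including none. A quick illustration of the semantics assumed here:

    import fnmatch

    fnmatch.fnmatch('u-boot-dtb', '*dtb*')   # True: substring match
    fnmatch.fnmatch('u-boot', 'u-boot')      # True: exact match
    fnmatch.fnmatch('u-boot', '*.dtb')       # False: no match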
entries = self.BuildEntryList()
# This is our list of lines. Each item in the list is a list of strings, one
# for each column
lines = []
HEADER = ['Name', 'Image-pos', 'Size', 'Entry-type', 'Offset',
'Uncomp-size']
num_columns = len(HEADER)
# This records the width of each column, calculated as the maximum width of
# all the strings in that column
widths = [0] * num_columns
_DoLine(lines, HEADER)
# We won't print anything unless it has at least this indent. So at the
# start we will print nothing, unless a path matches (or there are no
# entry paths)
MAX_INDENT = 100
min_indent = MAX_INDENT
path_stack = []
path = ''
indent = 0
selected_entries = []
for entry in entries:
if entry.indent > indent:
path_stack.append(path)
elif entry.indent < indent:
path_stack.pop()
if path_stack:
path = path_stack[-1] + '/' + entry.name
indent = entry.indent
# If there are entry paths to match and we are not looking at a
# sub-entry of a previously matched entry, we need to check the path
if entry_paths and indent <= min_indent:
if _NameInPaths(path[1:], entry_paths):
# Print this entry and all sub-entries (=higher indent)
min_indent = indent
else:
# Don't print this entry, nor any following entries until we get
# a path match
min_indent = MAX_INDENT
continue
_DoLine(lines, _EntryToStrings(entry))
selected_entries.append(entry)
return selected_entries, lines, widths
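
A hedged sketch of how a caller might render the returned lines with the computed column widths (binman's actual output code may differ):

    # Hypothetical: left-align each column to its maximum width
    selected, lines, widths = image.GetListEntries(['*'])
    for line in lines:
        print('  '.join('%-*s' % (w, col)
                        for w, col in zip(widths, line)).rstrip())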

View File

@ -12,28 +12,25 @@ from test_util import capture_sys_output
class TestImage(unittest.TestCase):
def testInvalidFormat(self):
image = Image('name', 'node', test=True)
section = image._section
with self.assertRaises(ValueError) as e:
section.LookupSymbol('_binman_something_prop_', False, 'msg')
image.LookupSymbol('_binman_something_prop_', False, 'msg')
self.assertIn(
"msg: Symbol '_binman_something_prop_' has invalid format",
str(e.exception))
def testMissingSymbol(self):
image = Image('name', 'node', test=True)
section = image._section
section._entries = {}
image._entries = {}
with self.assertRaises(ValueError) as e:
section.LookupSymbol('_binman_type_prop_pname', False, 'msg')
image.LookupSymbol('_binman_type_prop_pname', False, 'msg')
self.assertIn("msg: Entry 'type' not found in list ()",
str(e.exception))
def testMissingSymbolOptional(self):
image = Image('name', 'node', test=True)
section = image._section
section._entries = {}
image._entries = {}
with capture_sys_output() as (stdout, stderr):
val = section.LookupSymbol('_binman_type_prop_pname', True, 'msg')
val = image.LookupSymbol('_binman_type_prop_pname', True, 'msg')
self.assertEqual(val, None)
self.assertEqual("Warning: msg: Entry 'type' not found in list ()\n",
stderr.getvalue())
@ -41,8 +38,7 @@ class TestImage(unittest.TestCase):
def testBadProperty(self):
image = Image('name', 'node', test=True)
section = image._section
section._entries = {'u-boot': 1}
image._entries = {'u-boot': 1}
with self.assertRaises(ValueError) as e:
section.LookupSymbol('_binman_u_boot_prop_bad', False, 'msg')
image.LookupSymbol('_binman_u_boot_prop_bad', False, 'msg')
self.assertIn("msg: No such property 'bad", str(e.exception))

View File

@ -31,6 +31,11 @@ fdt_subset = set()
# The DTB which contains the full image information
main_dtb = None
# Allow entries to expand after they have been packed. This is detected and
# forces a re-pack. If not allowed, any attempted expansion causes an error in
# Entry.ProcessContentsUpdate()
allow_entry_expansion = True
def GetFdt(fname):
"""Get the Fdt object for a particular device-tree filename
@ -59,7 +64,7 @@ def GetFdtPath(fname):
"""
return fdt_files[fname]._fname
def GetFdtContents(fname):
def GetFdtContents(fname='u-boot.dtb'):
"""Looks up the FDT pathname and contents
This is used to obtain the Fdt pathname and contents when needed by an
@ -250,3 +255,22 @@ def CheckSetHashValue(node, get_data_func):
data = m.digest()
for n in GetUpdateNodes(hash_node):
n.SetData('value', data)
def SetAllowEntryExpansion(allow):
"""Set whether post-pack expansion of entries is allowed
Args:
allow: True to allow expansion, False to raise an exception
"""
global allow_entry_expansion
allow_entry_expansion = allow
def AllowEntryExpansion():
"""Check whether post-pack expansion of entries is allowed
Returns:
True if expansion should be allowed, False if an exception should be
raised
"""
return allow_entry_expansion
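
A minimal sketch of how an entry's ProcessContentsUpdate() might consult this flag (hypothetical; the real logic lives in entry.py):

    import state

    def ProcessContentsUpdate(self, data):
        """Hedged sketch: accept new data, flag or refuse expansion"""
        size_ok = True
        if len(data) > self.contents_size:
            if state.AllowEntryExpansion():
                size_ok = False   # caller must re-pack the image
            else:
                self.Raise('Cannot update entry size from %d to %d' %
                           (self.contents_size, len(data)))
        self.SetContents(data)
        return size_ok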

View File

@ -24,5 +24,10 @@
text-label = "test-id4";
test-id4 = "some text";
};
/* Put text directly in the node */
text5 {
type = "text";
text = "more text";
};
};
};

View File

@ -10,5 +10,7 @@
};
u-boot-spl-elf {
};
u-boot-tpl-elf {
};
};
};

View File

@ -0,0 +1,20 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
cbfs {
size = <0xb0>;
u-boot {
cbfs-type = "raw";
};
u-boot-dtb {
cbfs-type = "raw";
};
};
};
};

View File

@ -0,0 +1,21 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
cbfs {
size = <0x100>;
cbfs-arch = "ppc64";
u-boot {
cbfs-type = "raw";
};
u-boot-dtb {
cbfs-type = "raw";
};
};
};
};

View File

@ -0,0 +1,19 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
cbfs {
size = <0xb0>;
u-boot {
type = "blob";
filename = "cbfs-stage.elf";
cbfs-type = "stage";
};
};
};
};

View File

@ -0,0 +1,26 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
cbfs {
size = <0x140>;
u-boot {
type = "text";
text = "compress xxxxxxxxxxxxxxxxxxxxxx data";
cbfs-type = "raw";
cbfs-compress = "lz4";
};
u-boot-dtb {
type = "text";
text = "compress xxxxxxxxxxxxxxxxxxxxxx data";
cbfs-type = "raw";
cbfs-compress = "lzma";
};
};
};
};

View File

@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
cbfs {
size = <0x100>;
cbfs-arch = "bad-arch";
};
};
};

View File

@ -0,0 +1,13 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
cbfs {
};
};
};

View File

@ -0,0 +1,17 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
cbfs {
size = <0x100>;
_testing {
return-unknown-contents;
};
};
};
};

View File

@ -0,0 +1,18 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
cbfs {
size = <0xb0>;
u-boot {
cbfs-type = "raw";
cbfs-compress = "invalid-algo";
};
};
};
};

View File

@ -0,0 +1,24 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
cbfs {
size = <0x100>;
u-boot {
cbfs-name = "FRED";
cbfs-type = "raw";
};
hello {
type = "blob";
filename = "u-boot.dtb";
cbfs-type = "raw";
};
};
};
};

View File

@ -0,0 +1,29 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
sort-by-offset;
end-at-4gb;
size = <0x800000>;
intel-descriptor {
filename = "descriptor.bin";
};
intel-ifwi {
offset-unset;
filename = "fitimage.bin";
convert-fit;
u-boot-tpl {
replace;
ifwi-subpart = "IBBP";
ifwi-entry = "IBBL";
};
};
};
};

View File

@ -0,0 +1,28 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
sort-by-offset;
end-at-4gb;
size = <0x800000>;
intel-descriptor {
filename = "descriptor.bin";
};
intel-ifwi {
offset-unset;
filename = "ifwi.bin";
u-boot-tpl {
replace;
ifwi-subpart = "IBBP";
ifwi-entry = "IBBL";
};
};
};
};

View File

@ -0,0 +1,29 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
sort-by-offset;
end-at-4gb;
size = <0x800000>;
intel-descriptor {
filename = "descriptor.bin";
};
intel-ifwi {
offset-unset;
filename = "ifwi.bin";
_testing {
return-unknown-contents;
replace;
ifwi-subpart = "IBBP";
ifwi-entry = "IBBL";
};
};
};
};

View File

@ -0,0 +1,26 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
sort-by-offset;
end-at-4gb;
size = <0x200>;
cbfs {
size = <0x200>;
offset = <0xfffffe00>;
u-boot {
cbfs-offset = <0x40>;
cbfs-type = "raw";
};
u-boot-dtb {
cbfs-offset = <0x140>;
cbfs-type = "raw";
};
};
};
};

View File

@ -0,0 +1,13 @@
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
u-boot {
};
fdtmap {
};
};
};

View File

@ -0,0 +1,17 @@
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
size = <0x400>;
u-boot {
};
fdtmap {
};
image-header {
location = "end";
};
};
};

View File

@ -0,0 +1,19 @@
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
size = <0x400>;
sort-by-offset;
u-boot {
offset = <0x100>;
};
fdtmap {
};
image-header {
location = "start";
};
};
};

View File

@ -0,0 +1,19 @@
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
size = <0x400>;
sort-by-offset;
u-boot {
offset = <0x100>;
};
fdtmap {
};
image-header {
offset = <0x80>;
};
};
};

View File

@ -0,0 +1,16 @@
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
sort-by-offset;
u-boot {
};
image-header {
offset = <0x80>;
location = "start";
};
};
};

View File

@ -0,0 +1,16 @@
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
sort-by-offset;
u-boot {
};
fdtmap {
};
image-header {
};
};
};

View File

@ -0,0 +1,20 @@
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
_testing {
bad-update-contents;
};
u-boot {
};
_testing2 {
type = "_testing";
bad-update-contents;
};
};
};

View File

@ -0,0 +1,21 @@
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
_testing {
bad-update-contents;
bad-update-contents-twice;
};
u-boot {
};
_testing2 {
type = "_testing";
bad-update-contents;
};
};
};

View File

@ -0,0 +1,22 @@
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
_testing {
bad-update-contents;
};
u-boot {
};
section {
_testing2 {
type = "_testing";
bad-update-contents;
};
};
};
};

View File

@ -0,0 +1,14 @@
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
u-boot {
};
u-boot-dtb {
compress = "lz4";
};
};
};

View File

@ -0,0 +1,21 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
cbfs {
size = <0x100>;
u-boot {
cbfs-type = "raw";
cbfs-compress = "lz4";
};
u-boot-dtb {
cbfs-type = "raw";
};
};
};
};

View File

@ -0,0 +1,17 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
cbfs {
size = <0x100>;
u-boot {
cbfs-type = "badtype";
};
};
};
};

View File

@ -0,0 +1,33 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
u-boot {
};
section {
align = <0x100>;
cbfs {
size = <0x400>;
u-boot {
cbfs-type = "raw";
cbfs-offset = <0x38>;
};
u-boot-dtb {
type = "text";
text = "compress xxxxxxxxxxxxxxxxxxxxxx data";
cbfs-type = "raw";
cbfs-compress = "lzma";
cbfs-offset = <0x78>;
};
};
u-boot-dtb {
compress = "lz4";
};
};
};
};

View File

@ -0,0 +1,36 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
size = <0xc00>;
u-boot {
};
section {
align = <0x100>;
cbfs {
size = <0x400>;
u-boot {
cbfs-type = "raw";
};
u-boot-dtb {
cbfs-type = "raw";
cbfs-compress = "lzma";
cbfs-offset = <0x80>;
};
};
u-boot-dtb {
compress = "lz4";
};
};
fdtmap {
};
image-header {
location = "end";
};
};
};

View File

@ -0,0 +1,33 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
size = <0xc00>;
u-boot {
};
section {
align = <0x100>;
cbfs {
size = <0x400>;
u-boot {
cbfs-type = "raw";
};
u-boot-dtb {
cbfs-type = "raw";
cbfs-compress = "lzma";
cbfs-offset = <0x80>;
};
};
u-boot-dtb {
compress = "lz4";
};
};
fdtmap {
};
};
};

View File

@ -0,0 +1,36 @@
// SPDX-License-Identifier: GPL-2.0+
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
size = <0xc00>;
u-boot {
};
section {
align = <0x100>;
cbfs {
size = <0x400>;
u-boot {
cbfs-type = "raw";
};
u-boot-dtb {
cbfs-type = "raw";
cbfs-compress = "lzma";
cbfs-offset = <0x80>;
};
};
u-boot-dtb {
compress = "lz4";
};
};
fdtmap {
};
image-header {
location = "end";
};
};
};

View File

@ -0,0 +1,28 @@
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
binman {
u-boot {
};
section0 {
type = "section";
align = <0x10>;
u-boot {
};
};
section1 {
type = "section";
align-size = <0x20>;
u-boot {
};
section2 {
type = "section";
u-boot {
};
};
};
};
};

Binary file not shown.

Binary file not shown.

View File

@ -137,7 +137,7 @@ the '&' operator to limit the selection:
You can also use -x to specifically exclude some boards. For example:
buildmand arm -x nvidia,freescale,.*ball$
buildman arm -x nvidia,freescale,.*ball$
means to build all arm boards except nvidia, freescale and anything ending
with 'ball'.
@ -146,7 +146,7 @@ For building specific boards you can use the --boards option, which takes a
comma-separated list of board target names and be used multiple times on
the command line:
buidman --boards sandbox,snow --boards
buildman --boards sandbox,snow --boards
It is convenient to use the -n option to see what will be built based on
the subset given. Use -v as well to get an actual list of boards.

tools/ifwitool.c (new file, 2304 lines)

File diff suppressed because it is too large

View File

@ -108,8 +108,8 @@ def RunPipe(pipe_list, infile=None, outfile=None,
return result
def Output(*cmd, **kwargs):
raise_on_error = kwargs.get('raise_on_error', True)
return RunPipe([cmd], capture=True, raise_on_error=raise_on_error).stdout
kwargs['raise_on_error'] = kwargs.get('raise_on_error', True)
return RunPipe([cmd], capture=True, **kwargs).stdout
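
With keyword arguments now forwarded wholesale to RunPipe(), callers can pass any of its options through; a hypothetical use:

    # Hedged example: don't raise if the tool is missing or fails
    out = command.Output('lz4', '--version', raise_on_error=False)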
def OutputOneLine(*cmd, **kwargs):
raise_on_error = kwargs.pop('raise_on_error', True)

View File

@ -46,9 +46,10 @@ def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None):
glob_list = []
glob_list += exclude_list
glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
test_cmd = 'test' if 'binman.py' in prog else '-t'
cmd = ('PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools %s-coverage run '
'--omit "%s" %s -P1 -t' % (build_dir, PYTHON, ','.join(glob_list),
prog))
'--omit "%s" %s %s -P1' % (build_dir, PYTHON, ','.join(glob_list),
prog, test_cmd))
os.system(cmd)
stdout = command.Output('%s-coverage' % PYTHON, 'report')
lines = stdout.splitlines()
@ -57,6 +58,7 @@ def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None):
test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
for line in lines if '/etype/' in line])
missing_list = required
missing_list.discard('__init__')
missing_list.difference_update(test_set)
if missing_list:
print('Missing tests for %s' % (', '.join(missing_list)))

View File

@ -3,6 +3,8 @@
# Copyright (c) 2016 Google, Inc
#
from __future__ import print_function
import command
import glob
import os
@ -24,6 +26,8 @@ chroot_path = None
# Search paths to use for Filename(), used to find files
search_paths = []
tool_search_paths = []
# Tools and the packages that contain them, on debian
packages = {
'lz4': 'liblz4-tool',
@ -154,26 +158,56 @@ def Align(pos, align):
def NotPowerOfTwo(num):
return num and (num & (num - 1))
def PathHasFile(fname):
def SetToolPaths(toolpaths):
"""Set the path to search for tools
Args:
toolpaths: List of paths to search for tools executed by Run()
"""
global tool_search_paths
tool_search_paths = toolpaths
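
This is what the new --toolpath command-line option feeds; a hypothetical call pointing at a U-Boot build tree (the path is assumed):

    # Hedged usage: prefer tools built in the U-Boot output directory
    tools.SetToolPaths(['/path/to/u-boot-build/tools'])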
def PathHasFile(path_spec, fname):
"""Check if a given filename is in the PATH
Args:
path_spec: Value of PATH variable to check
fname: Filename to check
Returns:
True if found, False if not
"""
for dir in os.environ['PATH'].split(':'):
for dir in path_spec.split(':'):
if os.path.exists(os.path.join(dir, fname)):
return True
return False
def Run(name, *args, **kwargs):
"""Run a tool with some arguments
This runs a 'tool', which is a program used by binman to process files and
perhaps produce some output. Tools can be located on the PATH or in a
search path.
Args:
name: Command name to run
args: Arguments to the tool
kwargs: Options to pass to command.run()
Returns:
CommandResult object
"""
try:
return command.Run(name, *args, cwd=outdir, capture=True, **kwargs)
env = None
if tool_search_paths:
env = dict(os.environ)
env['PATH'] = ':'.join(tool_search_paths) + ':' + env['PATH']
return command.Run(name, *args, capture=True,
capture_stderr=True, env=env, **kwargs)
except:
if not PathHasFile(name):
msg = "Plesae install tool '%s'" % name
if env and not PathHasFile(env['PATH'], name):
msg = "Please install tool '%s'" % name
package = packages.get(name)
if package:
msg += " (e.g. from package '%s')" % package
@ -342,3 +376,100 @@ def ToBytes(string):
if sys.version_info[0] >= 3:
return string.encode('utf-8')
return string
def Compress(indata, algo):
"""Compress some data using a given algorithm
Note that for lzma this uses an old version of the algorithm, not that
provided by xz.
This requires the 'lz4' and 'lzma_alone' tools. It also requires an output
directory to have been set up beforehand by calling PrepareOutputDir().
Args:
indata: Input data to compress
algo: Algorithm to use ('none', 'gzip', 'lz4' or 'lzma')
Returns:
Compressed data
"""
if algo == 'none':
return indata
fname = GetOutputFilename('%s.comp.tmp' % algo)
WriteFile(fname, indata)
if algo == 'lz4':
data = Run('lz4', '--no-frame-crc', '-c', fname, binary=True)
# cbfstool uses a very old version of lzma
elif algo == 'lzma':
outfname = GetOutputFilename('%s.comp.otmp' % algo)
Run('lzma_alone', 'e', fname, outfname, '-lc1', '-lp0', '-pb0', '-d8')
data = ReadFile(outfname)
elif algo == 'gzip':
data = Run('gzip', '-c', fname, binary=True)
else:
raise ValueError("Unknown algorithm '%s'" % algo)
return data
def Decompress(indata, algo):
"""Decompress some data using a given algorithm
Note that for lzma this uses an old version of the algorithm, not that
provided by xz.
This requires the 'lz4' and 'lzma_alone' tools. It also requires an output
directory to have been set up beforehand by calling PrepareOutputDir().
Args:
indata: Input data to decompress
algo: Algorithm to use ('none', 'gzip', 'lz4' or 'lzma')
Returns:
Decompressed data
"""
if algo == 'none':
return indata
fname = GetOutputFilename('%s.decomp.tmp' % algo)
with open(fname, 'wb') as fd:
fd.write(indata)
if algo == 'lz4':
data = Run('lz4', '-dc', fname, binary=True)
elif algo == 'lzma':
outfname = GetOutputFilename('%s.decomp.otmp' % algo)
Run('lzma_alone', 'd', fname, outfname)
data = ReadFile(outfname)
elif algo == 'gzip':
data = Run('gzip', '-cd', fname, binary=True)
else:
raise ValueError("Unknown algorithm '%s'" % algo)
return data
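
A round-trip sanity check, assuming the 'lz4' tool is installed and that PrepareOutputDir(None) creates a temporary output directory, as in this tree:

    import tools

    tools.PrepareOutputDir(None)
    original = b'hello hello hello'
    packed = tools.Compress(original, 'lz4')
    assert tools.Decompress(packed, 'lz4') == original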
CMD_CREATE, CMD_DELETE, CMD_ADD, CMD_REPLACE, CMD_EXTRACT = range(5)
IFWITOOL_CMDS = {
CMD_CREATE: 'create',
CMD_DELETE: 'delete',
CMD_ADD: 'add',
CMD_REPLACE: 'replace',
CMD_EXTRACT: 'extract',
}
def RunIfwiTool(ifwi_file, cmd, fname=None, subpart=None, entry_name=None):
"""Run ifwitool with the given arguments:
Args:
ifwi_file: IFWI file to operate on
cmd: Command to execute (CMD_...)
fname: Filename of file to add/replace/extract/create (None for
CMD_DELETE)
subpart: Name of sub-partition to operate on (None for CMD_CREATE)
entry_name: Name of directory entry to operate on, or None if none
"""
args = ['ifwitool', ifwi_file]
args.append(IFWITOOL_CMDS[cmd])
if fname:
args += ['-f', fname]
if subpart:
args += ['-n', subpart]
if entry_name:
args += ['-d', '-e', entry_name]
Run(*args)
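
For example, a hypothetical invocation that replaces the IBBL entry within the IBBP sub-partition (filenames assumed):

    # Hedged example: swap in a new TPL binary
    RunIfwiTool('ifwi.bin', CMD_REPLACE, fname='u-boot-tpl.bin',
                subpart='IBBP', entry_name='IBBL')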

View File

@ -131,13 +131,21 @@ def Info(msg):
"""
_Output(3, msg)
def Detail(msg):
"""Display a detailed message
Args:
msg: Message to display.
"""
_Output(4, msg)
def Debug(msg):
"""Display a debug message
Args:
msg: Message to display.
"""
_Output(4, msg)
_Output(5, msg)
def UserOutput(msg):
"""Display a message regardless of the current output level.