# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2016 Google, Inc
#
# Written by Simon Glass <sjg@chromium.org>
#
# Entry-type module for blobs, which are binary objects read from files
#

|
from binman.entry import Entry
|
|
|
|
from dtoc import fdt_util
|
2020-04-18 09:09:04 +09:00
|
|
|
from patman import tools
|
|
|
|
from patman import tout
|
2016-11-26 12:15:52 +09:00
|
|
|
|
|
|
|
class Entry_blob(Entry):
    """Entry containing an arbitrary binary blob

    Note: This should not be used by itself. It is normally used as a parent
    class by other entry types.

    Properties / Entry arguments:
        - filename: Filename of file to read into entry
        - compress: Compression algorithm to use:
            none: No compression
            lz4: Use lz4 compression (via 'lz4' command-line utility)

    This entry reads data from a file and places it in the entry. The
    default filename is often specified by the subclass. See for
    example the 'u_boot' entry which provides the filename 'u-boot.bin'.

    If compression is enabled, an extra 'uncomp-size' property is written to
    the node (if enabled with -u) which provides the uncompressed size of the
    data.
    """
    def __init__(self, section, etype, node):
        super().__init__(section, etype, node)
        # Read the 'filename' property; the third argument is presumably
        # GetString()'s default, so a missing property falls back to the
        # entry-type name — TODO confirm against dtoc.fdt_util
        self._filename = fdt_util.GetString(self._node, 'filename', self.etype)
        self.compress = fdt_util.GetString(self._node, 'compress', 'none')

    def ObtainContents(self):
        """Figure out the blob's pathname and read it into the entry

        Resolves the filename (subclasses may override GetDefaultFilename()
        to supply one), locates it in the input directories and reads it in,
        compressing if requested.

        Returns:
            True, since the contents can always be obtained here
        """
        self._filename = self.GetDefaultFilename()
        self._pathname = tools.GetInputFilename(self._filename)
        self.ReadBlobContents()
        return True

    def CompressData(self, indata):
        """Compress the given data, if compression is enabled for this entry

        When compression is in use, the uncompressed size is recorded in
        self.uncomp_size so that the 'uncomp-size' property can be written
        to the node later.

        Args:
            indata: Uncompressed data (bytes)

        Returns:
            Possibly-compressed data; unchanged when compress is 'none'
        """
        if self.compress != 'none':
            self.uncomp_size = len(indata)
        # tools.Compress() is expected to pass data through unchanged for
        # the 'none' algorithm
        return tools.Compress(indata, self.compress)

    def ReadBlobContents(self):
        """Read blob contents into memory

        This function compresses the data before storing if needed.

        We assume the data is small enough to fit into memory. If this
        is used for large filesystem image that might not be true.
        In that case, Image.BuildImage() could be adjusted to use a
        new Entry method which can read in chunks. Then we could copy
        the data in chunks and avoid reading it all at once. For now
        this seems like an unnecessary complication.

        Returns:
            True, always
        """
        indata = tools.ReadFile(self._pathname)
        data = self.CompressData(indata)
        self.SetContents(data)
        return True

    def GetDefaultFilename(self):
        """Get the default filename for this entry

        Subclasses override this to provide a fixed filename (e.g. the
        'u_boot' entry returns 'u-boot.bin').

        Returns:
            Filename read from the node (or the default applied in
            __init__)
        """
        return self._filename
|