This is the 4.14.22 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlqSiwYACgkQONu9yGCS
aT5Z0w//dVBMZvvT1H0J9SzFlyhiGf2UfX1WA5LVPXF/wPVnmAnbVu6R4XosII4T
xqqRPGmwkPaShl+xj775Hqhq/+lGBOT3Hnt7YGLN5Izu8z473mC5VGtYEfRzuUGi
al98aR8jE0TFCX/Jf8hI/JI7ll+oArNaLSMsIz1N2Vb2uE9z+9d2Wis0tfhFyASG
E3WqCDPyq4G4tvUqNhWuDJ587e+KCKyyRbX4XXdKHsidx3deoGvuq3aRypX3FLbA
L6Ee6mmDzCvdwjzL/cVX9xFaOwhYUglz6q55bxOPzLYe7PAu+NL8qou0c+wbuqeG
5COu/jYnsnHyCr3jL2AgkLiKeXcv7i9yEMknndcl/QX7uNv3VHaa+iTHXQOHL01+
xg05SjWHZuK+5WOQ3qCBEUE1Xl9s/snrbe4SSjb496MfFa4XAi93HLa8qVYZvKBS
PziRgXHKrwdUyVHaXlukK+XrxKrkX9MAnFcdCoMAqmAk0IiquhWOi1Rg4wNwqwSd
e3kDnhAIeII7RLE04iaCNVrEE4edFco58TNkxb25MYnaLB1fdZnPL6P4JeYYBKbi
hVdzHYQLHW6hcu+/wO9M94WQlcTV2c4qjXTBmpFTQD8MiUi01FxprlEzq8Z7tsEr
ZsUWlhzWGe0OAJI4ifpxRPF2hiMKaFMKKAKEGGDyAzHj8pSizbs=
=d6BQ
-----END PGP SIGNATURE-----

Merge tag 'v4.14.22' into 4.14.x+fslc

This is the 4.14.22 stable release

* tag 'v4.14.22': (770 commits)
  Linux 4.14.22
  vmalloc: fix __GFP_HIGHMEM usage for vmalloc_32 on 32b systems
  mei: me: add cannon point device ids for 4th device
  mei: me: add cannon point device ids
  crypto: s5p-sss - Fix kernel Oops in AES-ECB mode
  drm/i915: fix intel_backlight_device_register declaration
  crypto: talitos - fix Kernel Oops on hashing an empty file
  hippi: Fix a Fix a possible sleep-in-atomic bug in rr_close
  powerpc/perf/imc: Fix nest-imc cpuhotplug callback failure
  PCI: rcar: Fix use-after-free in probe error path
  xen: XEN_ACPI_PROCESSOR is Dom0-only
  platform/x86: dell-laptop: Fix keyboard max lighting for Dell Latitude E6410
  x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
  mm/early_ioremap: Fix boot hang with earlyprintk=efi,keep
  usb: dwc3: of-simple: fix missing clk_disable_unprepare
  usb: dwc3: gadget: Wait longer for controller to end command processing
  dmaengine: jz4740: disable/unprepare clk if probe fails
  drm/vc4: Release fence after signalling
  ASoC: rsnd: ssi: fix race condition in rsnd_ssi_pointer_update
  drm/armada: fix leak of crtc structure
  ...
commit fe88b0b06a
@@ -7,38 +7,40 @@
 # command after changing this file, to see if there are
 # any tracked files which get ignored after the change.
 #
-# Normal rules
+# Normal rules (sorted alphabetically)
 #
 .*
+*.a
+*.bin
+*.bz2
+*.c.[012]*.*
+*.dtb
+*.dtb.S
+*.dwo
+*.elf
+*.gcno
+*.gz
+*.i
+*.ko
+*.ll
+*.lst
+*.lz4
+*.lzma
+*.lzo
+*.mod.c
 *.o
 *.o.*
-*.a
+*.order
+*.patch
 *.s
-*.ko
 *.so
 *.so.dbg
-*.mod.c
-*.i
-*.lst
-*.symtypes
-*.order
-*.elf
-*.bin
-*.tar
-*.gz
-*.bz2
-*.lzma
-*.xz
-*.lz4
-*.lzo
-*.patch
-*.gcno
-*.ll
-modules.builtin
-Module.symvers
-*.dwo
 *.su
-*.c.[012]*.*
+*.symtypes
+*.tar
+*.xz
+Module.symvers
+modules.builtin

 #
 # Top-level generic files
@@ -53,6 +55,11 @@ Module.symvers
 /System.map
 /Module.markers

+#
+# RPM spec file (make rpm-pkg)
+#
+/*.spec
+
 #
 # Debian directory (make deb-pkg)
 #
@@ -1841,13 +1841,6 @@
 			Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
 			the default is off.

-	kmemcheck=	[X86] Boot-time kmemcheck enable/disable/one-shot mode
-			Valid arguments: 0, 1, 2
-			kmemcheck=0 (disabled)
-			kmemcheck=1 (enabled)
-			kmemcheck=2 (one-shot mode)
-			Default: 2 (one-shot mode)
-
 	kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
 			Default is 0 (don't ignore, but inject #GP)

@@ -2718,8 +2711,6 @@
 	norandmaps	Don't use address space randomization. Equivalent to
 			echo 0 > /proc/sys/kernel/randomize_va_space

-	noreplace-paravirt	[X86,IA-64,PV_OPS] Don't patch paravirt_ops
-
 	noreplace-smp	[X86-32,SMP] Don't replace SMP instructions
 			with UP alternatives

@@ -71,6 +71,7 @@ stable kernels.
 | Hisilicon      | Hip0{5,6,7}     | #161010101      | HISILICON_ERRATUM_161010101 |
 | Hisilicon      | Hip0{6,7}       | #161010701     | N/A                          |
 |                |                 |                 |                             |
-| Qualcomm Tech. | Falkor v1       | E1003           | QCOM_FALKOR_ERRATUM_1003    |
+| Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 | Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
 | Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
+| Qualcomm Tech. | Falkor v{1,2}   | E1041           | QCOM_FALKOR_ERRATUM_1041    |
@@ -21,7 +21,6 @@ whole; patches welcome!
    kasan
    ubsan
    kmemleak
-   kmemcheck
    gdb-kernel-debugging
    kgdb
    kselftest
@@ -1,733 +0,0 @@
Getting started with kmemcheck
==============================

Vegard Nossum <vegardno@ifi.uio.no>


Introduction
------------

kmemcheck is a debugging feature for the Linux Kernel. More specifically, it
is a dynamic checker that detects and warns about some uses of uninitialized
memory.

Userspace programmers might be familiar with Valgrind's memcheck. The main
difference between memcheck and kmemcheck is that memcheck works for userspace
programs only, and kmemcheck works for the kernel only. The implementations
are of course vastly different. Because of this, kmemcheck is not as accurate
as memcheck, but it turns out to be good enough in practice to discover real
programmer errors that the compiler is not able to find through static
analysis.

Enabling kmemcheck on a kernel will probably slow it down to the extent that
the machine will not be usable for normal workloads such as e.g. an
interactive desktop. kmemcheck will also cause the kernel to use about twice
as much memory as normal. For this reason, kmemcheck is strictly a debugging
feature.


Downloading
-----------

As of version 2.6.31-rc1, kmemcheck is included in the mainline kernel.


Configuring and compiling
-------------------------

kmemcheck only works for the x86 (both 32- and 64-bit) platform. A number of
configuration variables must have specific settings in order for the kmemcheck
menu to even appear in "menuconfig". These are:

- ``CONFIG_CC_OPTIMIZE_FOR_SIZE=n``
	This option is located under "General setup" / "Optimize for size".

	Without this, gcc will use certain optimizations that usually lead to
	false positive warnings from kmemcheck. An example of this is a 16-bit
	field in a struct, where gcc may load 32 bits, then discard the upper
	16 bits. kmemcheck sees only the 32-bit load, and may trigger a
	warning for the upper 16 bits (if they're uninitialized).

- ``CONFIG_SLAB=y`` or ``CONFIG_SLUB=y``
	This option is located under "General setup" / "Choose SLAB
	allocator".

- ``CONFIG_FUNCTION_TRACER=n``
	This option is located under "Kernel hacking" / "Tracers" / "Kernel
	Function Tracer"

	When function tracing is compiled in, gcc emits a call to another
	function at the beginning of every function. This means that when the
	page fault handler is called, the ftrace framework will be called
	before kmemcheck has had a chance to handle the fault. If ftrace then
	modifies memory that was tracked by kmemcheck, the result is an
	endless recursive page fault.

- ``CONFIG_DEBUG_PAGEALLOC=n``
	This option is located under "Kernel hacking" / "Memory Debugging"
	/ "Debug page memory allocations".

In addition, I highly recommend turning on ``CONFIG_DEBUG_INFO=y``. This is
also located under "Kernel hacking". With this, you will be able to get line
number information from the kmemcheck warnings, which is extremely valuable
in debugging a problem. This option is not mandatory, however, because it
slows down the compilation process and produces a much bigger kernel image.

Now the kmemcheck menu should be visible (under "Kernel hacking" / "Memory
Debugging" / "kmemcheck: trap use of uninitialized memory"). Here follows
a description of the kmemcheck configuration variables:

- ``CONFIG_KMEMCHECK``
	This must be enabled in order to use kmemcheck at all...

- ``CONFIG_KMEMCHECK_``[``DISABLED`` | ``ENABLED`` | ``ONESHOT``]``_BY_DEFAULT``
	This option controls the status of kmemcheck at boot-time. "Enabled"
	will enable kmemcheck right from the start, "disabled" will boot the
	kernel as normal (but with the kmemcheck code compiled in, so it can
	be enabled at run-time after the kernel has booted), and "one-shot" is
	a special mode which will turn kmemcheck off automatically after
	detecting the first use of uninitialized memory.

	If you are using kmemcheck to actively debug a problem, then you
	probably want to choose "enabled" here.

	The one-shot mode is mostly useful in automated test setups because it
	can prevent floods of warnings and increase the chances of the machine
	surviving in case something is really wrong. In other cases, the one-
	shot mode could actually be counter-productive because it would turn
	itself off at the very first error -- in the case of a false positive
	too -- and this would get in the way of debugging the specific
	problem you were interested in.

	If you would like to use your kernel as normal, but with a chance to
	enable kmemcheck in case of some problem, it might be a good idea to
	choose "disabled" here. When kmemcheck is disabled, most of the run-
	time overhead is not incurred, and the kernel will be almost as fast
	as normal.

- ``CONFIG_KMEMCHECK_QUEUE_SIZE``
	Select the maximum number of error reports to store in an internal
	(fixed-size) buffer. Since errors can occur virtually anywhere and in
	any context, we need a temporary storage area which is guaranteed not
	to generate any other page faults when accessed. The queue will be
	emptied as soon as a tasklet may be scheduled. If the queue is full,
	new error reports will be lost.

	The default value of 64 is probably fine. If some code produces more
	than 64 errors within an irqs-off section, then the code is likely to
	produce many, many more, too, and these additional reports seldom give
	any more information (the first report is usually the most valuable
	anyway).

	This number might have to be adjusted if you are not using serial
	console or similar to capture the kernel log. If you are using the
	"dmesg" command to save the log, then getting a lot of kmemcheck
	warnings might overflow the kernel log itself, and the earlier reports
	will get lost in that way instead. Try setting this to 10 or so on
	such a setup.

- ``CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT``
	Select the number of shadow bytes to save along with each entry of the
	error-report queue. These bytes indicate what parts of an allocation
	are initialized, uninitialized, etc. and will be displayed when an
	error is detected to help the debugging of a particular problem.

	The number entered here is actually the logarithm of the number of
	bytes that will be saved. So if you pick for example 5 here, kmemcheck
	will save 2^5 = 32 bytes.

	The default value should be fine for debugging most problems. It also
	fits nicely within 80 columns.

- ``CONFIG_KMEMCHECK_PARTIAL_OK``
	This option (when enabled) works around certain GCC optimizations that
	produce 32-bit reads from 16-bit variables where the upper 16 bits are
	thrown away afterwards.

	The default value (enabled) is recommended. This may of course hide
	some real errors, but disabling it would probably produce a lot of
	false positives.

- ``CONFIG_KMEMCHECK_BITOPS_OK``
	This option silences warnings that would be generated for bit-field
	accesses where not all the bits are initialized at the same time. This
	may also hide some real bugs.

	This option is probably obsolete, or it should be replaced with
	the kmemcheck-/bitfield-annotations for the code in question. The
	default value is therefore fine.

Now compile the kernel as usual.


How to use
----------

Booting
~~~~~~~

First some information about the command-line options. There is only one
option specific to kmemcheck, and this is called "kmemcheck". It can be used
to override the default mode as chosen by the ``CONFIG_KMEMCHECK_*_BY_DEFAULT``
option. Its possible settings are:

- ``kmemcheck=0`` (disabled)
- ``kmemcheck=1`` (enabled)
- ``kmemcheck=2`` (one-shot mode)

If SLUB debugging has been enabled in the kernel, it may take precedence over
kmemcheck in such a way that the slab caches which are under SLUB debugging
will not be tracked by kmemcheck. In order to ensure that this doesn't happen
(even though it shouldn't by default), use SLUB's boot option ``slub_debug``,
like this: ``slub_debug=-``

In fact, this option may also be used for fine-grained control over SLUB vs.
kmemcheck. For example, if the command line includes
``kmemcheck=1 slub_debug=,dentry``, then SLUB debugging will be used only
for the "dentry" slab cache, and with kmemcheck tracking all the other
caches. This is advanced usage, however, and is not generally recommended.


Run-time enable/disable
~~~~~~~~~~~~~~~~~~~~~~~

When the kernel has booted, it is possible to enable or disable kmemcheck at
run-time. WARNING: This feature is still experimental and may cause false
positive warnings to appear. Therefore, try not to use this. If you find that
it doesn't work properly (e.g. you see an unreasonable amount of warnings), I
will be happy to take bug reports.

Use the file ``/proc/sys/kernel/kmemcheck`` for this purpose, e.g.::

	$ echo 0 > /proc/sys/kernel/kmemcheck # disables kmemcheck

The numbers are the same as for the ``kmemcheck=`` command-line option.


Debugging
~~~~~~~~~

A typical report will look something like this::

    WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
    80000000000000000000000000000000000000000088ffff0000000000000000
     i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
             ^

    Pid: 1856, comm: ntpdate Not tainted 2.6.29-rc5 #264 945P-A
    RIP: 0010:[<ffffffff8104ede8>]  [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
    RSP: 0018:ffff88003cdf7d98  EFLAGS: 00210002
    RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
    RDX: ffff88003e5d6018 RSI: ffff88003e5d6024 RDI: ffff88003cdf7e84
    RBP: ffff88003cdf7db8 R08: ffff88003e5d6000 R09: 0000000000000000
    R10: 0000000000000080 R11: 0000000000000000 R12: 000000000000000e
    R13: ffff88003cdf7e78 R14: ffff88003d530710 R15: ffff88003d5a98c8
    FS:  0000000000000000(0000) GS:ffff880001982000(0063) knlGS:00000
    CS:  0010 DS: 002b ES: 002b CR0: 0000000080050033
    CR2: ffff88003f806ea0 CR3: 000000003c036000 CR4: 00000000000006a0
    DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
    DR3: 0000000000000000 DR6: 00000000ffff4ff0 DR7: 0000000000000400
     [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
     [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
     [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
     [<ffffffff8100c7b5>] int_signal+0x12/0x17
     [<ffffffffffffffff>] 0xffffffffffffffff

The single most valuable piece of information in this report is the RIP (or
EIP on 32-bit) value. This will help us pinpoint exactly which instruction
caused the warning.

If your kernel was compiled with ``CONFIG_DEBUG_INFO=y``, then all we have to
do is give this address to the addr2line program, like this::

	$ addr2line -e vmlinux -i ffffffff8104ede8
	arch/x86/include/asm/string_64.h:12
	include/asm-generic/siginfo.h:287
	kernel/signal.c:380
	kernel/signal.c:410

The "``-e vmlinux``" tells addr2line which file to look in. **IMPORTANT:**
This must be the vmlinux of the kernel that produced the warning in the
first place! If not, the line number information will almost certainly be
wrong.

The "``-i``" tells addr2line to also print the line numbers of inlined
functions. In this case, the flag was very important, because otherwise,
it would only have printed the first line, which is just a call to
``memcpy()``, which could be called from a thousand places in the kernel, and
is therefore not very useful. These inlined functions would not show up in
the stack trace above, simply because the kernel doesn't load the extra
debugging information. This technique can of course be used with ordinary
kernel oopses as well.

In this case, it's the caller of ``memcpy()`` that is interesting, and it can
be found in ``include/asm-generic/siginfo.h``, line 287::

	281 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
	282 {
	283         if (from->si_code < 0)
	284                 memcpy(to, from, sizeof(*to));
	285         else
	286                 /* _sigchld is currently the largest know union member */
	287                 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
	288 }

Since this was a read (kmemcheck usually warns about reads only, though it can
warn about writes to unallocated or freed memory as well), it was probably the
"from" argument which contained some uninitialized bytes. Following the chain
of calls, we move upwards to see where "from" was allocated or initialized,
``kernel/signal.c``, line 380::

	359 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
	360 {
	...
	367         list_for_each_entry(q, &list->list, list) {
	368                 if (q->info.si_signo == sig) {
	369                         if (first)
	370                                 goto still_pending;
	371                         first = q;
	...
	377         if (first) {
	378 still_pending:
	379                 list_del_init(&first->list);
	380                 copy_siginfo(info, &first->info);
	381                 __sigqueue_free(first);
	...
	392         }
	393 }

Here, it is ``&first->info`` that is being passed on to ``copy_siginfo()``. The
variable ``first`` was found on a list -- passed in as the second argument to
``collect_signal()``. We continue our journey through the stack, to figure out
where the item on "list" was allocated or initialized. We move to line 410::

	395 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
	396                             siginfo_t *info)
	397 {
	...
	410         collect_signal(sig, pending, info);
	...
	414 }

Now we need to follow the ``pending`` pointer, since that is being passed on
to ``collect_signal()`` as ``list``. At this point, we've run out of lines from
the "addr2line" output. Not to worry, we just paste the next addresses from
the kmemcheck stack dump, i.e.::

	 [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
	 [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
	 [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
	 [<ffffffff8100c7b5>] int_signal+0x12/0x17

	$ addr2line -e vmlinux -i ffffffff8104f04e ffffffff81050bd8 \
		ffffffff8100b87d ffffffff8100c7b5
	kernel/signal.c:446
	kernel/signal.c:1806
	arch/x86/kernel/signal.c:805
	arch/x86/kernel/signal.c:871
	arch/x86/kernel/entry_64.S:694

Remember that since these addresses were found on the stack and not as the
RIP value, they actually point to the _next_ instruction (they are return
addresses). This becomes obvious when we look at the code for line 446::

	422 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
	423 {
	...
	431         signr = __dequeue_signal(&tsk->signal->shared_pending,
	432                                  mask, info);
	433         /*
	434          * itimer signal ?
	435          *
	436          * itimers are process shared and we restart periodic
	437          * itimers in the signal delivery path to prevent DoS
	438          * attacks in the high resolution timer case. This is
	439          * compliant with the old way of self restarting
	440          * itimers, as the SIGALRM is a legacy signal and only
	441          * queued once. Changing the restart behaviour to
	442          * restart the timer in the signal dequeue path is
	443          * reducing the timer noise on heavy loaded !highres
	444          * systems too.
	445          */
	446         if (unlikely(signr == SIGALRM)) {
	...
	489 }

So instead of looking at 446, we should be looking at 431, which is the line
that executes just before 446. Here we see that what we are looking for is
``&tsk->signal->shared_pending``.

Our next task is now to figure out which function puts items on this
``shared_pending`` list. A crude, but efficient tool, is ``git grep``::

	$ git grep -n 'shared_pending' kernel/
	...
	kernel/signal.c:828:            pending = group ? &t->signal->shared_pending : &t->pending;
	kernel/signal.c:1339:           pending = group ? &t->signal->shared_pending : &t->pending;
	...

There were more results, but none of them were related to list operations,
and these were the only assignments. We inspect the line numbers more closely
and find that this is indeed where items are being added to the list::

	816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
	817                        int group)
	818 {
	...
	828         pending = group ? &t->signal->shared_pending : &t->pending;
	...
	851         q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
	852                                              (is_si_special(info) ||
	853                                               info->si_code >= 0)));
	854         if (q) {
	855                 list_add_tail(&q->list, &pending->list);
	...
	890 }

and::

	1309 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
	1310 {
	....
	1339         pending = group ? &t->signal->shared_pending : &t->pending;
	1340         list_add_tail(&q->list, &pending->list);
	....
	1347 }

In the first case, the list element we are looking for, ``q``, is being
returned from the function ``__sigqueue_alloc()``, which looks like an
allocation function. Let's take a look at it::

	187 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
	188                                          int override_rlimit)
	189 {
	190         struct sigqueue *q = NULL;
	191         struct user_struct *user;
	192
	193         /*
	194          * We won't get problems with the target's UID changing under us
	195          * because changing it requires RCU be used, and if t != current, the
	196          * caller must be holding the RCU readlock (by way of a spinlock) and
	197          * we use RCU protection here
	198          */
	199         user = get_uid(__task_cred(t)->user);
	200         atomic_inc(&user->sigpending);
	201         if (override_rlimit ||
	202             atomic_read(&user->sigpending) <=
	203                         t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
	204                 q = kmem_cache_alloc(sigqueue_cachep, flags);
	205         if (unlikely(q == NULL)) {
	206                 atomic_dec(&user->sigpending);
	207                 free_uid(user);
	208         } else {
	209                 INIT_LIST_HEAD(&q->list);
	210                 q->flags = 0;
	211                 q->user = user;
	212         }
	213
	214         return q;
	215 }

We see that this function initializes ``q->list``, ``q->flags``, and
``q->user``. It seems that now is the time to look at the definition of
``struct sigqueue``, e.g.::

	14 struct sigqueue {
	15         struct list_head list;
	16         int flags;
	17         siginfo_t info;
	18         struct user_struct *user;
	19 };

And, you might remember, it was a ``memcpy()`` on ``&first->info`` that
caused the warning, so this makes perfect sense. It also seems reasonable
to assume that it is the caller of ``__sigqueue_alloc()`` that has the
responsibility of filling out (initializing) this member.

But just which fields of the struct were uninitialized? Let's look at
kmemcheck's report again::

    WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
    80000000000000000000000000000000000000000088ffff0000000000000000
     i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
             ^

These first two lines are the memory dump of the memory object itself, and
the shadow bytemap, respectively. The memory object itself is in this case
``&first->info``. Just beware that the start of this dump is NOT the start
of the object itself! The position of the caret (^) corresponds with the
address of the read (ffff88003e4a2024).

The shadow bytemap dump legend is as follows:

- i: initialized
- u: uninitialized
- a: unallocated (memory has been allocated by the slab layer, but has not
  yet been handed off to anybody)
- f: freed (memory has been allocated by the slab layer, but has been freed
  by the previous owner)

In order to figure out where (relative to the start of the object) the
uninitialized memory was located, we have to look at the disassembly. For
that, we'll need the RIP address again::

    RIP: 0010:[<ffffffff8104ede8>]  [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190

	$ objdump -d --no-show-raw-insn vmlinux | grep -C 8 ffffffff8104ede8:
	ffffffff8104edc8:       mov    %r8,0x8(%r8)
	ffffffff8104edcc:       test   %r10d,%r10d
	ffffffff8104edcf:       js     ffffffff8104ee88 <__dequeue_signal+0x168>
	ffffffff8104edd5:       mov    %rax,%rdx
	ffffffff8104edd8:       mov    $0xc,%ecx
	ffffffff8104eddd:       mov    %r13,%rdi
	ffffffff8104ede0:       mov    $0x30,%eax
	ffffffff8104ede5:       mov    %rdx,%rsi
	ffffffff8104ede8:       rep movsl %ds:(%rsi),%es:(%rdi)
	ffffffff8104edea:       test   $0x2,%al
	ffffffff8104edec:       je     ffffffff8104edf0 <__dequeue_signal+0xd0>
	ffffffff8104edee:       movsw  %ds:(%rsi),%es:(%rdi)
	ffffffff8104edf0:       test   $0x1,%al
	ffffffff8104edf2:       je     ffffffff8104edf5 <__dequeue_signal+0xd5>
	ffffffff8104edf4:       movsb  %ds:(%rsi),%es:(%rdi)
	ffffffff8104edf5:       mov    %r8,%rdi
	ffffffff8104edf8:       callq  ffffffff8104de60 <__sigqueue_free>

As expected, it's the "``rep movsl``" instruction from the ``memcpy()``
that causes the warning. We know about ``REP MOVSL`` that it uses the register
``RCX`` to count the number of remaining iterations. By taking a look at the
register dump again (from the kmemcheck report), we can figure out how many
bytes were left to copy::

    RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009

By looking at the disassembly, we also see that ``%ecx`` is being loaded
with the value ``$0xc`` just before (ffffffff8104edd8), so we are very
lucky. Keep in mind that this is the number of iterations, not bytes. And
since this is a "long" operation, we need to multiply by 4 to get the
number of bytes. So this means that the uninitialized value was encountered
at 4 * (0xc - 0x9) = 12 bytes from the start of the object.

We can now try to figure out which field of the "``struct siginfo``" was
not initialized. This is the beginning of the struct::

	40 typedef struct siginfo {
	41         int si_signo;
	42         int si_errno;
	43         int si_code;
	44
	45         union {
	..
	92         } _sifields;
	93 } siginfo_t;

On 64-bit, the int is 4 bytes long, so it must be the union member that has
not been initialized. We can verify this using gdb::

	$ gdb vmlinux
	...
	(gdb) p &((struct siginfo *) 0)->_sifields
	$1 = (union {...} *) 0x10

Actually, it seems that the union member is located at offset 0x10 -- which
means that gcc has inserted 4 bytes of padding between the members ``si_code``
and ``_sifields``. We can now get a fuller picture of the memory dump::

	         _----------------------------=> si_code
	        /        _--------------------=> (padding)
	       |        /        _------------=> _sifields(._kill._pid)
	       |       |        /        _----=> _sifields(._kill._uid)
	       |       |       |        /
	-------|-------|-------|-------|
	80000000000000000000000000000000000000000088ffff0000000000000000
	 i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u

This allows us to realize another important fact: ``si_code`` contains the
value 0x80. Remember that x86 is little endian, so the first 4 bytes
"80000000" are really the number 0x00000080. With a bit of research, we
find that this is actually the constant ``SI_KERNEL`` defined in
``include/asm-generic/siginfo.h``::

	144 #define SI_KERNEL       0x80            /* sent by the kernel from somewhere     */

This macro is used in exactly one place in the x86 kernel: In ``send_signal()``
in ``kernel/signal.c``::

	816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
	817                        int group)
	818 {
	...
	828         pending = group ? &t->signal->shared_pending : &t->pending;
	...
	851         q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
	852                                              (is_si_special(info) ||
	853                                               info->si_code >= 0)));
	854         if (q) {
	855                 list_add_tail(&q->list, &pending->list);
	856                 switch ((unsigned long) info) {
	...
	865                 case (unsigned long) SEND_SIG_PRIV:
	866                         q->info.si_signo = sig;
	867                         q->info.si_errno = 0;
	868                         q->info.si_code = SI_KERNEL;
	869                         q->info.si_pid = 0;
	870                         q->info.si_uid = 0;
	871                         break;
	...
	890 }

Not only does this match with the ``.si_code`` member, it also matches the
place we found earlier when looking for where siginfo_t objects are enqueued
on the ``shared_pending`` list.

So to sum up: It seems that it is the padding introduced by the compiler
between two struct fields that is uninitialized, and this gets reported when
we do a ``memcpy()`` on the struct. This means that we have identified a false
positive warning.

Normally, kmemcheck will not report uninitialized accesses in ``memcpy()``
calls when both the source and destination addresses are tracked. (Instead,
we copy the shadow bytemap as well.) In this case, the destination address
clearly was not tracked. We can dig a little deeper into the stack trace from
above::

	arch/x86/kernel/signal.c:805
	arch/x86/kernel/signal.c:871
	arch/x86/kernel/entry_64.S:694

And we clearly see that the destination siginfo object is located on the
stack::

	782 static void do_signal(struct pt_regs *regs)
	783 {
	784         struct k_sigaction ka;
	785         siginfo_t info;
	...
	804         signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	...
	854 }

And this ``&info`` is what eventually gets passed to ``copy_siginfo()`` as
the destination argument.

Now, even though we didn't find an actual error here, the example is still a
good one, because it shows how one would go about finding out what the report
was all about.


Annotating false positives
~~~~~~~~~~~~~~~~~~~~~~~~~~

There are a few different ways to make annotations in the source code that
will keep kmemcheck from checking and reporting certain allocations. Here
they are:

- ``__GFP_NOTRACK_FALSE_POSITIVE``
	This flag can be passed to ``kmalloc()`` or ``kmem_cache_alloc()``
	(therefore also to other functions that end up calling one of
	these) to indicate that the allocation should not be tracked
	because it would lead to a false positive report. This is a "big
	hammer" way of silencing kmemcheck; after all, even if the false
	positive pertains to a particular field in a struct, for example, we
	will now lose the ability to find (real) errors in other parts of
	the same struct.

	Example::

	    /* No warnings will ever trigger on accessing any part of x */
	    x = kmalloc(sizeof *x, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);

- ``kmemcheck_bitfield_begin(name)``/``kmemcheck_bitfield_end(name)`` and
  ``kmemcheck_annotate_bitfield(ptr, name)``
	The first two of these three macros can be used inside struct
	definitions to signal, respectively, the beginning and end of a
	bitfield. Additionally, this will assign the bitfield a name, which
	is given as an argument to the macros.

	Having used these markers, one can later use
	kmemcheck_annotate_bitfield() at the point of allocation, to indicate
	which parts of the allocation are part of a bitfield.

	Example::

	    struct foo {
		int x;

		kmemcheck_bitfield_begin(flags);
		int flag_a:1;
		int flag_b:1;
		kmemcheck_bitfield_end(flags);

		int y;
	    };

	    struct foo *x = kmalloc(sizeof *x);

	    /* No warnings will trigger on accessing the bitfield of x */
	    kmemcheck_annotate_bitfield(x, flags);

Note that ``kmemcheck_annotate_bitfield()`` can be used even before the
return value of ``kmalloc()`` is checked -- in other words, passing NULL
as the first argument is legal (and will do nothing).


Reporting errors
----------------

As we have seen, kmemcheck will produce false positive reports. Therefore, it
is not very wise to blindly post kmemcheck warnings to mailing lists and
maintainers. Instead, I encourage maintainers and developers to find errors
in their own code. If you get a warning, you can try to work around it, try
to figure out if it's a real error or not, or simply ignore it. Most
developers know their own code and will quickly and efficiently determine the
root cause of a kmemcheck report. This is therefore also the most efficient
way to work with kmemcheck.

That said, we (the kmemcheck maintainers) will always be on the lookout for
false positives that we can annotate and silence. So whatever you find,
please drop us a note privately! Kernel configs and steps to reproduce (if
available) are of course a great help too.

Happy hacking!


Technical description
---------------------

kmemcheck works by marking memory pages non-present. This means that whenever
somebody attempts to access the page, a page fault is generated. The page
fault handler notices that the page was in fact only hidden, and so it calls
on the kmemcheck code to make further investigations.

When the investigations are completed, kmemcheck "shows" the page by marking
it present (as it would be under normal circumstances). This way, the
interrupted code can continue as usual.

But after the instruction has been executed, we should hide the page again, so
that we can catch the next access too! Now kmemcheck makes use of a debugging
feature of the processor, namely single-stepping. When the processor has
finished the one instruction that generated the memory access, a debug
exception is raised. From here, we simply hide the page again and continue
execution, this time with the single-stepping feature turned off.
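The hide/fault/show half of this protocol can be imitated in ordinary
userspace C with ``mprotect()`` and a ``SIGSEGV`` handler. The sketch below is
a loose analogy written for this walkthrough, not kernel code: the real
implementation also re-hides the page from the debug exception after
single-stepping, which plain userspace cannot easily reproduce::

	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static char *page;
	static long page_size;

	/* "Show" the hidden page, as kmemcheck's fault handler would. */
	static void on_fault(int sig, siginfo_t *si, void *uc)
	{
		const char msg[] = "fault: showing the hidden page\n";

		(void)sig; (void)si; (void)uc;
		write(2, msg, sizeof(msg) - 1);
		mprotect(page, page_size, PROT_READ | PROT_WRITE);
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = on_fault;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGSEGV, &sa, NULL);

		page_size = sysconf(_SC_PAGESIZE);
		/* PROT_NONE: the page is mapped but "hidden". */
		page = mmap(NULL, page_size, PROT_NONE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (page == MAP_FAILED)
			return 1;

		page[0] = 42;	/* faults once; the restarted store succeeds */
		printf("read back: %d\n", page[0]);
		return 0;
	}

Running it prints the fault message exactly once; the store is then restarted
against the now-visible page and completes normally.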
kmemcheck requires some assistance from the memory allocator in order to work.
The memory allocator needs to

1. Tell kmemcheck about newly allocated pages and pages that are about to
   be freed. This allows kmemcheck to set up and tear down the shadow memory
   for the pages in question. The shadow memory stores the status of each
   byte in the allocation proper, e.g. whether it is initialized or
   uninitialized.

2. Tell kmemcheck which parts of memory should be marked uninitialized.
   There are actually a few more states, such as "not yet allocated" and
   "recently freed".

If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
memory that can take page faults because of kmemcheck.

If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
request memory with the __GFP_NOTRACK or __GFP_NOTRACK_FALSE_POSITIVE flags.
This does not prevent the page faults from occurring, however, but marks the
object in question as being initialized so that no warnings will ever be
produced for this object.

Currently, the SLAB and SLUB allocators are supported by kmemcheck.
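To make the allocator's two duties concrete, here is a toy, self-contained C
model of per-byte shadow state. The names (``shadow_state``, ``mark()``,
``check_read()``) are invented for this illustration; kmemcheck's real shadow
tracking is per page and lives in the x86 mm code::

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	/* One state byte per tracked data byte -- the bookkeeping idea
	 * from points 1 and 2 above, nothing more. */
	enum shadow_state {
		SHADOW_UNALLOCATED,	/* owned by the slab layer, never handed out */
		SHADOW_UNINITIALIZED,	/* handed out, not yet written */
		SHADOW_INITIALIZED,	/* written at least once */
		SHADOW_FREED,		/* handed back to the slab layer */
	};

	static unsigned char shadow[64];	/* shadow for a 64-byte "object" */

	static void mark(size_t off, size_t len, enum shadow_state s)
	{
		memset(shadow + off, s, len);
	}

	/* What a trapped load would trigger: complain about any byte
	 * that is not known to be initialized. */
	static void check_read(size_t off, size_t len)
	{
		for (size_t i = off; i < off + len; i++)
			if (shadow[i] != SHADOW_INITIALIZED)
				printf("WARNING: read hits byte %zu in state %d\n",
				       i, shadow[i]);
	}

	int main(void)
	{
		mark(0, sizeof(shadow), SHADOW_UNINITIALIZED);	/* allocation */
		mark(0, 16, SHADOW_INITIALIZED);	/* caller fills 16 bytes */
		check_read(12, 8);	/* bytes 16..19 of this read warn */
		return 0;
	}

A real check additionally has to run from the page fault path without itself
faulting -- which is exactly why the error queue described earlier must be a
fixed, pre-allocated buffer.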
@@ -64,6 +64,6 @@ Example:
 	reg = <0xe0000000 0x1000>;
 	interrupts = <0 35 0x4>;
 	dmas = <&dmahost 12 0 1>,
-		<&dmahost 13 0 1 0>;
+		<&dmahost 13 1 0>;
 	dma-names = "rx", "rx";
 };
@@ -233,7 +233,7 @@ data_err=ignore(*)	Just print an error message if an error occurs
 data_err=abort		Abort the journal if an error occurs in a file
 			data buffer in ordered mode.

-grpid			Give objects the same group ID as their creator.
+grpid			New objects have the group ID of their parent.
 bsdgroups

 nogrpid	(*)		New objects have the group ID of their creator.
@@ -0,0 +1,90 @@
This document explains potential effects of speculation, and how undesirable
effects can be mitigated portably using common APIs.

===========
Speculation
===========

To improve performance and minimize average latencies, many contemporary CPUs
employ speculative execution techniques such as branch prediction, performing
work which may be discarded at a later stage.

Typically speculative execution cannot be observed from architectural state,
such as the contents of registers. However, in some cases it is possible to
observe its impact on microarchitectural state, such as the presence or
absence of data in caches. Such state may form side-channels which can be
observed to extract secret information.

For example, in the presence of branch prediction, it is possible for bounds
checks to be ignored by code which is speculatively executed. Consider the
following code:

	int load_array(int *array, unsigned int index)
	{
		if (index >= MAX_ARRAY_ELEMS)
			return 0;
		else
			return array[index];
	}

Which, on arm64, may be compiled to an assembly sequence such as:

	CMP	<index>, #MAX_ARRAY_ELEMS
	B.LT	less
	MOV	<returnval>, #0
	RET
  less:
	LDR	<returnval>, [<array>, <index>]
	RET

It is possible that a CPU mis-predicts the conditional branch, and
speculatively loads array[index], even if index >= MAX_ARRAY_ELEMS. This
value will subsequently be discarded, but the speculated load may affect
microarchitectural state which can be subsequently measured.

More complex sequences involving multiple dependent memory accesses may
result in sensitive information being leaked. Consider the following
code, building on the prior example:

	int load_dependent_arrays(int *arr1, int *arr2, int index)
	{
		int val1, val2;

		val1 = load_array(arr1, index);
		val2 = load_array(arr2, val1);

		return val2;
	}

Under speculation, the first call to load_array() may return the value
of an out-of-bounds address, while the second call will influence
microarchitectural state dependent on this value. This may provide an
arbitrary read primitive.

====================================
Mitigating speculation side-channels
====================================

The kernel provides a generic API to ensure that bounds checks are
respected even under speculation. Architectures which are affected by
speculation-based side-channels are expected to implement these
primitives.

The array_index_nospec() helper in <linux/nospec.h> can be used to
prevent information from being leaked via side-channels.

A call to array_index_nospec(index, size) returns a sanitized index
value that is bounded to [0, size) even under CPU speculation
conditions.

This can be used to protect the earlier load_array() example:

	int load_array(int *array, unsigned int index)
	{
		if (index >= MAX_ARRAY_ELEMS)
			return 0;
		else {
			index = array_index_nospec(index, MAX_ARRAY_ELEMS);
			return array[index];
		}
	}
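The branchless masking idea behind such a helper can be demonstrated with a
small standalone C program. This is a sketch modelled on the kernel's generic
array_index_mask_nospec(); the name index_mask() and the demo values are ours,
and the real helper additionally involves architecture-specific ordering that
is omitted here:

	#include <stdio.h>

	/* All-ones when 0 <= index < size, all-zeroes otherwise, computed
	 * without a conditional branch that the CPU could mis-predict. */
	static unsigned long index_mask(unsigned long index, unsigned long size)
	{
		/* If index < size, (index | (size - 1 - index)) has a clear
		 * top bit, so the arithmetic right shift of its complement
		 * yields ~0UL; any out-of-range index sets the top bit and
		 * yields 0. Relies on the (near-universal) arithmetic-shift
		 * behaviour for signed longs. */
		return ~(long)(index | (size - 1UL - index)) >>
			(sizeof(long) * 8 - 1);
	}

	int main(void)
	{
		const unsigned long size = 16;

		for (unsigned long i = 14; i < 18; i++)
			printf("index %2lu -> clamped %2lu\n",
			       i, i & index_mask(i, size));
		return 0;
	}

Because the clamp is a data dependency rather than a branch, even a
mis-speculated path can only ever form an in-bounds (or zero) index.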
MAINTAINERS
@@ -7670,16 +7670,6 @@ F:	include/linux/kdb.h
 F:	include/linux/kgdb.h
 F:	kernel/debug/

-KMEMCHECK
-M:	Vegard Nossum <vegardno@ifi.uio.no>
-M:	Pekka Enberg <penberg@kernel.org>
-S:	Maintained
-F:	Documentation/dev-tools/kmemcheck.rst
-F:	arch/x86/include/asm/kmemcheck.h
-F:	arch/x86/mm/kmemcheck/
-F:	include/linux/kmemcheck.h
-F:	mm/kmemcheck.c
-
 KMEMLEAK
 M:	Catalin Marinas <catalin.marinas@arm.com>
 S:	Maintained
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 16
+SUBLEVEL = 22
 EXTRAVERSION =
 NAME = Petit Gorille

@@ -416,7 +416,8 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS

 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -20,8 +20,8 @@
 	"3:	.subsection 2\n"				\
 	"4:	br	1b\n"					\
 	"	.previous\n"					\
-	EXC(1b,3b,%1,$31)					\
-	EXC(2b,3b,%1,$31)					\
+	EXC(1b,3b,$31,%1)					\
+	EXC(2b,3b,$31,%1)					\
 	:	"=&r" (oldval), "=&r"(ret)			\
 	:	"r" (uaddr), "r"(oparg)				\
 	:	"memory")

@@ -82,8 +82,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	"3:	.subsection 2\n"
 	"4:	br	1b\n"
 	"	.previous\n"
-	EXC(1b,3b,%0,$31)
-	EXC(2b,3b,%0,$31)
+	EXC(1b,3b,$31,%0)
+	EXC(2b,3b,$31,%0)
 	:	"+r"(ret), "=&r"(prev), "=&r"(cmp)
 	:	"r"(uaddr), "r"((long)(int)oldval), "r"(newval)
 	:	"memory");
@@ -964,8 +964,8 @@ static inline long
 put_tv32(struct timeval32 __user *o, struct timeval *i)
 {
 	return copy_to_user(o, &(struct timeval32){
-				.tv_sec = o->tv_sec,
-				.tv_usec = o->tv_usec},
+				.tv_sec = i->tv_sec,
+				.tv_usec = i->tv_usec},
 			    sizeof(struct timeval32));
 }

@@ -144,7 +144,8 @@ struct pci_iommu_arena
 };

 #if defined(CONFIG_ALPHA_SRM) && \
-    (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA))
+    (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA) || \
+     defined(CONFIG_ALPHA_AVANTI))
 # define NEED_SRM_SAVE_RESTORE
 #else
 # undef NEED_SRM_SAVE_RESTORE
@@ -269,12 +269,13 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 	   application calling fork.  */
 	if (clone_flags & CLONE_SETTLS)
 		childti->pcb.unique = regs->r20;
+	else
+		regs->r20 = 0;	/* OSF/1 has some strange fork() semantics.  */
 	childti->pcb.usp = usp ?: rdusp();
 	*childregs = *regs;
 	childregs->r0 = 0;
 	childregs->r19 = 0;
 	childregs->r20 = 1;	/* OSF/1 has some strange fork() semantics.  */
-	regs->r20 = 0;
 	stack = ((struct switch_stack *) regs) - 1;
 	*childstack = *stack;
 	childstack->r26 = (unsigned long) ret_from_fork;
@@ -160,11 +160,16 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 	for(i=0; i < kstack_depth_to_print; i++) {
 		if (((long) stack & (THREAD_SIZE-1)) == 0)
 			break;
-		if (i && ((i % 4) == 0))
-			printk("\n       ");
-		printk("%016lx ", *stack++);
+		if ((i % 4) == 0) {
+			if (i)
+				pr_cont("\n");
+			printk("       ");
+		} else {
+			pr_cont(" ");
+		}
+		pr_cont("%016lx", *stack++);
 	}
-	printk("\n");
+	pr_cont("\n");
 	dik_show_trace(sp);
 }

@@ -1,2 +1 @@
-*.dtb*
 uImage
@@ -3,4 +3,3 @@ zImage
 xipImage
 bootpImage
 uImage
-*.dtb
@@ -927,7 +927,8 @@
 			reg = <0x48038000 0x2000>,
 			      <0x46000000 0x400000>;
 			reg-names = "mpu", "dat";
-			interrupts = <80>, <81>;
+			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "tx", "rx";
 			status = "disabled";
 			dmas = <&edma 8 2>,

@@ -941,7 +942,8 @@
 			reg = <0x4803C000 0x2000>,
 			      <0x46400000 0x400000>;
 			reg-names = "mpu", "dat";
-			interrupts = <82>, <83>;
+			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "tx", "rx";
 			status = "disabled";
 			dmas = <&edma 10 2>,
@@ -301,8 +301,8 @@
 	status = "okay";
 	pinctrl-names = "default";
 	pinctrl-0 = <&spi0_pins>;
-	dmas = <&edma 16
-		&edma 17>;
+	dmas = <&edma 16 0
+		&edma 17 0>;
 	dma-names = "tx0", "rx0";

 	flash: w25q64cvzpig@0 {
@@ -150,11 +150,6 @@
 	interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
 };

-&charlcd {
-	interrupt-parent = <&intc>;
-	interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
-};
-
 &serial0 {
 	interrupt-parent = <&intc>;
 	interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
@@ -85,7 +85,7 @@
 		timer@20200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x20200 0x100>;
-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&periph_clk>;
 		};

@@ -93,7 +93,7 @@
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0x20600 0x20>;
 			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
-						  IRQ_TYPE_LEVEL_HIGH)>;
+						  IRQ_TYPE_EDGE_RISING)>;
 			clocks = <&periph_clk>;
 		};

@@ -141,10 +141,6 @@
 	status = "okay";
 };

-&sata {
-	status = "okay";
-};
-
 &qspi {
 	bspi-sel = <0>;
 	flash: m25p80@0 {

@@ -177,10 +177,6 @@
 	status = "okay";
 };

-&sata {
-	status = "okay";
-};
-
 &srab {
 	compatible = "brcm,bcm58625-srab", "brcm,nsp-srab";
 	status = "okay";
@@ -333,7 +333,6 @@
 &rtc {
 	clocks = <&clock CLK_RTC>;
 	clock-names = "rtc";
-	interrupt-parent = <&pmu_system_controller>;
 	status = "disabled";
 };

@@ -72,7 +72,8 @@
 };

 &gpmc {
-	ranges = <1 0 0x08000000 0x1000000>;	/* CS1: 16MB for LAN9221 */
+	ranges = <0 0 0x30000000 0x1000000	/* CS0: 16MB for NAND */
+		  1 0 0x2c000000 0x1000000>;	/* CS1: 16MB for LAN9221 */

 	ethernet@gpmc {
 		pinctrl-names = "default";

@@ -37,7 +37,7 @@
 };

 &gpmc {
-	ranges = <0 0 0x00000000 0x1000000>;	/* CS0: 16MB for NAND */
+	ranges = <0 0 0x30000000 0x1000000>;	/* CS0: 16MB for NAND */

 	nand@0,0 {
 		compatible = "ti,omap2-nand";
@@ -121,7 +121,7 @@

 &mmc3 {
 	interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
-	pinctrl-0 = <&mmc3_pins>;
+	pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
 	pinctrl-names = "default";
 	vmmc-supply = <&wl12xx_vmmc>;
 	non-removable;

@@ -132,8 +132,8 @@
 	wlcore: wlcore@2 {
 		compatible = "ti,wl1273";
 		reg = <2>;
-		interrupt-parent = <&gpio5>;
-		interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */
+		interrupt-parent = <&gpio1>;
+		interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; /* gpio 2 */
 		ref-clock-frequency = <26000000>;
 	};
 };

@@ -157,8 +157,6 @@
 			OMAP3_CORE1_IOPAD(0x2166, PIN_INPUT_PULLUP | MUX_MODE3)	/* sdmmc2_dat5.sdmmc3_dat1 */
 			OMAP3_CORE1_IOPAD(0x2168, PIN_INPUT_PULLUP | MUX_MODE3)	/* sdmmc2_dat6.sdmmc3_dat2 */
 			OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT_PULLUP | MUX_MODE3)	/* sdmmc2_dat6.sdmmc3_dat3 */
-			OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT_PULLUP | MUX_MODE4)	/* mcbsp4_clkx.gpio_152 */
-			OMAP3_CORE1_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4)	/* sys_boot1.gpio_3 */
 			OMAP3_CORE1_IOPAD(0x21d0, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.sdmmc3_cmd */
 			OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs2.sdmmc_clk */
 		>;

@@ -228,6 +226,12 @@
 			OMAP3_WKUP_IOPAD(0x2a0e, PIN_OUTPUT | MUX_MODE4)	/* sys_boot2.gpio_4 */
 		>;
 	};
+	wl127x_gpio: pinmux_wl127x_gpio_pin {
+		pinctrl-single,pins = <
+			OMAP3_WKUP_IOPAD(0x2a0c, PIN_INPUT | MUX_MODE4)		/* sys_boot0.gpio_2 */
+			OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4)	/* sys_boot1.gpio_3 */
+		>;
+	};
 };

 &omap3_pmx_core2 {
@@ -156,8 +156,8 @@
 	uda1380: uda1380@18 {
 		compatible = "nxp,uda1380";
 		reg = <0x18>;
-		power-gpio = <&gpio 0x59 0>;
-		reset-gpio = <&gpio 0x51 0>;
+		power-gpio = <&gpio 3 10 0>;
+		reset-gpio = <&gpio 3 2 0>;
 		dac-clk = "wspll";
 	};

@@ -81,8 +81,8 @@
 	uda1380: uda1380@18 {
 		compatible = "nxp,uda1380";
 		reg = <0x18>;
-		power-gpio = <&gpio 0x59 0>;
-		reset-gpio = <&gpio 0x51 0>;
+		power-gpio = <&gpio 3 10 0>;
+		reset-gpio = <&gpio 3 2 0>;
 		dac-clk = "wspll";
 	};

@@ -593,6 +593,7 @@
 		compatible = "mediatek,mt2701-hifsys", "syscon";
 		reg = <0 0x1a000000 0 0x1000>;
 		#clock-cells = <1>;
+		#reset-cells = <1>;
 	};

 	usb0: usb@1a1c0000 {

@@ -677,6 +678,7 @@
 		compatible = "mediatek,mt2701-ethsys", "syscon";
 		reg = <0 0x1b000000 0 0x1000>;
 		#clock-cells = <1>;
+		#reset-cells = <1>;
 	};

 	eth: ethernet@1b100000 {

@@ -753,6 +753,7 @@
 			     "syscon";
 		reg = <0 0x1b000000 0 0x1000>;
 		#clock-cells = <1>;
+		#reset-cells = <1>;
 	};

 	eth: ethernet@1b100000 {
@@ -204,7 +204,7 @@
 		bus-width = <4>;
 		max-frequency = <50000000>;
 		cap-sd-highspeed;
-		cd-gpios = <&pio 261 0>;
+		cd-gpios = <&pio 261 GPIO_ACTIVE_LOW>;
 		vmmc-supply = <&mt6323_vmch_reg>;
 		vqmmc-supply = <&mt6323_vio18_reg>;
 	};
@@ -354,7 +354,7 @@
 		elm: elm@48078000 {
 			compatible = "ti,am3352-elm";
 			reg = <0x48078000 0x2000>;
-			interrupts = <4>;
+			interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "elm";
 			status = "disabled";
 		};

@@ -861,14 +861,12 @@
 		usbhsohci: ohci@4a064800 {
 			compatible = "ti,ohci-omap3";
 			reg = <0x4a064800 0x400>;
-			interrupt-parent = <&gic>;
 			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
 		};

 		usbhsehci: ehci@4a064c00 {
 			compatible = "ti,ehci-omap";
 			reg = <0x4a064c00 0x400>;
-			interrupt-parent = <&gic>;
 			interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
 		};
 	};
@@ -463,6 +463,7 @@
 		compatible = "samsung,exynos4210-ohci";
 		reg = <0xec300000 0x100>;
 		interrupts = <23>;
+		interrupt-parent = <&vic1>;
 		clocks = <&clocks CLK_USB_HOST>;
 		clock-names = "usbhost";
 		#address-cells = <1>;
@@ -349,7 +349,7 @@
 		spi0: spi@e0100000 {
 			status = "okay";
 			num-cs = <3>;
-			cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>;
+			cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>;

 			stmpe610@0 {
 				compatible = "st,stmpe610";
@@ -142,8 +142,8 @@
 		reg = <0xb4100000 0x1000>;
 		interrupts = <0 105 0x4>;
 		status = "disabled";
-		dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */
-		       <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */
+		dmas = <&dwdma0 12 0 1>,
+		       <&dwdma0 13 1 0>;
 		dma-names = "tx", "rx";
 	};

@@ -100,7 +100,7 @@
 		reg = <0xb2800000 0x1000>;
 		interrupts = <0 29 0x4>;
 		status = "disabled";
-		dmas = <&dwdma0 0 0 0 0>;
+		dmas = <&dwdma0 0 0 0>;
 		dma-names = "data";
 	};

@@ -290,8 +290,8 @@
 		#size-cells = <0>;
 		interrupts = <0 31 0x4>;
 		status = "disabled";
-		dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */
-		       <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */
+		dmas = <&dwdma0 4 0 0>,
+		       <&dwdma0 5 0 0>;
 		dma-names = "tx", "rx";
 	};

@@ -194,6 +194,7 @@
 		rtc: rtc@fc900000 {
 			compatible = "st,spear600-rtc";
 			reg = <0xfc900000 0x1000>;
+			interrupt-parent = <&vic0>;
 			interrupts = <10>;
 			status = "disabled";
 		};
@@ -750,6 +750,7 @@
 		reg = <0x10120000 0x1000>;
 		interrupt-names = "combined";
 		interrupts = <14>;
+		interrupt-parent = <&vica>;
 		clocks = <&clcdclk>, <&hclkclcd>;
 		clock-names = "clcdclk", "apb_pclk";
 		status = "disabled";
@@ -8,6 +8,7 @@
  */
 #include "stih407-clock.dtsi"
 #include "stih407-family.dtsi"
+#include <dt-bindings/gpio/gpio.h>
 / {
 	soc {
 		sti-display-subsystem {

@@ -122,7 +123,7 @@
 					 <&clk_s_d2_quadfs 0>,
 					 <&clk_s_d2_quadfs 1>;

-				hdmi,hpd-gpio = <&pio5 3>;
+				hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
 				reset-names = "hdmi";
 				resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
 				ddc = <&hdmiddc>;
@@ -9,6 +9,7 @@
 #include "stih410-clock.dtsi"
 #include "stih407-family.dtsi"
 #include "stih410-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
 / {
 	aliases {
 		bdisp0 = &bdisp0;

@@ -213,7 +214,7 @@
 					 <&clk_s_d2_quadfs 0>,
 					 <&clk_s_d2_quadfs 1>;

-				hdmi,hpd-gpio = <&pio5 3>;
+				hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
 				reset-names = "hdmi";
 				resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
 				ddc = <&hdmiddc>;
@@ -57,3 +57,7 @@ static struct miscdevice bL_switcher_device = {
 	&bL_switcher_fops
 };
 module_misc_device(bL_switcher_device);
+
+MODULE_AUTHOR("Nicolas Pitre <nico@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("big.LITTLE switcher dummy user interface");
@@ -188,6 +188,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
	.base.cra_name = "crc32",
	.base.cra_driver_name = "crc32-arm-ce",
	.base.cra_priority = 200,
	.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
	.base.cra_blocksize = 1,
	.base.cra_module = THIS_MODULE,
}, {

@@ -203,6 +204,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
	.base.cra_name = "crc32c",
	.base.cra_driver_name = "crc32c-arm-ce",
	.base.cra_priority = 200,
	.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
	.base.cra_blocksize = 1,
	.base.cra_module = THIS_MODULE,
} };

@@ -7,7 +7,6 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/kmemcheck.h>
#include <linux/kref.h>

#define ARM_MAPPING_ERROR (~(dma_addr_t)0x0)

@@ -293,4 +293,10 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

static inline bool kvm_arm_harden_branch_predictor(void)
{
	/* No way to detect it yet, pretend it is not there. */
	return false;
}

#endif /* __ARM_KVM_HOST_H__ */

@@ -221,6 +221,16 @@ static inline unsigned int kvm_get_vmid_bits(void)
	return 8;
}

static inline void *kvm_get_hyp_vector(void)
{
	return kvm_ksym_ref(__kvm_hyp_vector);
}

static inline int kvm_map_vectors(void)
{
	return 0;
}

#endif /* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */

@@ -1,27 +0,0 @@
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM_KVM_PSCI_H__
#define __ARM_KVM_PSCI_H__

#define KVM_ARM_PSCI_0_1 1
#define KVM_ARM_PSCI_0_2 2

int kvm_psci_version(struct kvm_vcpu *vcpu);
int kvm_psci_call(struct kvm_vcpu *vcpu);

#endif /* __ARM_KVM_PSCI_H__ */

@@ -57,7 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)

static inline void clean_pte_table(pte_t *pte)
{

@@ -790,7 +790,6 @@ void abort(void)
	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);

void __init trap_init(void)
{

@@ -21,7 +21,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
#include <kvm/arm_psci.h>
#include <trace/events/kvm.h>

#include "trace.h"

@@ -36,9 +36,9 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
		      kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_psci_call(vcpu);
	ret = kvm_hvc_call_handler(vcpu);
	if (ret < 0) {
		kvm_inject_undefined(vcpu);
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

@@ -47,7 +47,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)

static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 1;
}

@@ -73,6 +73,25 @@ phys_addr_t omap_secure_ram_mempool_base(void)
	return omap_secure_memblock_base;
}

u32 omap3_save_secure_ram(void __iomem *addr, int size)
{
	u32 ret;
	u32 param[5];

	if (size != OMAP3_SAVE_SECURE_RAM_SZ)
		return OMAP3_SAVE_SECURE_RAM_SZ;

	param[0] = 4; /* Number of arguments */
	param[1] = __pa(addr); /* Physical address for saving */
	param[2] = 0;
	param[3] = 1;
	param[4] = 1;

	ret = save_secure_ram_context(__pa(param));

	return ret;
}

/**
 * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
 * @idx: The PPA API index

@@ -31,6 +31,8 @@
/* Maximum Secure memory storage size */
#define OMAP_SECURE_RAM_STORAGE (88 * SZ_1K)

#define OMAP3_SAVE_SECURE_RAM_SZ 0x803F

/* Secure low power HAL API index */
#define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a
#define OMAP4_HAL_SAVEHW_INDEX 0x1b

@@ -65,6 +67,8 @@ extern u32 omap_smc2(u32 id, u32 falg, u32 pargs);
extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs);
extern phys_addr_t omap_secure_ram_mempool_base(void);
extern int omap_secure_ram_reserve_memblock(void);
extern u32 save_secure_ram_context(u32 args_pa);
extern u32 omap3_save_secure_ram(void __iomem *save_regs, int size);

extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
				  u32 arg1, u32 arg2, u32 arg3, u32 arg4);

@@ -81,10 +81,6 @@ extern unsigned int omap3_do_wfi_sz;
/* ... and its pointer from SRAM after copy */
extern void (*omap3_do_wfi_sram)(void);

/* save_secure_ram_context function pointer and size, for copy to SRAM */
extern int save_secure_ram_context(u32 *addr);
extern unsigned int save_secure_ram_context_sz;

extern void omap3_save_scratchpad_contents(void);

#define PM_RTA_ERRATUM_i608 (1 << 0)

@@ -48,6 +48,7 @@
#include "prm3xxx.h"
#include "pm.h"
#include "sdrc.h"
#include "omap-secure.h"
#include "sram.h"
#include "control.h"
#include "vc.h"

@@ -66,7 +67,6 @@ struct power_state {

static LIST_HEAD(pwrst_list);

static int (*_omap_save_secure_sram)(u32 *addr);
void (*omap3_do_wfi_sram)(void);

static struct powerdomain *mpu_pwrdm, *neon_pwrdm;

@@ -121,8 +121,8 @@ static void omap3_save_secure_ram_context(void)
	 * will hang the system.
	 */
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
	ret = _omap_save_secure_sram((u32 *)(unsigned long)
				     __pa(omap3_secure_ram_storage));
	ret = omap3_save_secure_ram(omap3_secure_ram_storage,
				    OMAP3_SAVE_SECURE_RAM_SZ);
	pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
	/* Following is for error tracking, it should not happen */
	if (ret) {

@@ -434,15 +434,10 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
 *
 * The minimum set of functions is pushed to SRAM for execution:
 * - omap3_do_wfi for erratum i581 WA,
 * - save_secure_ram_context for security extensions.
 */
void omap_push_sram_idle(void)
{
	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);

	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
				save_secure_ram_context_sz);
}

static void __init pm_errata_configure(void)

@@ -553,7 +548,7 @@ int __init omap3_pm_init(void)
		clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap3_secure_ram_storage =
			kmalloc(0x803F, GFP_KERNEL);
			kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
		if (!omap3_secure_ram_storage)
			pr_err("Memory allocation failed when allocating for secure sram context\n");

@@ -176,17 +176,6 @@ static int am33xx_pwrdm_read_pwrst(struct powerdomain *pwrdm)
	return v;
}

static int am33xx_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
{
	u32 v;

	v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
	v &= AM33XX_LASTPOWERSTATEENTERED_MASK;
	v >>= AM33XX_LASTPOWERSTATEENTERED_SHIFT;

	return v;
}

static int am33xx_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
{
	am33xx_prm_rmw_reg_bits(AM33XX_LOWPOWERSTATECHANGE_MASK,

@@ -357,7 +346,6 @@ struct pwrdm_ops am33xx_pwrdm_operations = {
	.pwrdm_set_next_pwrst = am33xx_pwrdm_set_next_pwrst,
	.pwrdm_read_next_pwrst = am33xx_pwrdm_read_next_pwrst,
	.pwrdm_read_pwrst = am33xx_pwrdm_read_pwrst,
	.pwrdm_read_prev_pwrst = am33xx_pwrdm_read_prev_pwrst,
	.pwrdm_set_logic_retst = am33xx_pwrdm_set_logic_retst,
	.pwrdm_read_logic_pwrst = am33xx_pwrdm_read_logic_pwrst,
	.pwrdm_read_logic_retst = am33xx_pwrdm_read_logic_retst,

@@ -93,20 +93,13 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
ENDPROC(enable_omap3630_toggle_l2_on_restore)

/*
 * Function to call rom code to save secure ram context. This gets
 * relocated to SRAM, so it can be all in .data section. Otherwise
 * we need to initialize api_params separately.
 * Function to call rom code to save secure ram context.
 *
 * r0 = physical address of the parameters
 */
	.data
	.align	3
ENTRY(save_secure_ram_context)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
	adr	r3, api_params		@ r3 points to parameters
	str	r0, [r3,#0x4]		@ r0 has sdram address
	ldr	r12, high_mask
	and	r3, r3, r12
	ldr	r12, sram_phy_addr_mask
	orr	r3, r3, r12
	mov	r3, r0			@ physical address of parameters
	mov	r0, #25			@ set service ID for PPA
	mov	r12, r0			@ copy secure service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1

@@ -120,18 +113,7 @@ ENTRY(save_secure_ram_context)
	nop
	nop
	ldmfd	sp!, {r4 - r11, pc}
	.align
sram_phy_addr_mask:
	.word	SRAM_BASE_P
high_mask:
	.word	0xffff
api_params:
	.word	0x4, 0x0, 0x0, 0x1, 0x1
ENDPROC(save_secure_ram_context)
ENTRY(save_secure_ram_context_sz)
	.word	. - save_secure_ram_context

	.text

/*
 * ======================

@@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = {
	},
};
module_platform_driver(tosa_bt_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dmitry Baryshkov");
MODULE_DESCRIPTION("Bluetooth built-in chip control");

@@ -504,20 +504,13 @@ config CAVIUM_ERRATUM_30115
config QCOM_FALKOR_ERRATUM_1003
	bool "Falkor E1003: Incorrect translation due to ASID change"
	default y
	select ARM64_PAN if ARM64_SW_TTBR0_PAN
	help
	  On Falkor v1, an incorrect ASID may be cached in the TLB when ASID
	  and BADDR are changed together in TTBRx_EL1. The workaround for this
	  issue is to use a reserved ASID in cpu_do_switch_mm() before
	  switching to the new ASID. Saying Y here selects ARM64_PAN if
	  ARM64_SW_TTBR0_PAN is selected. This is done because implementing and
	  maintaining the E1003 workaround in the software PAN emulation code
	  would be an unnecessary complication. The affected Falkor v1 CPU
	  implements ARMv8.1 hardware PAN support and using hardware PAN
	  support versus software PAN emulation is mutually exclusive at
	  runtime.

	  If unsure, say Y.
	  and BADDR are changed together in TTBRx_EL1. Since we keep the ASID
	  in TTBR1_EL1, this situation only occurs in the entry trampoline and
	  then only for entries in the walk cache, since the leaf translation
	  is unchanged. Work around the erratum by invalidating the walk cache
	  entries for the trampoline before entering the kernel proper.

config QCOM_FALKOR_ERRATUM_1009
	bool "Falkor E1009: Prematurely complete a DSB after a TLBI"

@@ -539,6 +532,16 @@ config QCOM_QDF2400_ERRATUM_0065

	  If unsure, say Y.

config QCOM_FALKOR_ERRATUM_E1041
	bool "Falkor E1041: Speculative instruction fetches might cause errant memory access"
	default y
	help
	  Falkor CPU may speculatively fetch instructions from an improper
	  memory location when MMU translation is changed from SCTLR_ELn[M]=1
	  to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem.

	  If unsure, say Y.

endmenu

@@ -803,6 +806,35 @@ config FORCE_MAX_ZONEORDER
	  However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
	  4M allocations matching the default size used by generic code.

config UNMAP_KERNEL_AT_EL0
	bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
	default y
	help
	  Speculation attacks against some high-performance processors can
	  be used to bypass MMU permission checks and leak kernel data to
	  userspace. This can be defended against by unmapping the kernel
	  when running in userspace, mapping it back in on exception entry
	  via a trampoline page in the vector table.

	  If unsure, say Y.

config HARDEN_BRANCH_PREDICTOR
	bool "Harden the branch predictor against aliasing attacks" if EXPERT
	default y
	help
	  Speculation attacks against some high-performance processors rely on
	  being able to manipulate the branch predictor for a victim context by
	  executing aliasing branches in the attacker context. Such attacks
	  can be partially mitigated against by clearing internal branch
	  predictor state and limiting the prediction logic in some situations.

	  This config option will take CPU-specific actions to harden the
	  branch predictor against aliasing attacks and may rely on specific
	  instruction sequences or control bits being set by the system
	  firmware.

	  If unsure, say Y.

menuconfig ARMV8_DEPRECATED
	bool "Emulate deprecated/obsolete ARMv8 instructions"
	depends on COMPAT

@@ -1 +0,0 @@
*.dtb

@@ -61,6 +61,12 @@
		reg = <0x0 0x0 0x0 0x80000000>;
	};

	aliases {
		ethernet0 = &cpm_eth0;
		ethernet1 = &cpm_eth1;
		ethernet2 = &cpm_eth2;
	};

	cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus {
		compatible = "regulator-fixed";
		regulator-name = "usb3h0-vbus";

@@ -61,6 +61,13 @@
		reg = <0x0 0x0 0x0 0x80000000>;
	};

	aliases {
		ethernet0 = &cpm_eth0;
		ethernet1 = &cpm_eth2;
		ethernet2 = &cps_eth0;
		ethernet3 = &cps_eth1;
	};

	cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus {
		compatible = "regulator-fixed";
		regulator-name = "cpm-usb3h0-vbus";

@@ -62,6 +62,12 @@
		reg = <0x0 0x0 0x0 0x80000000>;
	};

	aliases {
		ethernet0 = &cpm_eth0;
		ethernet1 = &cps_eth0;
		ethernet2 = &cps_eth1;
	};

	/* Regulator labels correspond with schematics */
	v_3_3: regulator-3-3v {
		compatible = "regulator-fixed";

@@ -81,6 +81,7 @@
			reg = <0x000>;
			enable-method = "psci";
			cpu-idle-states = <&CPU_SLEEP_0>;
			#cooling-cells = <2>;
		};

		cpu1: cpu@1 {

@@ -97,6 +98,7 @@
			reg = <0x100>;
			enable-method = "psci";
			cpu-idle-states = <&CPU_SLEEP_0>;
			#cooling-cells = <2>;
		};

		cpu3: cpu@101 {

@@ -901,6 +901,7 @@
				     "dsi_phy_regulator";

			#clock-cells = <1>;
			#phy-cells = <0>;

			clocks = <&gcc GCC_MDSS_AHB_CLK>;
			clock-names = "iface_clk";

@@ -1430,8 +1431,8 @@
			#address-cells = <1>;
			#size-cells = <0>;

			qcom,ipc-1 = <&apcs 0 13>;
			qcom,ipc-6 = <&apcs 0 19>;
			qcom,ipc-1 = <&apcs 8 13>;
			qcom,ipc-3 = <&apcs 8 19>;

			apps_smsm: apps@0 {
				reg = <0>;

@@ -185,6 +185,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
	.base.cra_name = "crc32",
	.base.cra_driver_name = "crc32-arm64-ce",
	.base.cra_priority = 200,
	.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
	.base.cra_blocksize = 1,
	.base.cra_module = THIS_MODULE,
}, {

@@ -200,6 +201,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
	.base.cra_name = "crc32c",
	.base.cra_driver_name = "crc32c-arm64-ce",
	.base.cra_priority = 200,
	.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
	.base.cra_blocksize = 1,
	.base.cra_module = THIS_MODULE,
} };

@@ -4,6 +4,7 @@

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>
#include <asm/assembler.h>

@@ -13,51 +14,62 @@
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	.macro	__uaccess_ttbr0_disable, tmp1
	mrs	\tmp1, ttbr1_el1		// swapper_pg_dir
	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 at the end of swapper_pg_dir
	msr	ttbr0_el1, \tmp1		// set reserved TTBR0_EL1
	isb
	sub	\tmp1, \tmp1, #SWAPPER_DIR_SIZE
	msr	ttbr1_el1, \tmp1		// set reserved ASID
	isb
	.endm

	.macro	__uaccess_ttbr0_enable, tmp1
	.macro	__uaccess_ttbr0_enable, tmp1, tmp2
	get_thread_info \tmp1
	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
	mrs	\tmp2, ttbr1_el1
	extr	\tmp2, \tmp2, \tmp1, #48
	ror	\tmp2, \tmp2, #16
	msr	ttbr1_el1, \tmp2		// set the active ASID
	isb
	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
	isb
	.endm

	.macro	uaccess_ttbr0_disable, tmp1
alternative_if_not ARM64_HAS_PAN
	__uaccess_ttbr0_disable \tmp1
alternative_else_nop_endif
	.endm

	.macro	uaccess_ttbr0_enable, tmp1, tmp2
	.macro	uaccess_ttbr0_disable, tmp1, tmp2
alternative_if_not ARM64_HAS_PAN
	save_and_disable_irq \tmp2		// avoid preemption
	__uaccess_ttbr0_enable \tmp1
	__uaccess_ttbr0_disable \tmp1
	restore_irq \tmp2
alternative_else_nop_endif
	.endm

	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
alternative_if_not ARM64_HAS_PAN
	save_and_disable_irq \tmp3		// avoid preemption
	__uaccess_ttbr0_enable \tmp1, \tmp2
	restore_irq \tmp3
alternative_else_nop_endif
	.endm
#else
	.macro	uaccess_ttbr0_disable, tmp1
	.macro	uaccess_ttbr0_disable, tmp1, tmp2
	.endm

	.macro	uaccess_ttbr0_enable, tmp1, tmp2
	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
	.endm
#endif

/*
 * These macros are no-ops when UAO is present.
 */
	.macro	uaccess_disable_not_uao, tmp1
	uaccess_ttbr0_disable \tmp1
	.macro	uaccess_disable_not_uao, tmp1, tmp2
	uaccess_ttbr0_disable \tmp1, \tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
	SET_PSTATE_PAN(1)
alternative_else_nop_endif
	.endm

	.macro	uaccess_enable_not_uao, tmp1, tmp2
	uaccess_ttbr0_enable \tmp1, \tmp2
	.macro	uaccess_enable_not_uao, tmp1, tmp2, tmp3
	uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
alternative_if ARM64_ALT_PAN_NOT_UAO
	SET_PSTATE_PAN(0)
alternative_else_nop_endif

@@ -25,7 +25,6 @@

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>

@@ -96,6 +95,24 @@
	dmb	\opt
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm

/*
 * NOP sequence
 */

@@ -464,39 +481,18 @@ alternative_endif
	mrs	\rd, sp_el0
	.endm

/*
 * Errata workaround prior to TTBR0_EL1 update
 *
 * val:  TTBR value with new BADDR, preserved
 * tmp0: temporary register, clobbered
 * tmp1: other temporary register, clobbered
/**
 * Errata workaround prior to disable MMU. Insert an ISB immediately prior
 * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
 */
	.macro	pre_ttbr0_update_workaround, val, tmp0, tmp1
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	mrs	\tmp0, ttbr0_el1
	mov	\tmp1, #FALKOR_RESERVED_ASID
	bfi	\tmp0, \tmp1, #48, #16		// reserved ASID + old BADDR
	msr	ttbr0_el1, \tmp0
	.macro	pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
	bfi	\tmp0, \val, #0, #48		// reserved ASID + new BADDR
	msr	ttbr0_el1, \tmp0
	isb
alternative_else_nop_endif
#endif
	.endm

/*
 * Errata workaround post TTBR0_EL1 update.
 */
	.macro	post_ttbr0_update_workaround
#ifdef CONFIG_CAVIUM_ERRATUM_27456
alternative_if ARM64_WORKAROUND_CAVIUM_27456
	ic	iallu
	dsb	nsh
	isb
alternative_else_nop_endif
#endif
	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
	.endm

#endif /* __ASM_ASSEMBLER_H */

@@ -31,6 +31,8 @@
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

#define csdb()		asm volatile("hint #20" : : : "memory")

#define mb()		dsb(sy)
#define rmb()		dsb(ld)
#define wmb()		dsb(st)

@@ -38,6 +40,27 @@
#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)

/*
 * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
 * and 0 otherwise.
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
						    unsigned long sz)
{
	unsigned long mask;

	asm volatile(
	"	cmp	%1, %2\n"
	"	sbc	%0, xzr, xzr\n"
	: "=r" (mask)
	: "r" (idx), "Ir" (sz)
	: "cc");

	csdb();
	return mask;
}

#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)

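An aside, not part of the diff itself: a minimal C sketch of how a mask with this contract is typically consumed. The helper names mask_nospec() and table_load() are illustrative only; the portable expression follows the style of the generic array_index_nospec() fallback, while the cmp/sbc asm above is the arm64-specific implementation of the same contract (~0UL when 0 <= idx < sz, 0 otherwise).

	/* Sketch only: clamp an index under speculation before using it. */
	static unsigned long mask_nospec(unsigned long idx, unsigned long sz)
	{
		/* ~0UL when idx < sz, else 0; assumes idx and sz < LONG_MAX */
		return ~(long)(idx | (sz - 1UL - idx)) >> (sizeof(long) * 8 - 1);
	}

	static unsigned long table_load(const unsigned long *table,
					unsigned long nr_entries,
					unsigned long idx)
	{
		if (idx >= nr_entries)
			return 0;
		/* even if the branch above is mispredicted, idx is forced to 0 */
		idx &= mask_nospec(idx, nr_entries);
		return table[idx];
	}
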
@@ -40,7 +40,10 @@
#define ARM64_WORKAROUND_858921 19
#define ARM64_WORKAROUND_CAVIUM_30115 20
#define ARM64_HAS_DCPOP 21
#define ARM64_UNMAP_KERNEL_AT_EL0 23
#define ARM64_HARDEN_BRANCH_PREDICTOR 24
#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25

#define ARM64_NCAPS 22
#define ARM64_NCAPS 26

#endif /* __ASM_CPUCAPS_H */

@@ -79,26 +79,37 @@
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
#define ARM_CPU_PART_CORTEX_A57 0xD07
#define ARM_CPU_PART_CORTEX_A72 0xD08
#define ARM_CPU_PART_CORTEX_A53 0xD03
#define ARM_CPU_PART_CORTEX_A73 0xD09
#define ARM_CPU_PART_CORTEX_A75 0xD0A

#define APM_CPU_PART_POTENZA 0x000

#define CAVIUM_CPU_PART_THUNDERX 0x0A1
#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
#define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
#define CAVIUM_CPU_PART_THUNDERX2 0x0AF

#define BRCM_CPU_PART_VULCAN 0x516

#define QCOM_CPU_PART_FALKOR_V1 0x800
#define QCOM_CPU_PART_FALKOR 0xC00
#define QCOM_CPU_PART_KRYO 0x200

#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)

#ifndef __ASSEMBLY__

@@ -121,19 +121,21 @@ static inline void efi_set_pgd(struct mm_struct *mm)
	if (mm != current->active_mm) {
		/*
		 * Update the current thread's saved ttbr0 since it is
		 * restored as part of a return from exception. Set
		 * the hardware TTBR0_EL1 using cpu_switch_mm()
		 * directly to enable potential errata workarounds.
		 * restored as part of a return from exception. Enable
		 * access to the valid TTBR0_EL1 and invoke the errata
		 * workaround directly since there is no return from
		 * exception when invoking the EFI run-time services.
		 */
		update_saved_ttbr0(current, mm);
		cpu_switch_mm(mm->pgd, mm);
		uaccess_ttbr0_enable();
		post_ttbr_update_workaround();
	} else {
		/*
		 * Defer the switch to the current thread's TTBR0_EL1
		 * until uaccess_enable(). Restore the current
		 * thread's saved ttbr0 corresponding to its active_mm
		 */
		cpu_set_reserved_ttbr0();
		uaccess_ttbr0_disable();
		update_saved_ttbr0(current, current->active_mm);
	}
}

@@ -58,6 +58,11 @@ enum fixed_addresses {
	FIX_APEI_GHES_NMI,
#endif /* CONFIG_ACPI_APEI_GHES */

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	FIX_ENTRY_TRAMP_DATA,
	FIX_ENTRY_TRAMP_TEXT,
#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
	__end_of_permanent_fixed_addresses,

	/*

@@ -48,9 +48,10 @@ do { \
} while (0)

static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
{
	int oldval = 0, ret, tmp;
	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);

	pagefault_disable();

@@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
}

static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
			      u32 oldval, u32 newval)
{
	int ret = 0;
	u32 val, tmp;
	u32 __user *uaddr;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
	if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
		return -EFAULT;

	uaddr = __uaccess_mask_ptr(_uaddr);
	uaccess_enable();
	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
	"	prfm	pstl1strm, %2\n"

@@ -66,6 +66,8 @@ extern u32 __kvm_get_mdcr_el2(void);

extern u32 __init_stage2_translation(void);

extern void __qcom_hyp_sanitize_btac_predictors(void);

#endif

#endif /* __ARM_KVM_ASM_H__ */

@@ -384,4 +384,9 @@ static inline void __cpu_init_stage2(void)
		  "PARange is %d bits, unsupported configuration!", parange);
}

static inline bool kvm_arm_harden_branch_predictor(void)
{
	return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
}

#endif /* __ARM64_KVM_HOST_H__ */

@@ -309,5 +309,43 @@ static inline unsigned int kvm_get_vmid_bits(void)
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu.h>

static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kvm_ksym_ref(__kvm_hyp_vector);

	if (data->fn) {
		vect = __bp_harden_hyp_vecs_start +
		       data->hyp_vectors_slot * SZ_2K;

		if (!has_vhe())
			vect = lm_alias(vect);
	}

	return vect;
}

static inline int kvm_map_vectors(void)
{
	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
				   PAGE_HYP_EXEC);
}

#else
static inline void *kvm_get_hyp_vector(void)
{
	return kvm_ksym_ref(__kvm_hyp_vector);
}

static inline int kvm_map_vectors(void)
{
	return 0;
}
#endif

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */

@@ -1,27 +0,0 @@
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_PSCI_H__
#define __ARM64_KVM_PSCI_H__

#define KVM_ARM_PSCI_0_1 1
#define KVM_ARM_PSCI_0_2 2

int kvm_psci_version(struct kvm_vcpu *vcpu);
int kvm_psci_call(struct kvm_vcpu *vcpu);

#endif /* __ARM64_KVM_PSCI_H__ */

@@ -61,8 +61,6 @@
 * KIMAGE_VADDR - the virtual address of the start of the kernel image
 * VA_BITS - the maximum number of bits for virtual addresses.
 * VA_START - the first kernel virtual address.
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */
#define VA_BITS (CONFIG_ARM64_VA_BITS)
#define VA_START (UL(0xffffffffffffffff) - \

@@ -77,19 +75,6 @@
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
#define TASK_SIZE_64 (UL(1) << VA_BITS)

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 UL(0x100000000)
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
		TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
		TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE TASK_SIZE_64
#endif /* CONFIG_COMPAT */

#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))

#define KERNEL_START _text
#define KERNEL_END _end

@@ -17,6 +17,10 @@
#define __ASM_MMU_H

#define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
#define USER_ASID_FLAG (UL(1) << 48)
#define TTBR_ASID_MASK (UL(0xffff) << 48)

#ifndef __ASSEMBLY__

typedef struct {
	atomic64_t id;

@@ -31,6 +35,49 @@ typedef struct {
 */
#define ASID(mm) ((mm)->context.id.counter & 0xffff)

static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	int hyp_vectors_slot;
	bp_hardening_cb_t fn;
};

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return this_cpu_ptr(&bp_hardening_data);
}

static inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
		return;

	d = arm64_get_bp_hardening_data();
	if (d->fn)
		d->fn();
}
#else
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
	return NULL;
}

static inline void arm64_apply_bp_hardening(void) { }
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */

extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);

@@ -41,4 +88,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
extern void mark_linear_text_alias_ro(void);

#endif /* !__ASSEMBLY__ */
#endif

@@ -19,8 +19,6 @@
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#define FALKOR_RESERVED_ASID 1

#ifndef __ASSEMBLY__

#include <linux/compiler.h>

@@ -57,6 +55,13 @@ static inline void cpu_set_reserved_ttbr0(void)
	isb();
}

static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_set_reserved_ttbr0();
	cpu_do_switch_mm(virt_to_phys(pgd),mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in

@@ -170,7 +175,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
	else
		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

	task_thread_info(tsk)->ttbr0 = ttbr;
	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,

@@ -225,6 +230,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#define activate_mm(prev,next)	switch_mm(prev, next, current)

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

#endif /* !__ASSEMBLY__ */

@@ -26,7 +26,7 @@

#define check_pgt_cache()		do { } while (0)

#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))

#if CONFIG_PGTABLE_LEVELS > 2

@@ -272,6 +272,7 @@
#define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT)
#define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT)

#define TCR_A1 (UL(1) << 22)
#define TCR_ASID16 (UL(1) << 36)
#define TCR_TBI0 (UL(1) << 37)
#define TCR_HA (UL(1) << 39)

@@ -34,8 +34,14 @@

#include <asm/pgtable-types.h>

#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)

#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)

#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))

@@ -47,23 +53,24 @@
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT

#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
#define PAGE_KERNEL __pgprot(PROT_NORMAL)
#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)

#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
#define PAGE_HYP_RO __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)

#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)

@@ -684,6 +684,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:

@@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);

#include <asm/memory.h>

#define cpu_switch_mm(pgd,mm) \
do { \
	BUG_ON(pgd == swapper_pg_dir); \
	cpu_do_switch_mm(virt_to_phys(pgd),mm); \
} while (0)

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */

@@ -19,6 +19,13 @@
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

#define TASK_SIZE_64 (UL(1) << VA_BITS)

#define KERNEL_DS UL(-1)
#define USER_DS (TASK_SIZE_64 - 1)

#ifndef __ASSEMBLY__

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").

@@ -37,6 +44,22 @@
#include <asm/ptrace.h>
#include <asm/types.h>

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 UL(0x100000000)
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
		TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
		TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE TASK_SIZE_64
#endif /* CONFIG_COMPAT */

#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))

#define STACK_TOP_MAX TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE 0xffff0000

@@ -194,4 +217,5 @@ static inline void spin_lock_prefetch(const void *ptr)
int cpu_enable_pan(void *__unused);
int cpu_enable_cache_maint_trap(void *__unused);

#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */

@@ -332,6 +332,8 @@
#define ID_AA64ISAR1_DPB_SHIFT 0

/* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60
#define ID_AA64PFR0_CSV2_SHIFT 56
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
#define ID_AA64PFR0_FP_SHIFT 16

@@ -23,6 +23,7 @@

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.

@@ -54,6 +55,11 @@

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)

#define __tlbi_user(op, arg) do { \
	if (arm64_kernel_unmapped_at_el0()) \
		__tlbi(op, (arg) | USER_ASID_FLAG); \
} while (0)

/*
 * TLB Management
 * ==============

@@ -115,6 +121,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

@@ -125,6 +132,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	dsb(ish);
}

@@ -151,10 +159,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level)
		if (last_level) {
			__tlbi(vale1is, addr);
		else
			__tlbi_user(vale1is, addr);
		} else {
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}

@@ -194,6 +205,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);

	__tlbi(vae1is, addr);
	__tlbi_user(vae1is, addr);
	dsb(ish);
}

@@ -35,16 +35,20 @@
#include <asm/compiler.h>
#include <asm/extable.h>

#define KERNEL_DS (-1UL)
#define get_ds() (KERNEL_DS)

#define USER_DS TASK_SIZE_64
#define get_fs() (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	dsb(nsh);
	isb();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

@@ -66,22 +70,32 @@ static inline void set_fs(mm_segment_t fs)
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= current->addr_limit
 *
 * This needs 65-bit arithmetic.
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
#define __range_ok(addr, size) \
({ \
	unsigned long __addr = (unsigned long)(addr); \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
		: "=&r" (flag), "=&r" (roksum) \
		: "1" (__addr), "Ir" (size), \
		  "r" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; \
})
static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
{
	unsigned long limit = current_thread_info()->addr_limit;

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %0, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");

	return addr;
}

/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end

@@ -90,7 +104,7 @@ static inline void set_fs(mm_segment_t fs)
 */
#define untagged_addr(addr)	sign_extend64(addr, 55)

#define access_ok(type, addr, size)	__range_ok(addr, size)
#define access_ok(type, addr, size)	__range_ok((unsigned long)(addr), size)
#define user_addr_max	get_fs

#define _ASM_EXTABLE(from, to) \

@@ -105,17 +119,23 @@ static inline void set_fs(mm_segment_t fs)
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long ttbr;
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
	ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
	write_sysreg(ttbr, ttbr0_el1);
	write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags;
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'

@@ -123,7 +143,17 @@ static inline void __uaccess_ttbr0_enable(void)
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

@@ -192,6 +222,26 @@ static inline void uaccess_enable_not_uao(void)
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %1, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"

@@ -244,28 +294,33 @@ do { \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)

#define __get_user(x, ptr) \
#define __get_user_check(x, ptr, err) \
({ \
	int __gu_err = 0; \
	__get_user_err((x), (ptr), __gu_err); \
	__gu_err; \
	__typeof__(*(ptr)) __user *__p = (ptr); \
	might_fault(); \
	if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
		__p = uaccess_mask_ptr(__p); \
		__get_user_err((x), __p, (err)); \
	} else { \
		(x) = 0; (err) = -EFAULT; \
	} \
})

#define __get_user_error(x, ptr, err) \
({ \
	__get_user_err((x), (ptr), (err)); \
	__get_user_check((x), (ptr), (err)); \
	(void)0; \
})

#define get_user(x, ptr) \
#define __get_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __user *__p = (ptr); \
	might_fault(); \
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
		__get_user((x), __p) : \
		((x) = 0, -EFAULT); \
	int __gu_err = 0; \
	__get_user_check((x), (ptr), __gu_err); \
	__gu_err; \
})

#define get_user	__get_user

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
	asm volatile( \
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \

@@ -308,43 +363,63 @@ do { \
	uaccess_disable_not_uao(); \
} while (0)

#define __put_user(x, ptr) \
#define __put_user_check(x, ptr, err) \
({ \
	int __pu_err = 0; \
	__put_user_err((x), (ptr), __pu_err); \
	__pu_err; \
	__typeof__(*(ptr)) __user *__p = (ptr); \
	might_fault(); \
	if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
		__p = uaccess_mask_ptr(__p); \
		__put_user_err((x), __p, (err)); \
	} else { \
		(err) = -EFAULT; \
	} \
})

#define __put_user_error(x, ptr, err) \
({ \
	__put_user_err((x), (ptr), (err)); \
	__put_user_check((x), (ptr), (err)); \
	(void)0; \
})

#define put_user(x, ptr) \
#define __put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __user *__p = (ptr); \
	might_fault(); \
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
		__put_user((x), __p) : \
		-EFAULT; \
	int __pu_err = 0; \
	__put_user_check((x), (ptr), __pu_err); \
	__pu_err; \
})

#define put_user	__put_user

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user __arch_copy_from_user
#define raw_copy_from_user(to, from, n) \
({ \
	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user __arch_copy_to_user
extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
#define raw_copy_to_user(to, from, n) \
({ \
	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n) \
({ \
	__arch_copy_in_user(__uaccess_mask_ptr(to), \
			    __uaccess_mask_ptr(from), (n)); \
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
	return n;
}
#define clear_user	__clear_user

extern long strncpy_from_user(char *dest, const char __user *src, long count);

@@ -358,7 +433,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __
static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

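Another aside, not from the commit: a rough C rendering of the semantics of the __uaccess_mask_ptr() idea above, assuming an address limit of the form 2^n - 1 (which USER_DS and KERNEL_DS satisfy here). The helper name mask_user_ptr() is illustrative only, and this sketch conveys the semantics, not the speculation guarantee: the kernel uses the bics/csel sequence plus csdb() precisely because plain C gives no assurance the compiler will keep the check branch-free.

	#include <stdint.h>

	/* Sketch only: NULL out a pointer that lies above the limit so a
	 * mispredicted access_ok() cannot forward an out-of-range pointer
	 * to the subsequent user access.  With limit == 2^n - 1,
	 * "ptr & ~limit" is zero exactly when the pointer is in range. */
	static inline const void *mask_user_ptr(const void *ptr, uintptr_t limit)
	{
		return ((uintptr_t)ptr & ~limit) ? (const void *)0 : ptr;
	}
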
@@ -55,6 +55,10 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o

ifeq ($(CONFIG_KVM),y)
arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
endif

obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
head-y := head.o

@@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page);
/* user mem (segment) */
EXPORT_SYMBOL(__arch_copy_from_user);
EXPORT_SYMBOL(__arch_copy_to_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(raw_copy_in_user);
EXPORT_SYMBOL(__arch_clear_user);
EXPORT_SYMBOL(__arch_copy_in_user);

/* physical memory */
EXPORT_SYMBOL(memstart_addr);

@@ -24,6 +24,7 @@
#include <linux/kvm_host.h>
#include <linux/suspend.h>
#include <asm/cpufeature.h>
#include <asm/fixmap.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>

@@ -148,11 +149,14 @@ int main(void)
  DEFINE(ARM_SMCCC_RES_X2_OFFS,		offsetof(struct arm_smccc_res, a2));
  DEFINE(ARM_SMCCC_QUIRK_ID_OFFS,	offsetof(struct arm_smccc_quirk, id));
  DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS,	offsetof(struct arm_smccc_quirk, state));

  BLANK();
  DEFINE(HIBERN_PBE_ORIG,	offsetof(struct pbe, orig_address));
  DEFINE(HIBERN_PBE_ADDR,	offsetof(struct pbe, address));
  DEFINE(HIBERN_PBE_NEXT,	offsetof(struct pbe, next));
  DEFINE(ARM64_FTR_SYSVAL,	offsetof(struct arm64_ftr_reg, sys_val));
  BLANK();
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  DEFINE(TRAMP_VALIAS,		TRAMP_VALIAS);
#endif
  return 0;
}

@@ -0,0 +1,83 @@
+/*
+ * Contains CPU specific branch predictor invalidation sequences
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/arm-smccc.h>
+
+.macro ventry target
+	.rept 31
+	nop
+	.endr
+	b	\target
+.endm
+
+.macro vectors target
+	ventry \target + 0x000
+	ventry \target + 0x080
+	ventry \target + 0x100
+	ventry \target + 0x180
+
+	ventry \target + 0x200
+	ventry \target + 0x280
+	ventry \target + 0x300
+	ventry \target + 0x380
+
+	ventry \target + 0x400
+	ventry \target + 0x480
+	ventry \target + 0x500
+	ventry \target + 0x580
+
+	ventry \target + 0x600
+	ventry \target + 0x680
+	ventry \target + 0x700
+	ventry \target + 0x780
+.endm
+
+	.align	11
+ENTRY(__bp_harden_hyp_vecs_start)
+	.rept 4
+	vectors __kvm_hyp_vector
+	.endr
+ENTRY(__bp_harden_hyp_vecs_end)
+
+ENTRY(__qcom_hyp_sanitize_link_stack_start)
+	stp	x29, x30, [sp, #-16]!
+	.rept	16
+	bl	. + 4
+	.endr
+	ldp	x29, x30, [sp], #16
+ENTRY(__qcom_hyp_sanitize_link_stack_end)
+
+.macro smccc_workaround_1 inst
+	sub	sp, sp, #(8 * 4)
+	stp	x2, x3, [sp, #(8 * 0)]
+	stp	x0, x1, [sp, #(8 * 2)]
+	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+	\inst	#0
+	ldp	x2, x3, [sp, #(8 * 0)]
+	ldp	x0, x1, [sp, #(8 * 2)]
+	add	sp, sp, #(8 * 4)
+.endm
+
+ENTRY(__smccc_workaround_1_smc_start)
+	smccc_workaround_1	smc
+ENTRY(__smccc_workaround_1_smc_end)
+
+ENTRY(__smccc_workaround_1_hvc_start)
+	smccc_workaround_1	hvc
+ENTRY(__smccc_workaround_1_hvc_end)
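For readers checking the arithmetic in the new file above: each ventry expands to 31 nops plus one branch, i.e. 32 A64 instructions of 4 bytes, which is the 0x80-byte stride between the vector entries, and a full vectors expansion is 16 entries, i.e. one 2 KiB slot. These are the same constants that __copy_hyp_vect_bpi() in the cpu_errata.c hunk further down relies on. A trivial standalone sanity check of that layout:

#include <assert.h>

int main(void)
{
	const int insn_bytes = 4;			/* one A64 instruction */
	const int ventry_bytes = (31 + 1) * insn_bytes;	/* 31 nops + 1 branch */

	assert(ventry_bytes == 0x80);		/* spacing between vectors */
	assert(16 * ventry_bytes == 2048);	/* one SZ_2K hardened slot */
	return 0;
}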
@@ -16,7 +16,7 @@
 #include <asm/virt.h>
 
 .text
-.pushsection    .idmap.text, "ax"
+.pushsection    .idmap.text, "awx"
 
 /*
  * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for

@@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart)
 	mrs	x12, sctlr_el1
 	ldr	x13, =SCTLR_ELx_FLAGS
 	bic	x12, x12, x13
+	pre_disable_mmu_workaround
 	msr	sctlr_el1, x12
 	isb
 
@@ -30,6 +30,20 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
 			       entry->midr_range_max);
 }
 
+static bool __maybe_unused
+is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u32 model;
+
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+	model = read_cpuid_id();
+	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
+		 MIDR_ARCHITECTURE_MASK;
+
+	return model == entry->midr_model;
+}
+
 static bool
 has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
 			       int scope)
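is_kryo_midr() deliberately compares only part of MIDR_EL1: the implementer field, the architecture field, and the top nibble of the part number (0xf00 << MIDR_PARTNUM_SHIFT), so every Kryo part variant and revision matches a single table entry. A standalone sketch of that field masking; the mask values restate the arm64 definitions by hand and the sample MIDR values are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define MIDR_IMPLEMENTOR_MASK	(0xffu << 24)	/* bits 31:24 */
#define MIDR_ARCHITECTURE_MASK	(0xfu  << 16)	/* bits 19:16 */
#define MIDR_PARTNUM_SHIFT	4

static uint32_t kryo_model_mask(uint32_t midr)
{
	return midr & (MIDR_IMPLEMENTOR_MASK |
		       (0xf00u << MIDR_PARTNUM_SHIFT) |	/* partnum[11:8] */
		       MIDR_ARCHITECTURE_MASK);
}

int main(void)
{
	/* two made-up MIDRs differing only in variant/revision */
	printf("0x%x\n", (unsigned)kryo_model_mask(0x51af8001));
	printf("0x%x\n", (unsigned)kryo_model_mask(0x512f8014));
	return 0;	/* both print the same masked model */
}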
@@ -46,6 +60,174 @@ static int cpu_enable_trap_ctr_access(void *__unused)
 	return 0;
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+#ifdef CONFIG_KVM
+extern char __qcom_hyp_sanitize_link_stack_start[];
+extern char __qcom_hyp_sanitize_link_stack_end[];
+extern char __smccc_workaround_1_smc_start[];
+extern char __smccc_workaround_1_smc_end[];
+extern char __smccc_workaround_1_hvc_start[];
+extern char __smccc_workaround_1_hvc_end[];
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+				const char *hyp_vecs_end)
+{
+	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+	int i;
+
+	for (i = 0; i < SZ_2K; i += 0x80)
+		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+				      const char *hyp_vecs_start,
+				      const char *hyp_vecs_end)
+{
+	static int last_slot = -1;
+	static DEFINE_SPINLOCK(bp_lock);
+	int cpu, slot = -1;
+
+	spin_lock(&bp_lock);
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+			break;
+		}
+	}
+
+	if (slot == -1) {
+		last_slot++;
+		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
+			/ SZ_2K) <= last_slot);
+		slot = last_slot;
+		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+	}
+
+	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+	__this_cpu_write(bp_hardening_data.fn, fn);
+	spin_unlock(&bp_lock);
+}
+#else
+#define __qcom_hyp_sanitize_link_stack_start	NULL
+#define __qcom_hyp_sanitize_link_stack_end	NULL
+#define __smccc_workaround_1_smc_start		NULL
+#define __smccc_workaround_1_smc_end		NULL
+#define __smccc_workaround_1_hvc_start		NULL
+#define __smccc_workaround_1_hvc_end		NULL
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+				      const char *hyp_vecs_start,
+				      const char *hyp_vecs_end)
+{
+	__this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif	/* CONFIG_KVM */
+
+static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
+				    bp_hardening_cb_t fn,
+				    const char *hyp_vecs_start,
+				    const char *hyp_vecs_end)
+{
+	u64 pfr0;
+
+	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+		return;
+
+	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+		return;
+
+	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+}
+
+#include <uapi/linux/psci.h>
+#include <linux/arm-smccc.h>
+#include <linux/psci.h>
+
+static void call_smc_arch_workaround_1(void)
+{
+	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void call_hvc_arch_workaround_1(void)
+{
+	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static int enable_smccc_arch_workaround_1(void *data)
+{
+	const struct arm64_cpu_capabilities *entry = data;
+	bp_hardening_cb_t cb;
+	void *smccc_start, *smccc_end;
+	struct arm_smccc_res res;
+
+	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+		return 0;
+
+	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+		return 0;
+
+	switch (psci_ops.conduit) {
+	case PSCI_CONDUIT_HVC:
+		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+		if (res.a0)
+			return 0;
+		cb = call_hvc_arch_workaround_1;
+		smccc_start = __smccc_workaround_1_hvc_start;
+		smccc_end = __smccc_workaround_1_hvc_end;
+		break;
+
+	case PSCI_CONDUIT_SMC:
+		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+		if (res.a0)
+			return 0;
+		cb = call_smc_arch_workaround_1;
+		smccc_start = __smccc_workaround_1_smc_start;
+		smccc_end = __smccc_workaround_1_smc_end;
+		break;
+
+	default:
+		return 0;
+	}
+
+	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+
+	return 0;
+}
+
+static void qcom_link_stack_sanitization(void)
+{
+	u64 tmp;
+
+	asm volatile("mov	%0, x30		\n"
+		     ".rept	16		\n"
+		     "bl	. + 4		\n"
+		     ".endr			\n"
+		     "mov	x30, %0		\n"
+		     : "=&r" (tmp));
+}
+
+static int qcom_enable_link_stack_sanitization(void *data)
+{
+	const struct arm64_cpu_capabilities *entry = data;
+
+	install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
+				__qcom_hyp_sanitize_link_stack_start,
+				__qcom_hyp_sanitize_link_stack_end);
+
+	return 0;
+}
+#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
 #define MIDR_RANGE(model, min, max) \
 	.def_scope = SCOPE_LOCAL_CPU, \
 	.matches = is_affected_midr_range, \
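enable_smccc_arch_workaround_1() above encodes the SMCCC 1.1 discovery dance: refuse SMCCC 1.0 outright (it cannot be probed safely), then call ARCH_FEATURES with ARCH_WORKAROUND_1 as the argument over whichever conduit (HVC or SMC) PSCI already uses, and only install a hardening callback when firmware reports support, i.e. a0 == 0. A condensed, firmware-stubbed sketch of that control flow; the two function IDs are the published SMCCC constants, everything else here is illustrative:

#include <stdbool.h>
#include <stdint.h>

#define SMCCC_ARCH_FEATURES	0x80000001u
#define SMCCC_ARCH_WORKAROUND_1	0x80008000u

enum conduit { CONDUIT_NONE, CONDUIT_HVC, CONDUIT_SMC };

/* stand-in for the real HVC/SMC instruction: returns a0 of the response */
typedef int64_t (*conduit_call_t)(uint32_t fn, uint32_t arg);

static bool probe_workaround_1(enum conduit c, bool smccc_1_1,
			       conduit_call_t hvc, conduit_call_t smc)
{
	conduit_call_t call;

	if (!smccc_1_1)			/* SMCCC 1.0: no safe probing */
		return false;

	switch (c) {
	case CONDUIT_HVC: call = hvc; break;
	case CONDUIT_SMC: call = smc; break;
	default:	  return false;
	}

	return call(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1) == 0;
}

static int64_t fake_fw(uint32_t fn, uint32_t arg)
{
	/* pretend the workaround is implemented */
	return (fn == SMCCC_ARCH_FEATURES && arg == SMCCC_ARCH_WORKAROUND_1)
		? 0 : -1;
}

int main(void)
{
	return probe_workaround_1(CONDUIT_SMC, true, fake_fw, fake_fw) ? 0 : 1;
}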
@@ -169,6 +351,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 			   MIDR_CPU_VAR_REV(0, 0),
 			   MIDR_CPU_VAR_REV(0, 0)),
 	},
+	{
+		.desc = "Qualcomm Technologies Kryo erratum 1003",
+		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+		.def_scope = SCOPE_LOCAL_CPU,
+		.midr_model = MIDR_QCOM_KRYO,
+		.matches = is_kryo_midr,
+	},
 #endif
 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
 	{
@@ -186,6 +375,56 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.capability = ARM64_WORKAROUND_858921,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 	},
 #endif
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+		.enable = enable_smccc_arch_workaround_1,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+		.enable = enable_smccc_arch_workaround_1,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+		.enable = enable_smccc_arch_workaround_1,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+		.enable = enable_smccc_arch_workaround_1,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+		.enable = qcom_enable_link_stack_sanitization,
+	},
+	{
+		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+		.enable = qcom_enable_link_stack_sanitization,
+	},
+	{
+		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+		.enable = enable_smccc_arch_workaround_1,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+		.enable = enable_smccc_arch_workaround_1,
+	},
+#endif
 	{
 	}
@@ -200,15 +439,18 @@ void verify_local_cpu_errata_workarounds(void)
 {
 	const struct arm64_cpu_capabilities *caps = arm64_errata;
 
-	for (; caps->matches; caps++)
-		if (!cpus_have_cap(caps->capability) &&
-			caps->matches(caps, SCOPE_LOCAL_CPU)) {
+	for (; caps->matches; caps++) {
+		if (cpus_have_cap(caps->capability)) {
+			if (caps->enable)
+				caps->enable((void *)caps);
+		} else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
 			pr_crit("CPU%d: Requires work around for %s, not detected"
 					" at boot time\n",
 				smp_processor_id(),
 				caps->desc ? : "an erratum");
 			cpu_die_early();
 		}
+	}
 }
 
 void update_cpu_errata_workarounds(void)
@@ -125,6 +125,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
 	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
 	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
@@ -796,6 +798,86 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
 						ID_AA64PFR0_FP_SHIFT) < 0;
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+				int __unused)
+{
+	char const *str = "command line option";
+	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	/*
+	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
+	 * ThunderX leads to apparent I-cache corruption of kernel text, which
+	 * ends as well as you might imagine. Don't even try.
+	 */
+	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+		str = "ARM64_WORKAROUND_CAVIUM_27456";
+		__kpti_forced = -1;
+	}
+
+	/* Forced? */
+	if (__kpti_forced) {
+		pr_info_once("kernel page table isolation forced %s by %s\n",
+			     __kpti_forced > 0 ? "ON" : "OFF", str);
+		return __kpti_forced > 0;
+	}
+
+	/* Useful for KASLR robustness */
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return true;
+
+	/* Don't force KPTI for CPUs that are not vulnerable */
+	switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
+	case MIDR_CAVIUM_THUNDERX2:
+	case MIDR_BRCM_VULCAN:
+		return false;
+	}
+
+	/* Defer to CPU feature registers */
+	return !cpuid_feature_extract_unsigned_field(pfr0,
+						     ID_AA64PFR0_CSV3_SHIFT);
+}
+
+static int kpti_install_ng_mappings(void *__unused)
+{
+	typedef void (kpti_remap_fn)(int, int, phys_addr_t);
+	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
+	kpti_remap_fn *remap_fn;
+
+	static bool kpti_applied = false;
+	int cpu = smp_processor_id();
+
+	if (kpti_applied)
+		return 0;
+
+	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+
+	cpu_install_idmap();
+	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
+	cpu_uninstall_idmap();
+
+	if (!cpu)
+		kpti_applied = true;
+
+	return 0;
+}
+
+static int __init parse_kpti(char *str)
+{
+	bool enabled;
+	int ret = strtobool(str, &enabled);
+
+	if (ret)
+		return ret;
+
+	__kpti_forced = enabled ? 1 : -1;
+	return 0;
+}
+__setup("kpti=", parse_kpti);
+#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
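unmap_kernel_at_el0() above boils down to a simple priority order: the ThunderX erratum forces KPTI off (by setting __kpti_forced before the check), an explicit kpti= option wins otherwise, KASLR turns it on for robustness, the safelisted cores opt out, and the CSV3 field of ID_AA64PFR0_EL1 decides the remainder. Restated as a pure function, illustrative only and not kernel code:

#include <stdbool.h>

enum force { AUTO = 0, FORCE_ON = 1, FORCE_OFF = -1 };

static bool want_kpti(enum force forced, bool kaslr, bool safelisted, bool csv3)
{
	if (forced != AUTO)	/* kpti= option, or the ThunderX quirk */
		return forced == FORCE_ON;
	if (kaslr)		/* useful for KASLR robustness */
		return true;
	if (safelisted)		/* e.g. ThunderX2, Vulcan above */
		return false;
	return !csv3;		/* CSV3 == 0 => assume vulnerable */
}

int main(void)
{
	return want_kpti(AUTO, false, false, true);	/* 0: CSV3 says safe */
}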
@@ -882,6 +964,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.def_scope = SCOPE_SYSTEM,
 		.matches = hyp_offset_low,
 	},
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	{
+		.desc = "Kernel page table isolation (KPTI)",
+		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = unmap_kernel_at_el0,
+		.enable = kpti_install_ng_mappings,
+	},
+#endif
 	{
 		/* FP/SIMD is not implemented */
 		.capability = ARM64_HAS_NO_FPSIMD,
@@ -1000,6 +1091,25 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
 			cap_set_elf_hwcap(hwcaps);
 }
 
+/*
+ * Check if the current CPU has a given feature capability.
+ * Should be called from non-preemptible context.
+ */
+static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
+			       unsigned int cap)
+{
+	const struct arm64_cpu_capabilities *caps;
+
+	if (WARN_ON(preemptible()))
+		return false;
+
+	for (caps = cap_array; caps->matches; caps++)
+		if (caps->capability == cap &&
+		    caps->matches(caps, SCOPE_LOCAL_CPU))
+			return true;
+	return false;
+}
+
 void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 			    const char *info)
 {
@@ -1035,7 +1145,7 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 			 * uses an IPI, giving us a PSTATE that disappears when
 			 * we return.
 			 */
-			stop_machine(caps->enable, NULL, cpu_online_mask);
+			stop_machine(caps->enable, (void *)caps, cpu_online_mask);
 		}
 	}
 }
@@ -1078,8 +1188,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
 }
 
 static void
-verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
+verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
 {
+	const struct arm64_cpu_capabilities *caps = caps_list;
 	for (; caps->matches; caps++) {
 		if (!cpus_have_cap(caps->capability))
 			continue;

@@ -1087,13 +1198,13 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
 		 * If the new CPU misses an advertised feature, we cannot proceed
 		 * further, park the cpu.
 		 */
-		if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
+		if (!__this_cpu_has_cap(caps_list, caps->capability)) {
 			pr_crit("CPU%d: missing feature: %s\n",
 					smp_processor_id(), caps->desc);
 			cpu_die_early();
 		}
 		if (caps->enable)
-			caps->enable(NULL);
+			caps->enable((void *)caps);
 	}
 }
 
@@ -1148,25 +1259,6 @@ static void __init mark_const_caps_ready(void)
 	static_branch_enable(&arm64_const_caps_ready);
 }
 
-/*
- * Check if the current CPU has a given feature capability.
- * Should be called from non-preemptible context.
- */
-static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
-			       unsigned int cap)
-{
-	const struct arm64_cpu_capabilities *caps;
-
-	if (WARN_ON(preemptible()))
-		return false;
-
-	for (caps = cap_array; caps->desc; caps++)
-		if (caps->capability == cap && caps->matches)
-			return caps->matches(caps, SCOPE_LOCAL_CPU);
-
-	return false;
-}
-
 extern const struct arm64_cpu_capabilities arm64_errata[];
 
 bool this_cpu_has_cap(unsigned int cap)
@@ -96,6 +96,7 @@ ENTRY(entry)
 	mrs	x0, sctlr_el2
 	bic	x0, x0, #1 << 0		// clear SCTLR.M
 	bic	x0, x0, #1 << 2		// clear SCTLR.C
+	pre_disable_mmu_workaround
 	msr	sctlr_el2, x0
 	isb
 	b	2f

@@ -103,6 +104,7 @@ ENTRY(entry)
 	mrs	x0, sctlr_el1
 	bic	x0, x0, #1 << 0		// clear SCTLR.M
 	bic	x0, x0, #1 << 2		// clear SCTLR.C
+	pre_disable_mmu_workaround
 	msr	sctlr_el1, x0
 	isb
2:
@@ -29,6 +29,8 @@
 #include <asm/esr.h>
 #include <asm/irq.h>
+#include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/asm-uaccess.h>
@@ -69,8 +71,21 @@
 #define BAD_FIQ		2
 #define BAD_ERROR	3
 
-	.macro kernel_ventry	label
+	.macro kernel_ventry, el, label, regsize = 64
 	.align 7
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+	.if	\el == 0
+	.if	\regsize == 64
+	mrs	x30, tpidrro_el0
+	msr	tpidrro_el0, xzr
+	.else
+	mov	x30, xzr
+	.endif
+	.endif
+alternative_else_nop_endif
+#endif
+
 	sub	sp, sp, #S_FRAME_SIZE
 #ifdef CONFIG_VMAP_STACK
 	/*
|
|||
tbnz x0, #THREAD_SHIFT, 0f
|
||||
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
|
||||
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
|
||||
b \label
|
||||
b el\()\el\()_\label
|
||||
|
||||
0:
|
||||
/*
|
||||
|
@ -114,7 +129,12 @@
|
|||
sub sp, sp, x0
|
||||
mrs x0, tpidrro_el0
|
||||
#endif
|
||||
b \label
|
||||
b el\()\el\()_\label
|
||||
.endm
|
||||
|
||||
.macro tramp_alias, dst, sym
|
||||
mov_q \dst, TRAMP_VALIAS
|
||||
add \dst, \dst, #(\sym - .entry.tramp.text)
|
||||
.endm
|
||||
|
||||
.macro kernel_entry, el, regsize = 64
|
||||
|
@@ -147,10 +167,10 @@
 	.else
 	add	x21, sp, #S_FRAME_SIZE
 	get_thread_info tsk
-	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+	/* Save the task's original addr_limit and set USER_DS */
 	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
-	mov	x20, #TASK_SIZE_64
+	mov	x20, #USER_DS
 	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
 	.endif /* \el == 0 */
@@ -185,7 +205,7 @@ alternative_else_nop_endif
 
 	.if	\el != 0
 	mrs	x21, ttbr0_el1
-	tst	x21, #0xffff << 48		// Check for the reserved ASID
+	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
 	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
 	b.eq	1f				// TTBR0 access already disabled
 	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
@@ -246,7 +266,7 @@ alternative_else_nop_endif
 	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
 	.endif
 
-	__uaccess_ttbr0_enable x0
+	__uaccess_ttbr0_enable x0, x1
 
 	.if	\el == 0
 	/*
@@ -255,7 +275,7 @@ alternative_else_nop_endif
 	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
 	 * corruption).
 	 */
-	post_ttbr0_update_workaround
+	bl	post_ttbr_update_workaround
 	.endif
1:
 	.if	\el != 0
@@ -267,18 +287,20 @@ alternative_else_nop_endif
 	.if	\el == 0
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
+	tst	x22, #PSR_MODE32_BIT		// native task?
+	b.eq	3f
+
 #ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
-	tbz	x22, #4, 1f
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 	mrs	x29, contextidr_el1
 	msr	contextidr_el1, x29
 #else
 	msr contextidr_el1, xzr
 #endif
-1:
alternative_else_nop_endif
 #endif
+3:
 	.endif
 
 	msr	elr_el1, x21			// set up the return data
@@ -300,7 +322,21 @@ alternative_else_nop_endif
 	ldp	x28, x29, [sp, #16 * 14]
 	ldr	lr, [sp, #S_LR]
 	add	sp, sp, #S_FRAME_SIZE		// restore sp
-	eret					// return to kernel
+
+	.if	\el == 0
+alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	bne	4f
+	msr	far_el1, x30
+	tramp_alias	x30, tramp_exit_native
+	br	x30
+4:
+	tramp_alias	x30, tramp_exit_compat
+	br	x30
+#endif
+	.else
+	eret
+	.endif
 	.endm
 
 	.macro	irq_stack_entry
@@ -340,6 +376,7 @@ alternative_else_nop_endif
  * x7 is reserved for the system call number in 32-bit mode.
  */
wsc_nr	.req	w25		// number of system calls
+xsc_nr	.req	x25		// number of system calls (zero-extended)
wscno	.req	w26		// syscall number
xscno	.req	x26		// syscall number (zero-extended)
stbl	.req	x27		// syscall table pointer
@@ -365,31 +402,31 @@ tsk	.req	x28		// current thread_info
 
 	.align	11
ENTRY(vectors)
-	kernel_ventry	el1_sync_invalid		// Synchronous EL1t
-	kernel_ventry	el1_irq_invalid			// IRQ EL1t
-	kernel_ventry	el1_fiq_invalid			// FIQ EL1t
-	kernel_ventry	el1_error_invalid		// Error EL1t
+	kernel_ventry	1, sync_invalid			// Synchronous EL1t
+	kernel_ventry	1, irq_invalid			// IRQ EL1t
+	kernel_ventry	1, fiq_invalid			// FIQ EL1t
+	kernel_ventry	1, error_invalid		// Error EL1t
 
-	kernel_ventry	el1_sync			// Synchronous EL1h
-	kernel_ventry	el1_irq				// IRQ EL1h
-	kernel_ventry	el1_fiq_invalid			// FIQ EL1h
-	kernel_ventry	el1_error_invalid		// Error EL1h
+	kernel_ventry	1, sync				// Synchronous EL1h
+	kernel_ventry	1, irq				// IRQ EL1h
+	kernel_ventry	1, fiq_invalid			// FIQ EL1h
+	kernel_ventry	1, error_invalid		// Error EL1h
 
-	kernel_ventry	el0_sync			// Synchronous 64-bit EL0
-	kernel_ventry	el0_irq				// IRQ 64-bit EL0
-	kernel_ventry	el0_fiq_invalid			// FIQ 64-bit EL0
-	kernel_ventry	el0_error_invalid		// Error 64-bit EL0
+	kernel_ventry	0, sync				// Synchronous 64-bit EL0
+	kernel_ventry	0, irq				// IRQ 64-bit EL0
+	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
+	kernel_ventry	0, error_invalid		// Error 64-bit EL0
 
 #ifdef CONFIG_COMPAT
-	kernel_ventry	el0_sync_compat			// Synchronous 32-bit EL0
-	kernel_ventry	el0_irq_compat			// IRQ 32-bit EL0
-	kernel_ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
-	kernel_ventry	el0_error_invalid_compat	// Error 32-bit EL0
+	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
+	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
+	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
+	kernel_ventry	0, error_invalid_compat, 32	// Error 32-bit EL0
 #else
-	kernel_ventry	el0_sync_invalid		// Synchronous 32-bit EL0
-	kernel_ventry	el0_irq_invalid			// IRQ 32-bit EL0
-	kernel_ventry	el0_fiq_invalid			// FIQ 32-bit EL0
-	kernel_ventry	el0_error_invalid		// Error 32-bit EL0
+	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
+	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
+	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
+	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
 #endif
END(vectors)
@@ -687,13 +724,15 @@ el0_ia:
 	 * Instruction abort handling
 	 */
 	mrs	x26, far_el1
-	// enable interrupts before calling the main handler
-	enable_dbg_and_irq
+	enable_dbg
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
 	ct_user_exit
 	mov	x0, x26
 	mov	x1, x25
 	mov	x2, sp
-	bl	do_mem_abort
+	bl	do_el0_ia_bp_hardening
 	b	ret_to_user
el0_fpsimd_acc:
 	/*
@@ -720,8 +759,10 @@ el0_sp_pc:
 	 * Stack or PC alignment exception handling
 	 */
 	mrs	x26, far_el1
-	// enable interrupts before calling the main handler
-	enable_dbg_and_irq
+	enable_dbg
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
 	ct_user_exit
 	mov	x0, x26
 	mov	x1, x25
@@ -780,6 +821,11 @@ el0_irq_naked:
 #endif
 
 	ct_user_exit
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	tbz	x22, #55, 1f
+	bl	do_el0_irq_bp_hardening
+1:
+#endif
 	irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -848,6 +894,7 @@ el0_svc_naked:	// compat entry point
 	b.ne	__sys_trace
 	cmp	wscno, wsc_nr			// check upper syscall limit
 	b.hs	ni_sys
+	mask_nospec64 xscno, xsc_nr, x19	// enforce bounds for syscall number
 	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
 	blr	x16				// call sys_* routine
 	b	ret_fast_syscall
@@ -895,6 +942,117 @@ __ni_sys_trace:
 
 	.popsection				// .entry.text
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ */
+	.pushsection ".entry.tramp.text", "ax"
+
+	.macro tramp_map_kernel, tmp
+	mrs	\tmp, ttbr1_el1
+	sub	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	bic	\tmp, \tmp, #USER_ASID_FLAG
+	msr	ttbr1_el1, \tmp
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
+	/* ASID already in \tmp[63:48] */
+	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
+	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
+	/* 2MB boundary containing the vectors, so we nobble the walk cache */
+	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
+	isb
+	tlbi	vae1, \tmp
+	dsb	nsh
+alternative_else_nop_endif
+#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
+	.endm
+
+	.macro tramp_unmap_kernel, tmp
+	mrs	\tmp, ttbr1_el1
+	add	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	orr	\tmp, \tmp, #USER_ASID_FLAG
+	msr	ttbr1_el1, \tmp
+	/*
+	 * We avoid running the post_ttbr_update_workaround here because
+	 * it's only needed by Cavium ThunderX, which requires KPTI to be
+	 * disabled.
+	 */
+	.endm
+
+	.macro tramp_ventry, regsize = 64
+	.align	7
+1:
+	.if	\regsize == 64
+	msr	tpidrro_el0, x30	// Restored in kernel_ventry
+	.endif
+	/*
+	 * Defend against branch aliasing attacks by pushing a dummy
+	 * entry onto the return stack and using a RET instruction to
+	 * enter the full-fat kernel vectors.
+	 */
+	bl	2f
+	b	.
+2:
+	tramp_map_kernel	x30
+#ifdef CONFIG_RANDOMIZE_BASE
+	adr	x30, tramp_vectors + PAGE_SIZE
+alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+	ldr	x30, [x30]
+#else
+	ldr	x30, =vectors
+#endif
+	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
+	msr	vbar_el1, x30
+	add	x30, x30, #(1b - tramp_vectors)
+	isb
+	ret
+	.endm
+
+	.macro tramp_exit, regsize = 64
+	adr	x30, tramp_vectors
+	msr	vbar_el1, x30
+	tramp_unmap_kernel	x30
+	.if	\regsize == 64
+	mrs	x30, far_el1
+	.endif
+	eret
+	.endm
+
+	.align	11
+ENTRY(tramp_vectors)
+	.space	0x400
+
+	tramp_ventry
+	tramp_ventry
+	tramp_ventry
+	tramp_ventry
+
+	tramp_ventry	32
+	tramp_ventry	32
+	tramp_ventry	32
+	tramp_ventry	32
+END(tramp_vectors)
+
+ENTRY(tramp_exit_native)
+	tramp_exit
+END(tramp_exit_native)
+
+ENTRY(tramp_exit_compat)
+	tramp_exit	32
+END(tramp_exit_compat)
+
+	.ltorg
+	.popsection				// .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+	.pushsection ".rodata", "a"
+	.align PAGE_SHIFT
+	.globl	__entry_tramp_data_start
+__entry_tramp_data_start:
+	.quad	vectors
+	.popsection				// .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 /*
  * Special system call wrappers.
  */
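The heart of tramp_map_kernel/tramp_unmap_kernel above is plain arithmetic on TTBR1_EL1: the trampoline-visible user tables sit SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE away from swapper_pg_dir, and the matching user and kernel contexts use ASIDs that differ only in their low bit, which lands in TTBR bit 48. A userspace restatement of that swizzle; the offsets and sample values are hypothetical, only the structure mirrors the macros:

#include <stdint.h>

#define USER_ASID_FLAG	(1ULL << 48)	/* ASID bit 0 lives in TTBR[48] */

static uint64_t map_kernel(uint64_t ttbr1, uint64_t tables_off)
{
	return (ttbr1 - tables_off) & ~USER_ASID_FLAG;	/* even (kernel) ASID */
}

static uint64_t unmap_kernel(uint64_t ttbr1, uint64_t tables_off)
{
	return (ttbr1 + tables_off) | USER_ASID_FLAG;	/* odd (user) ASID */
}

int main(void)
{
	uint64_t user = 0x40001000ULL | USER_ASID_FLAG;	/* made-up TTBR1 */
	uint64_t off = 0x3000;				/* made-up table offset */

	/* mapping then unmapping the kernel must restore the user TTBR1 */
	return unmap_kernel(map_kernel(user, off), off) == user ? 0 : 1;
}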
@@ -371,7 +371,7 @@ ENDPROC(__primary_switched)
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
-.section ".idmap.text","ax"
+.section ".idmap.text","awx"
 
ENTRY(kimage_vaddr)
	.quad		_text - TEXT_OFFSET

@@ -732,6 +732,7 @@ __primary_switch:
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
+	pre_disable_mmu_workaround
	msr	sctlr_el1, x20			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping
@@ -314,16 +314,14 @@ void tls_preserve_current_state(void)
 
 static void tls_thread_switch(struct task_struct *next)
 {
-	unsigned long tpidr, tpidrro;
-
 	tls_preserve_current_state();
 
-	tpidr = *task_user_tls(next);
-	tpidrro = is_compat_thread(task_thread_info(next)) ?
-		  next->thread.tp_value : 0;
+	if (is_compat_thread(task_thread_info(next)))
+		write_sysreg(next->thread.tp_value, tpidrro_el0);
+	else if (!arm64_kernel_unmapped_at_el0())
+		write_sysreg(0, tpidrro_el0);
 
-	write_sysreg(tpidr, tpidr_el0);
-	write_sysreg(tpidrro, tpidrro_el0);
+	write_sysreg(*task_user_tls(next), tpidr_el0);
 }
 
 /* Restore the UAO state depending on next's addr_limit */
@@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel)
 	mrs	x0, sctlr_el2
 	ldr	x1, =SCTLR_ELx_FLAGS
 	bic	x0, x0, x1
+	pre_disable_mmu_workaround
 	msr	sctlr_el2, x0
 	isb
1:

Some files were not shown because too many files have changed in this diff.