Linux 5.1-rc6

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAly8rGYeHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGmZMH/1IRB0E1Qmzz8yzw
 wj79UuRGYPqxDDSWW+wNc8sU4Ic7iYirn9APHAztCdQqsjmzU/OVLfSa3JhdBe5w
 THo7pbGKBqEDcWnKfNk/21jXFNLZ1vr9BoQv2DGU2MMhHAyo/NZbalo2YVtpQPmM
 OCRth5n+LzvH7rGrX7RYgWu24G9l3NMfgtaDAXBNXesCGFAjVRrdkU5CBAaabvtU
 4GWh/nnutndOOLdByL3x+VZ3H3fIBnbNjcIGCglvvqzk7h3hrfGEl4UCULldTxcM
 IFsfMUhSw1ENy7F6DHGbKIG90cdCJcrQ8J/ziEzjj/KLGALluutfFhVvr6YCM2J6
 2RgU8CY=
 =CfY1
 -----END PGP SIGNATURE-----

Merge tag 'v5.1-rc6' into for-5.2/block

Pull in v5.1-rc6 to resolve two conflicts. One is in BFQ, in just a
comment, and is trivial. The other one is a conflict due to a later fix
in the bio multi-page work, and needs a bit more care.

* tag 'v5.1-rc6': (770 commits)
  Linux 5.1-rc6
  block: make sure that bvec length can't be overflow
  block: kill all_q_node in request_queue
  x86/cpu/intel: Lower the "ENERGY_PERF_BIAS: Set to normal" message's log priority
  coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
  mm/kmemleak.c: fix unused-function warning
  init: initialize jump labels before command line option parsing
  kernel/watchdog_hld.c: hard lockup message should end with a newline
  kcov: improve CONFIG_ARCH_HAS_KCOV help text
  mm: fix inactive list balancing between NUMA nodes and cgroups
  mm/hotplug: treat CMA pages as unmovable
  proc: fixup proc-pid-vm test
  proc: fix map_files test on F29
  mm/vmstat.c: fix /proc/vmstat format for CONFIG_DEBUG_TLBFLUSH=y CONFIG_SMP=n
  mm/memory_hotplug: do not unlock after failing to take the device_hotplug_lock
  mm: swapoff: shmem_unuse() stop eviction without igrab()
  mm: swapoff: take notice of completion sooner
  mm: swapoff: remove too limiting SWAP_UNUSE_MAX_TRIES
  mm: swapoff: shmem_find_swap_entries() filter out other types
  slab: store tagged freelist for off-slab slabmgmt
  ...

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 5c61ee2cd5 (Jens Axboe, 2019-04-22 09:47:36 -06:00)
777 changed files with 8124 additions and 4471 deletions


@@ -78,6 +78,8 @@ ForEachMacros:
   - 'ata_qc_for_each_with_internal'
   - 'ax25_for_each'
   - 'ax25_uid_for_each'
+  - '__bio_for_each_bvec'
+  - 'bio_for_each_bvec'
   - 'bio_for_each_integrity_vec'
   - '__bio_for_each_segment'
   - 'bio_for_each_segment'
@@ -118,10 +120,12 @@ ForEachMacros:
   - 'drm_for_each_legacy_plane'
   - 'drm_for_each_plane'
   - 'drm_for_each_plane_mask'
+  - 'drm_for_each_privobj'
   - 'drm_mm_for_each_hole'
   - 'drm_mm_for_each_node'
   - 'drm_mm_for_each_node_in_range'
   - 'drm_mm_for_each_node_safe'
+  - 'flow_action_for_each'
   - 'for_each_active_drhd_unit'
   - 'for_each_active_iommu'
   - 'for_each_available_child_of_node'
@@ -158,6 +162,9 @@ ForEachMacros:
   - 'for_each_dss_dev'
   - 'for_each_efi_memory_desc'
   - 'for_each_efi_memory_desc_in_map'
+  - 'for_each_element'
+  - 'for_each_element_extid'
+  - 'for_each_element_id'
   - 'for_each_endpoint_of_node'
   - 'for_each_evictable_lru'
   - 'for_each_fib6_node_rt_rcu'
@@ -195,6 +202,7 @@ ForEachMacros:
   - 'for_each_net_rcu'
   - 'for_each_new_connector_in_state'
   - 'for_each_new_crtc_in_state'
+  - 'for_each_new_mst_mgr_in_state'
   - 'for_each_new_plane_in_state'
   - 'for_each_new_private_obj_in_state'
   - 'for_each_node'
@@ -210,8 +218,10 @@ ForEachMacros:
   - 'for_each_of_pci_range'
   - 'for_each_old_connector_in_state'
   - 'for_each_old_crtc_in_state'
+  - 'for_each_old_mst_mgr_in_state'
   - 'for_each_oldnew_connector_in_state'
   - 'for_each_oldnew_crtc_in_state'
+  - 'for_each_oldnew_mst_mgr_in_state'
   - 'for_each_oldnew_plane_in_state'
   - 'for_each_oldnew_plane_in_state_reverse'
   - 'for_each_oldnew_private_obj_in_state'
@@ -243,6 +253,9 @@ ForEachMacros:
   - 'for_each_sg_dma_page'
   - 'for_each_sg_page'
   - 'for_each_sibling_event'
+  - 'for_each_subelement'
+  - 'for_each_subelement_extid'
+  - 'for_each_subelement_id'
   - '__for_each_thread'
   - 'for_each_thread'
   - 'for_each_zone'
@@ -252,6 +265,8 @@ ForEachMacros:
   - 'fwnode_for_each_child_node'
   - 'fwnode_graph_for_each_endpoint'
   - 'gadget_for_each_ep'
+  - 'genradix_for_each'
+  - 'genradix_for_each_from'
   - 'hash_for_each'
   - 'hash_for_each_possible'
   - 'hash_for_each_possible_rcu'
@@ -293,7 +308,11 @@ ForEachMacros:
   - 'key_for_each'
   - 'key_for_each_safe'
   - 'klp_for_each_func'
+  - 'klp_for_each_func_safe'
+  - 'klp_for_each_func_static'
   - 'klp_for_each_object'
+  - 'klp_for_each_object_safe'
+  - 'klp_for_each_object_static'
   - 'kvm_for_each_memslot'
   - 'kvm_for_each_vcpu'
   - 'list_for_each'
@@ -324,6 +343,8 @@ ForEachMacros:
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
   - 'media_device_for_each_pad'
+  - 'mp_bvec_for_each_page'
+  - 'mp_bvec_for_each_segment'
   - 'nanddev_io_for_each_page'
   - 'netdev_for_each_lower_dev'
   - 'netdev_for_each_lower_private'
@@ -375,6 +396,7 @@ ForEachMacros:
   - 'rht_for_each_rcu'
   - 'rht_for_each_rcu_continue'
   - '__rq_for_each_bio'
+  - 'rq_for_each_bvec'
   - 'rq_for_each_segment'
   - 'scsi_for_each_prot_sg'
   - 'scsi_for_each_sg'
@@ -410,6 +432,8 @@ ForEachMacros:
   - 'v4l2_m2m_for_each_src_buf_safe'
   - 'virtio_device_for_each_vq'
   - 'xa_for_each'
+  - 'xa_for_each_marked'
+  - 'xa_for_each_start'
   - 'xas_for_each'
  - 'xas_for_each_conflict'
   - 'xas_for_each_marked'


@@ -156,6 +156,8 @@ Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
+Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
+Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
 Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>


@@ -56,12 +56,12 @@ situation from a state where some tasks are stalled but the CPU is
 still doing productive work. As such, time spent in this subset of the
 stall state is tracked separately and exported in the "full" averages.
 
-The ratios are tracked as recent trends over ten, sixty, and three
-hundred second windows, which gives insight into short term events as
-well as medium and long term trends. The total absolute stall time is
-tracked and exported as well, to allow detection of latency spikes
-which wouldn't necessarily make a dent in the time averages, or to
-average trends over custom time frames.
+The ratios (in %) are tracked as recent trends over ten, sixty, and
+three hundred second windows, which gives insight into short term events
+as well as medium and long term trends. The total absolute stall time
+(in us) is tracked and exported as well, to allow detection of latency
+spikes which wouldn't necessarily make a dent in the time averages,
+or to average trends over custom time frames.
 
 Cgroup2 interface
 =================


@@ -148,16 +148,16 @@ The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()``
 for the type. The maximum value of ``BTF_INT_BITS()`` is 128.
 
 The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values
-for this int. For example, a bitfield struct member has: * btf member bit
-offset 100 from the start of the structure, * btf member pointing to an int
-type, * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
+for this int. For example, a bitfield struct member has:
+ * btf member bit offset 100 from the start of the structure,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
 
 Then in the struct memory layout, this member will occupy ``4`` bits starting
 from bits ``100 + 2 = 102``.
 
 Alternatively, the bitfield struct member can be the following to access the
 same bits as the above:
  * btf member bit offset 102,
  * btf member pointing to an int type,
  * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
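To make the arithmetic concrete, here is a hypothetical C struct in which a
4-bit bitfield lands exactly at bit offset 102 of the layout, assuming the
usual allocation of bitfields from the low bits of each 32-bit storage unit
(the struct and field names are illustrative only; how a given compiler splits
the member offset versus ``BTF_INT_OFFSET()`` in the emitted BTF may differ)::

    struct bitfield_example {
            unsigned int a;         /* bits   0..31  */
            unsigned int b;         /* bits  32..63  */
            unsigned int c;         /* bits  64..95  */
            unsigned int d:4;       /* bits  96..99  */
            unsigned int e:2;       /* bits 100..101 */
            unsigned int f:4;       /* bits 102..105 - the member described above */
    };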


@@ -228,7 +228,7 @@ patternProperties:
           - renesas,r9a06g032-smp
           - rockchip,rk3036-smp
           - rockchip,rk3066-smp
           - socionext,milbeaut-m10v-smp
           - ste,dbx500-smp
 
       cpu-release-addr:


@@ -26,7 +26,7 @@ Required node properties:
 
 Optional node properties:
 
- - ti,mode: Operation mode (see above).
+ - ti,mode: Operation mode (u8) (see above).
 
 Example (operation mode 2):
 
@@ -34,5 +34,5 @@ Example (operation mode 2):
 adc128d818@1d {
 	compatible = "ti,adc128d818";
 	reg = <0x1d>;
-	ti,mode = <2>;
+	ti,mode = /bits/ 8 <2>;
 };


@@ -102,9 +102,11 @@ Byte sequences
                 dictionary which is empty, and that it will always be
                 invalid at this place.
 
-      17      : bitstream version. If the first byte is 17, the next byte
-                gives the bitstream version (version 1 only). If the first byte
-                is not 17, the bitstream version is 0.
+      17      : bitstream version. If the first byte is 17, and compressed
+                stream length is at least 5 bytes (length of shortest possible
+                versioned bitstream), the next byte gives the bitstream version
+                (version 1 only).
+                Otherwise, the bitstream version is 0.
 
       18..21  : copy 0..3 literals
                 state = (byte - 17) = 0..3  [ copy <state> literals ]
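Expressed as code, the version-detection rule above amounts to something like
the following (a sketch only; the function name and parameters are invented
for illustration and are not part of the documented format)::

    #include <stddef.h>

    /* A versioned bitstream must be at least 5 bytes long and begin with 17. */
    static int lzo_bitstream_version(const unsigned char *in, size_t in_len)
    {
            if (in_len >= 5 && in[0] == 17)
                    return in[1];   /* currently only version 1 is defined */
            return 0;               /* otherwise it is a version 0 bitstream */
    }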


@@ -623,7 +623,7 @@ the remote via /dev/input/event devices.
 
     -  .. row 78
 
-       -  ``KEY_SCREEN``
+       -  ``KEY_ASPECT_RATIO``
 
        -  Select screen aspect ratio
 
@@ -631,7 +631,7 @@ the remote via /dev/input/event devices.
 
     -  .. row 79
 
-       -  ``KEY_ZOOM``
+       -  ``KEY_FULL_SCREEN``
 
        -  Put device into zoom/full screen mode


@@ -0,0 +1,126 @@
.. SPDX-License-Identifier: GPL-2.0
==================
BPF Flow Dissector
==================
Overview
========
Flow dissector is a routine that parses metadata out of the packets. It's
used in the various places in the networking subsystem (RFS, flow hash, etc).
BPF flow dissector is an attempt to reimplement C-based flow dissector logic
in BPF to gain all the benefits of BPF verifier (namely, limits on the
number of instructions and tail calls).
API
===
BPF flow dissector programs operate on an ``__sk_buff``. However, only the
limited set of fields is allowed: ``data``, ``data_end`` and ``flow_keys``.
``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
and output arguments.
The inputs are:
* ``nhoff`` - initial offset of the networking header
* ``thoff`` - initial offset of the transport header, initialized to nhoff
* ``n_proto`` - L3 protocol type, parsed out of L2 header
The flow dissector BPF program should fill out the rest of the ``struct
bpf_flow_keys`` fields. Input arguments ``nhoff/thoff/n_proto`` should also be
adjusted accordingly.
The return code of the BPF program is either BPF_OK to indicate successful
dissection, or BPF_DROP to indicate parsing error.
__sk_buff->data
===============
In the VLAN-less case, this is what the initial state of the BPF flow
dissector looks like::
+------+------+------------+-----------+
| DMAC | SMAC | ETHER_TYPE | L3_HEADER |
+------+------+------------+-----------+
^
|
+-- flow dissector starts here
.. code:: c
skb->data + flow_keys->nhoff point to the first byte of L3_HEADER
flow_keys->thoff = nhoff
flow_keys->n_proto = ETHER_TYPE
In case of VLAN, flow dissector can be called with the two different states.
Pre-VLAN parsing::
+------+------+------+-----+-----------+-----------+
| DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+------+------+------+-----+-----------+-----------+
^
|
+-- flow dissector starts here
.. code:: c
skb->data + flow_keys->nhoff point to the first byte of TCI
flow_keys->thoff = nhoff
flow_keys->n_proto = TPID
Please note that TPID can be 802.1AD and, hence, BPF program would
have to parse VLAN information twice for double tagged packets.
Post-VLAN parsing::
+------+------+------+-----+-----------+-----------+
| DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+------+------+------+-----+-----------+-----------+
^
|
+-- flow dissector starts here
.. code:: c
skb->data + flow_keys->nhoff point to the first byte of L3_HEADER
flow_keys->thoff = nhoff
flow_keys->n_proto = ETHER_TYPE
In this case VLAN information has been processed before the flow dissector
and BPF flow dissector is not required to handle it.
The takeaway here is as follows: the BPF flow dissector program can be called
with an optional VLAN header and should gracefully handle both cases: when a
single or double VLAN is present and when it is not. The same program is used
for both cases, so it has to be written carefully to handle both.
Reference Implementation
========================
See ``tools/testing/selftests/bpf/progs/bpf_flow.c`` for the reference
implementation and ``tools/testing/selftests/bpf/flow_dissector_load.[hc]``
for the loader. bpftool can be used to load BPF flow dissector program as well.
The reference implementation is organized as follows:
* ``jmp_table`` map that contains sub-programs for each supported L3 protocol
* ``_dissect`` routine - entry point; it does input ``n_proto`` parsing and
does ``bpf_tail_call`` to the appropriate L3 handler
Since BPF at this point doesn't support looping (or any jumping back),
jmp_table is used instead to handle multiple levels of encapsulation (and
IPv6 options).
Current Limitations
===================
The BPF flow dissector doesn't support exporting all the metadata that the
in-kernel C-based implementation can export. A notable example is single VLAN
(802.1Q) and double VLAN (802.1AD) tags. Please refer to ``struct bpf_flow_keys``
for the set of information that can currently be exported from the BPF context.
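As a rough illustration of the API described above, a stripped-down dissector
that handles only plain (un-encapsulated, VLAN-less) IPv4 might look as
follows. This is a sketch only: the real reference implementation is the
``bpf_flow.c`` selftest mentioned above, and the local
``bpf_helpers.h``/``bpf_endian.h`` headers are assumed to come from the
selftests tree::

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/ip.h>
    #include "bpf_helpers.h"
    #include "bpf_endian.h"

    SEC("flow_dissector")
    int dissect(struct __sk_buff *skb)
    {
            struct bpf_flow_keys *keys = skb->flow_keys;
            void *data_end = (void *)(long)skb->data_end;
            void *data = (void *)(long)skb->data;
            struct iphdr *iph = data + keys->nhoff;

            if (keys->n_proto != bpf_htons(ETH_P_IP))
                    return BPF_DROP;        /* not handled by this sketch */
            if ((void *)(iph + 1) > data_end)
                    return BPF_DROP;        /* truncated header */

            keys->addr_proto = ETH_P_IP;
            keys->ipv4_src = iph->saddr;
            keys->ipv4_dst = iph->daddr;
            keys->ip_proto = iph->protocol;
            keys->thoff = keys->nhoff + iph->ihl * 4;

            return BPF_OK;
    }

    char _license[] SEC("license") = "GPL";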


@@ -9,6 +9,7 @@ Contents:
    netdev-FAQ
    af_xdp
    batman-adv
+   bpf_flow_dissector
    can
    can_ucan_protocol
    device_drivers/freescale/dpaa2/index


@@ -1009,16 +1009,18 @@ The kernel interface functions are as follows:
 
  (*) Check call still alive.
 
-	u32 rxrpc_kernel_check_life(struct socket *sock,
-				    struct rxrpc_call *call);
+	bool rxrpc_kernel_check_life(struct socket *sock,
+				     struct rxrpc_call *call,
+				     u32 *_life);
 	void rxrpc_kernel_probe_life(struct socket *sock,
 				     struct rxrpc_call *call);
 
-     The first function returns a number that is updated when ACKs are received
-     from the peer (notably including PING RESPONSE ACKs which we can elicit by
-     sending PING ACKs to see if the call still exists on the server).  The
-     caller should compare the numbers of two calls to see if the call is still
-     alive after waiting for a suitable interval.
+     The first function passes back in *_life a number that is updated when
+     ACKs are received from the peer (notably including PING RESPONSE ACKs
+     which we can elicit by sending PING ACKs to see if the call still exists
+     on the server).  The caller should compare the numbers of two calls to see
+     if the call is still alive after waiting for a suitable interval.  It also
+     returns true as long as the call hasn't yet reached the completed state.
 
      This allows the caller to work out if the server is still contactable and
     if the call is still alive on the server while waiting for the server to
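A caller-side sketch of the new prototype (the wrapper function and the
``last_life`` bookkeeping are invented for illustration; only the two rxrpc
calls come from the interface documented above)::

    #include <net/af_rxrpc.h>

    /* Decide whether a call still looks alive, given the life counter we
     * recorded on the previous poll. */
    static bool call_still_alive(struct socket *sock, struct rxrpc_call *call,
                                 u32 *last_life)
    {
            u32 life;

            if (!rxrpc_kernel_check_life(sock, call, &life))
                    return false;           /* call reached the completed state */

            if (life == *last_life)
                    /* No new ACKs seen: ping the peer so a later poll can tell. */
                    rxrpc_kernel_probe_life(sock, call);
            else
                    *last_life = life;      /* the peer has been heard from */

            return true;
    }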


@@ -1893,14 +1893,15 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
 
 ARM/NUVOTON NPCM ARCHITECTURE
 M:	Avi Fishman <avifishman70@gmail.com>
 M:	Tomer Maimon <tmaimon77@gmail.com>
+M:	Tali Perry <tali.perry1@gmail.com>
 R:	Patrick Venture <venture@google.com>
 R:	Nancy Yuen <yuenn@google.com>
-R:	Brendan Higgins <brendanhiggins@google.com>
+R:	Benjamin Fair <benjaminfair@google.com>
 L:	openbmc@lists.ozlabs.org (moderated for non-subscribers)
 S:	Supported
 F:	arch/arm/mach-npcm/
 F:	arch/arm/boot/dts/nuvoton-npcm*
-F:	include/dt-bindings/clock/nuvoton,npcm7xx-clks.h
+F:	include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
 F:	drivers/*/*npcm*
 F:	Documentation/devicetree/bindings/*/*npcm*
 F:	Documentation/devicetree/bindings/*/*/*npcm*
@@ -4129,7 +4130,7 @@ F:	drivers/cpuidle/*
 F:	include/linux/cpuidle.h
 
 CRAMFS FILESYSTEM
-M:	Nicolas Pitre <nico@linaro.org>
+M:	Nicolas Pitre <nico@fluxnic.net>
 S:	Maintained
 F:	Documentation/filesystems/cramfs.txt
 F:	fs/cramfs/
@@ -5833,7 +5834,7 @@ L:	netdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/ABI/testing/sysfs-bus-mdio
 F:	Documentation/devicetree/bindings/net/mdio*
-F:	Documentation/networking/phy.txt
+F:	Documentation/networking/phy.rst
 F:	drivers/net/phy/
 F:	drivers/of/of_mdio.c
 F:	drivers/of/of_net.c
@@ -7332,7 +7333,6 @@ F:	Documentation/devicetree/bindings/i3c/
 F:	Documentation/driver-api/i3c
 F:	drivers/i3c/
 F:	include/linux/i3c/
-F:	include/dt-bindings/i3c/
 
 I3C DRIVER FOR SYNOPSYS DESIGNWARE
 M:	Vitor Soares <vitor.soares@synopsys.com>
@@ -7515,7 +7515,7 @@ F:	include/net/mac802154.h
 F:	include/net/af_ieee802154.h
 F:	include/net/cfg802154.h
 F:	include/net/ieee802154_netdev.h
-F:	Documentation/networking/ieee802154.txt
+F:	Documentation/networking/ieee802154.rst
 
 IFE PROTOCOL
 M:	Yotam Gigi <yotam.gi@gmail.com>
@@ -10144,7 +10144,7 @@ F:	drivers/spi/spi-at91-usart.c
 F:	Documentation/devicetree/bindings/mfd/atmel-usart.txt
 
 MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
-M:	Woojung Huh <Woojung.Huh@microchip.com>
+M:	Woojung Huh <woojung.huh@microchip.com>
 M:	Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
@@ -13981,7 +13981,7 @@ F:	drivers/media/rc/serial_ir.c
 SFC NETWORK DRIVER
 M:	Solarflare linux maintainers <linux-net-drivers@solarflare.com>
 M:	Edward Cree <ecree@solarflare.com>
-M:	Bert Kenward <bkenward@solarflare.com>
+M:	Martin Habets <mhabets@solarflare.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/sfc/
@@ -16508,7 +16508,7 @@ F:	drivers/char/virtio_console.c
 F:	include/linux/virtio_console.h
 F:	include/uapi/linux/virtio_console.h
 
-VIRTIO CORE, NET AND BLOCK DRIVERS
+VIRTIO CORE AND NET DRIVERS
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 M:	Jason Wang <jasowang@redhat.com>
 L:	virtualization@lists.linux-foundation.org
@@ -16523,6 +16523,19 @@ F:	include/uapi/linux/virtio_*.h
 F:	drivers/crypto/virtio/
 F:	mm/balloon_compaction.c
 
+VIRTIO BLOCK AND SCSI DRIVERS
+M:	"Michael S. Tsirkin" <mst@redhat.com>
+M:	Jason Wang <jasowang@redhat.com>
+R:	Paolo Bonzini <pbonzini@redhat.com>
+R:	Stefan Hajnoczi <stefanha@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+S:	Maintained
+F:	drivers/block/virtio_blk.c
+F:	drivers/scsi/virtio_scsi.c
+F:	include/uapi/linux/virtio_blk.h
+F:	include/uapi/linux/virtio_scsi.h
+F:	drivers/vhost/scsi.c
+
 VIRTIO CRYPTO DRIVER
 M:	Gonglei <arei.gonglei@huawei.com>
 L:	virtualization@lists.linux-foundation.org


@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*


@@ -55,12 +55,11 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
  */
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-		      unsigned int i, unsigned int n, unsigned long *args)
+		      unsigned long *args)
 {
 	unsigned long *inside_ptregs = &(regs->r0);
-	inside_ptregs -= i;
-
-	BUG_ON((i + n) > 6);
+	unsigned int n = 6;
+	unsigned int i = 0;
 
 	while (n--) {
 		args[i++] = (*inside_ptregs);


@@ -57,6 +57,24 @@
 		enable-active-high;
 	};
 
+	/* TPS79501 */
+	v1_8d_reg: fixedregulator-v1_8d {
+		compatible = "regulator-fixed";
+		regulator-name = "v1_8d";
+		vin-supply = <&vbat>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	/* TPS79501 */
+	v3_3d_reg: fixedregulator-v3_3d {
+		compatible = "regulator-fixed";
+		regulator-name = "v3_3d";
+		vin-supply = <&vbat>;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
 	matrix_keypad: matrix_keypad0 {
 		compatible = "gpio-matrix-keypad";
 		debounce-delay-ms = <5>;
@@ -499,10 +517,10 @@
 	status = "okay";
 
 	/* Regulators */
-	AVDD-supply = <&vaux2_reg>;
-	IOVDD-supply = <&vaux2_reg>;
-	DRVDD-supply = <&vaux2_reg>;
-	DVDD-supply = <&vbat>;
+	AVDD-supply = <&v3_3d_reg>;
+	IOVDD-supply = <&v3_3d_reg>;
+	DRVDD-supply = <&v3_3d_reg>;
+	DVDD-supply = <&v1_8d_reg>;
 };
 };


@@ -73,6 +73,24 @@
 		enable-active-high;
 	};
 
+	/* TPS79518 */
+	v1_8d_reg: fixedregulator-v1_8d {
+		compatible = "regulator-fixed";
+		regulator-name = "v1_8d";
+		vin-supply = <&vbat>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	/* TPS78633 */
+	v3_3d_reg: fixedregulator-v3_3d {
+		compatible = "regulator-fixed";
+		regulator-name = "v3_3d";
+		vin-supply = <&vbat>;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
 	leds {
 		pinctrl-names = "default";
 		pinctrl-0 = <&user_leds_s0>;
@@ -501,10 +519,10 @@
 	status = "okay";
 
 	/* Regulators */
-	AVDD-supply = <&vaux2_reg>;
-	IOVDD-supply = <&vaux2_reg>;
-	DRVDD-supply = <&vaux2_reg>;
-	DVDD-supply = <&vbat>;
+	AVDD-supply = <&v3_3d_reg>;
+	IOVDD-supply = <&v3_3d_reg>;
+	DRVDD-supply = <&v3_3d_reg>;
+	DVDD-supply = <&v1_8d_reg>;
 };
 };


@@ -1762,7 +1762,7 @@
 			reg = <0xcc000 0x4>;
 			reg-names = "rev";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
-			clocks = <&l4ls_clkctrl AM3_D_CAN0_CLKCTRL 0>;
+			clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>;
 			clock-names = "fck";
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -1785,7 +1785,7 @@
 			reg = <0xd0000 0x4>;
 			reg-names = "rev";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
-			clocks = <&l4ls_clkctrl AM3_D_CAN1_CLKCTRL 0>;
+			clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>;
 			clock-names = "fck";
 			#address-cells = <1>;
 			#size-cells = <1>;


@@ -254,6 +254,7 @@
 			};
 
 			vccio_sd: LDO_REG5 {
+				regulator-boot-on;
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <3300000>;
 				regulator-name = "vccio_sd";
@@ -430,7 +431,7 @@
 	bus-width = <4>;
 	cap-mmc-highspeed;
 	cap-sd-highspeed;
-	card-detect-delay = <200>;
+	broken-cd;
 	disable-wp;			/* wp not hooked up */
 	pinctrl-names = "default";
 	pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;


@@ -25,8 +25,6 @@
 
 	gpio_keys: gpio-keys {
 		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
 
 		pinctrl-names = "default";
 		pinctrl-0 = <&pwr_key_l>;


@@ -70,7 +70,7 @@
 			compatible = "arm,cortex-a12";
 			reg = <0x501>;
 			resets = <&cru SRST_CORE1>;
-			operating-points = <&cpu_opp_table>;
+			operating-points-v2 = <&cpu_opp_table>;
 			#cooling-cells = <2>; /* min followed by max */
 			clock-latency = <40000>;
 			clocks = <&cru ARMCLK>;
@@ -80,7 +80,7 @@
 			compatible = "arm,cortex-a12";
 			reg = <0x502>;
 			resets = <&cru SRST_CORE2>;
-			operating-points = <&cpu_opp_table>;
+			operating-points-v2 = <&cpu_opp_table>;
 			#cooling-cells = <2>; /* min followed by max */
 			clock-latency = <40000>;
 			clocks = <&cru ARMCLK>;
@@ -90,7 +90,7 @@
 			compatible = "arm,cortex-a12";
 			reg = <0x503>;
 			resets = <&cru SRST_CORE3>;
-			operating-points = <&cpu_opp_table>;
+			operating-points-v2 = <&cpu_opp_table>;
 			#cooling-cells = <2>; /* min followed by max */
 			clock-latency = <40000>;
 			clocks = <&cru ARMCLK>;
@@ -1119,8 +1119,6 @@
 		clock-names = "ref", "pclk";
 		power-domains = <&power RK3288_PD_VIO>;
 		rockchip,grf = <&grf>;
-		#address-cells = <1>;
-		#size-cells = <0>;
 		status = "disabled";
 
 		ports {
@@ -1282,27 +1280,27 @@
 	gpu_opp_table: gpu-opp-table {
 		compatible = "operating-points-v2";
 
-		opp@100000000 {
+		opp-100000000 {
 			opp-hz = /bits/ 64 <100000000>;
 			opp-microvolt = <950000>;
 		};
-		opp@200000000 {
+		opp-200000000 {
 			opp-hz = /bits/ 64 <200000000>;
 			opp-microvolt = <950000>;
 		};
-		opp@300000000 {
+		opp-300000000 {
 			opp-hz = /bits/ 64 <300000000>;
 			opp-microvolt = <1000000>;
 		};
-		opp@400000000 {
+		opp-400000000 {
 			opp-hz = /bits/ 64 <400000000>;
 			opp-microvolt = <1100000>;
 		};
-		opp@500000000 {
+		opp-500000000 {
 			opp-hz = /bits/ 64 <500000000>;
 			opp-microvolt = <1200000>;
 		};
-		opp@600000000 {
+		opp-600000000 {
 			opp-hz = /bits/ 64 <600000000>;
 			opp-microvolt = <1250000>;
 		};

@@ -518,7 +518,7 @@
 #define PIN_PC9__GPIO		PINMUX_PIN(PIN_PC9, 0, 0)
 #define PIN_PC9__FIQ		PINMUX_PIN(PIN_PC9, 1, 3)
 #define PIN_PC9__GTSUCOMP	PINMUX_PIN(PIN_PC9, 2, 1)
-#define PIN_PC9__ISC_D0		PINMUX_PIN(PIN_PC9, 2, 1)
+#define PIN_PC9__ISC_D0		PINMUX_PIN(PIN_PC9, 3, 1)
 #define PIN_PC9__TIOA4		PINMUX_PIN(PIN_PC9, 4, 2)
 #define PIN_PC10		74
 #define PIN_PC10__GPIO		PINMUX_PIN(PIN_PC10, 0, 0)


@@ -213,13 +213,12 @@
 			gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>;
 			gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>;
 			/*
-			 * This chipselect is active high. Just setting the flags
-			 * to GPIO_ACTIVE_HIGH is not enough for the SPI DT bindings,
-			 * it will be ignored, only the special "spi-cs-high" flag
-			 * really counts.
+			 * It's not actually active high, but the frameworks assume
+			 * the polarity of the passed-in GPIO is "normal" (active
+			 * high) then actively drives the line low to select the
+			 * chip.
 			 */
 			cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
-			spi-cs-high;
 			num-chipselects = <1>;
 
 			/*

@@ -55,53 +55,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
-					 unsigned int i, unsigned int n,
 					 unsigned long *args)
 {
-	if (n == 0)
-		return;
-
-	if (i + n > SYSCALL_MAX_ARGS) {
-		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
-		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
-		pr_warn("%s called with max args %d, handling only %d\n",
-			__func__, i + n, SYSCALL_MAX_ARGS);
-		memset(args_bad, 0, n_bad * sizeof(args[0]));
-		n = SYSCALL_MAX_ARGS - i;
-	}
-
-	if (i == 0) {
-		args[0] = regs->ARM_ORIG_r0;
-		args++;
-		i++;
-		n--;
-	}
-
-	memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
+	args[0] = regs->ARM_ORIG_r0;
+	args++;
+
+	memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
-					 unsigned int i, unsigned int n,
 					 const unsigned long *args)
 {
-	if (n == 0)
-		return;
-
-	if (i + n > SYSCALL_MAX_ARGS) {
-		pr_warn("%s called with max args %d, handling only %d\n",
-			__func__, i + n, SYSCALL_MAX_ARGS);
-		n = SYSCALL_MAX_ARGS - i;
-	}
-
-	if (i == 0) {
-		regs->ARM_ORIG_r0 = args[0];
-		args++;
-		i++;
-		n--;
-	}
-
-	memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+	regs->ARM_ORIG_r0 = args[0];
+	args++;
+
+	memcpy(&regs->ARM_r0 + 1, args, 5 * sizeof(args[0]));
 }
 
 static inline int syscall_get_arch(void)

@@ -591,13 +591,13 @@ static int __init at91_pm_backup_init(void)
 
 	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
 	if (!np)
-		goto securam_fail;
+		goto securam_fail_no_ref_dev;
 
 	pdev = of_find_device_by_node(np);
 	of_node_put(np);
 	if (!pdev) {
 		pr_warn("%s: failed to find securam device!\n", __func__);
-		goto securam_fail;
+		goto securam_fail_no_ref_dev;
 	}
 
 	sram_pool = gen_pool_get(&pdev->dev, NULL);
@@ -620,6 +620,8 @@ static int __init at91_pm_backup_init(void)
 	return 0;
 
 securam_fail:
+	put_device(&pdev->dev);
+securam_fail_no_ref_dev:
 	iounmap(pm_data.sfrbu);
 	pm_data.sfrbu = NULL;
 	return ret;


@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
 	}
 };
 
-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
 static struct iop_adma_platform_data iop13xx_adma_0_data = {
 	.hw_id = 0,
 	.pool_size = PAGE_SIZE,
@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
 	.resource = iop13xx_adma_0_resources,
 	.dev = {
 		.dma_mask = &iop13xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop13xx_adma_0_data,
 	},
 };
@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
 	.resource = iop13xx_adma_1_resources,
 	.dev = {
 		.dma_mask = &iop13xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop13xx_adma_1_data,
 	},
 };
@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
 	.resource = iop13xx_adma_2_resources,
 	.dev = {
 		.dma_mask = &iop13xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop13xx_adma_2_data,
 	},
 };


@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
 	}
 };
 
-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
 static struct platform_device iop13xx_tpmi_0_device = {
 	.name = "iop-tpmi",
 	.id = 0,
@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
 	.resource = iop13xx_tpmi_0_resources,
 	.dev = {
 		.dma_mask = &iop13xx_tpmi_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
 	.resource = iop13xx_tpmi_1_resources,
 	.dev = {
 		.dma_mask = &iop13xx_tpmi_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
 	.resource = iop13xx_tpmi_2_resources,
 	.dev = {
 		.dma_mask = &iop13xx_tpmi_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
 	.resource = iop13xx_tpmi_3_resources,
 	.dev = {
 		.dma_mask = &iop13xx_tpmi_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };


@@ -65,6 +65,7 @@ static void m10v_smp_init(unsigned int max_cpus)
 		writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 static void m10v_cpu_die(unsigned int l_cpu)
 {
 	gic_cpu_if_down(0);
@@ -83,12 +84,15 @@ static int m10v_cpu_kill(unsigned int l_cpu)
 
 	return 1;
 }
+#endif
 
 static struct smp_operations m10v_smp_ops __initdata = {
 	.smp_prepare_cpus	= m10v_smp_init,
 	.smp_boot_secondary	= m10v_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
 	.cpu_die		= m10v_cpu_die,
 	.cpu_kill		= m10v_cpu_kill,
+#endif
 };
 CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops);


@@ -182,6 +182,7 @@ static struct resource latch1_resources[] = {
 
 static struct bgpio_pdata latch1_pdata = {
 	.label	= LATCH1_LABEL,
+	.base	= -1,
 	.ngpio	= LATCH1_NGPIO,
 };
 
@@ -219,6 +220,7 @@ static struct resource latch2_resources[] = {
 
 static struct bgpio_pdata latch2_pdata = {
 	.label	= LATCH2_LABEL,
+	.base	= -1,
 	.ngpio	= LATCH2_NGPIO,
 };


@@ -250,8 +250,10 @@ static int __init omapdss_init_of(void)
 	if (!node)
 		return 0;
 
-	if (!of_device_is_available(node))
+	if (!of_device_is_available(node)) {
+		of_node_put(node);
 		return 0;
+	}
 
 	pdev = of_find_device_by_node(node);


@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
 	.resource = iop3xx_dma_0_resources,
 	.dev = {
 		.dma_mask = &iop3xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop3xx_dma_0_data,
 	},
 };
@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
 	.resource = iop3xx_dma_1_resources,
 	.dev = {
 		.dma_mask = &iop3xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop3xx_dma_1_data,
 	},
 };
@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
 	.resource = iop3xx_aau_resources,
 	.dev = {
 		.dma_mask = &iop3xx_adma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(64),
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = (void *) &iop3xx_aau_data,
 	},
 };


@@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
 	.resource	= orion_xor0_shared_resources,
 	.dev		= {
 		.dma_mask		= &orion_xor_dmamask,
-		.coherent_dma_mask	= DMA_BIT_MASK(64),
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
 		.platform_data		= &orion_xor0_pdata,
 	},
 };
@@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
 	.resource	= orion_xor1_shared_resources,
 	.dev		= {
 		.dma_mask		= &orion_xor_dmamask,
-		.coherent_dma_mask	= DMA_BIT_MASK(64),
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
 		.platform_data		= &orion_xor1_pdata,
 	},
 };


@@ -162,6 +162,7 @@
 				rx-fifo-depth = <16384>;
 				snps,multicast-filter-bins = <256>;
 				iommus = <&smmu 1>;
+				altr,sysmgr-syscon = <&sysmgr 0x44 0>;
 				status = "disabled";
 			};
 
@@ -179,6 +180,7 @@
 				rx-fifo-depth = <16384>;
 				snps,multicast-filter-bins = <256>;
 				iommus = <&smmu 2>;
+				altr,sysmgr-syscon = <&sysmgr 0x48 0>;
 				status = "disabled";
 			};
 
@@ -196,6 +198,7 @@
 				rx-fifo-depth = <16384>;
 				snps,multicast-filter-bins = <256>;
 				iommus = <&smmu 3>;
+				altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
 				status = "disabled";
 			};


@@ -108,8 +108,8 @@
 	snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
 	snps,reset-active-low;
 	snps,reset-delays-us = <0 10000 50000>;
-	tx_delay = <0x25>;
-	rx_delay = <0x11>;
+	tx_delay = <0x24>;
+	rx_delay = <0x18>;
 	status = "okay";
 };


@@ -46,8 +46,7 @@
 
 	vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
 		compatible = "regulator-fixed";
-		enable-active-high;
-		gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
+		gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&usb20_host_drv>;
 		regulator-name = "vcc_host1_5v";


@@ -1445,11 +1445,11 @@
 		sdmmc0 {
 			sdmmc0_clk: sdmmc0-clk {
-				rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
+				rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
 			};
 
 			sdmmc0_cmd: sdmmc0-cmd {
-				rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
+				rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
 			};
 
 			sdmmc0_dectn: sdmmc0-dectn {
@@ -1461,14 +1461,14 @@
 			};
 
 			sdmmc0_bus1: sdmmc0-bus1 {
-				rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
+				rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
 			};
 
 			sdmmc0_bus4: sdmmc0-bus4 {
-				rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
-						<1 RK_PA1 1 &pcfg_pull_up_4ma>,
-						<1 RK_PA2 1 &pcfg_pull_up_4ma>,
-						<1 RK_PA3 1 &pcfg_pull_up_4ma>;
+				rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
+						<1 RK_PA1 1 &pcfg_pull_up_8ma>,
+						<1 RK_PA2 1 &pcfg_pull_up_8ma>,
+						<1 RK_PA3 1 &pcfg_pull_up_8ma>;
 			};
 
 			sdmmc0_gpio: sdmmc0-gpio {
@@ -1642,50 +1642,50 @@
 			rgmiim1_pins: rgmiim1-pins {
 				rockchip,pins =
 					/* mac_txclk */
-					<1 RK_PB4 2 &pcfg_pull_none_12ma>,
+					<1 RK_PB4 2 &pcfg_pull_none_8ma>,
 					/* mac_rxclk */
-					<1 RK_PB5 2 &pcfg_pull_none_2ma>,
+					<1 RK_PB5 2 &pcfg_pull_none_4ma>,
 					/* mac_mdio */
-					<1 RK_PC3 2 &pcfg_pull_none_2ma>,
+					<1 RK_PC3 2 &pcfg_pull_none_4ma>,
 					/* mac_txen */
-					<1 RK_PD1 2 &pcfg_pull_none_12ma>,
+					<1 RK_PD1 2 &pcfg_pull_none_8ma>,
 					/* mac_clk */
-					<1 RK_PC5 2 &pcfg_pull_none_2ma>,
+					<1 RK_PC5 2 &pcfg_pull_none_4ma>,
 					/* mac_rxdv */
-					<1 RK_PC6 2 &pcfg_pull_none_2ma>,
+					<1 RK_PC6 2 &pcfg_pull_none_4ma>,
 					/* mac_mdc */
-					<1 RK_PC7 2 &pcfg_pull_none_2ma>,
+					<1 RK_PC7 2 &pcfg_pull_none_4ma>,
 					/* mac_rxd1 */
-					<1 RK_PB2 2 &pcfg_pull_none_2ma>,
+					<1 RK_PB2 2 &pcfg_pull_none_4ma>,
 					/* mac_rxd0 */
-					<1 RK_PB3 2 &pcfg_pull_none_2ma>,
+					<1 RK_PB3 2 &pcfg_pull_none_4ma>,
 					/* mac_txd1 */
-					<1 RK_PB0 2 &pcfg_pull_none_12ma>,
+					<1 RK_PB0 2 &pcfg_pull_none_8ma>,
 					/* mac_txd0 */
-					<1 RK_PB1 2 &pcfg_pull_none_12ma>,
+					<1 RK_PB1 2 &pcfg_pull_none_8ma>,
 					/* mac_rxd3 */
-					<1 RK_PB6 2 &pcfg_pull_none_2ma>,
+					<1 RK_PB6 2 &pcfg_pull_none_4ma>,
 					/* mac_rxd2 */
-					<1 RK_PB7 2 &pcfg_pull_none_2ma>,
+					<1 RK_PB7 2 &pcfg_pull_none_4ma>,
 					/* mac_txd3 */
-					<1 RK_PC0 2 &pcfg_pull_none_12ma>,
+					<1 RK_PC0 2 &pcfg_pull_none_8ma>,
 					/* mac_txd2 */
-					<1 RK_PC1 2 &pcfg_pull_none_12ma>,
+					<1 RK_PC1 2 &pcfg_pull_none_8ma>,
 
 					/* mac_txclk */
-					<0 RK_PB0 1 &pcfg_pull_none>,
+					<0 RK_PB0 1 &pcfg_pull_none_8ma>,
 					/* mac_txen */
-					<0 RK_PB4 1 &pcfg_pull_none>,
+					<0 RK_PB4 1 &pcfg_pull_none_8ma>,
 					/* mac_clk */
-					<0 RK_PD0 1 &pcfg_pull_none>,
+					<0 RK_PD0 1 &pcfg_pull_none_4ma>,
 					/* mac_txd1 */
-					<0 RK_PC0 1 &pcfg_pull_none>,
+					<0 RK_PC0 1 &pcfg_pull_none_8ma>,
 					/* mac_txd0 */
-					<0 RK_PC1 1 &pcfg_pull_none>,
+					<0 RK_PC1 1 &pcfg_pull_none_8ma>,
 					/* mac_txd3 */
-					<0 RK_PC7 1 &pcfg_pull_none>,
+					<0 RK_PC7 1 &pcfg_pull_none_8ma>,
 					/* mac_txd2 */
-					<0 RK_PC6 1 &pcfg_pull_none>;
+					<0 RK_PC6 1 &pcfg_pull_none_8ma>;
 			};
 
 			rmiim1_pins: rmiim1-pins {

@@ -158,6 +158,7 @@
 };
 
 &hdmi {
+	ddc-i2c-bus = <&i2c3>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&hdmi_cec>;
 	status = "okay";


@@ -30,8 +30,8 @@ do {									\
 "	prfm	pstl1strm, %2\n"					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
-"2:	stlxr	%w3, %w0, %2\n"						\
-"	cbnz	%w3, 1b\n"						\
+"2:	stlxr	%w0, %w3, %2\n"						\
+"	cbnz	%w0, 1b\n"						\
 "	dmb	ish\n"							\
 "3:\n"									\
 "	.pushsection .fixup,\"ax\"\n"					\
@@ -57,23 +57,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
 
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("mov	%w0, %w4",
+		__futex_atomic_op("mov	%w3, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("add	%w0, %w1, %w4",
+		__futex_atomic_op("add	%w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("orr	%w0, %w1, %w4",
+		__futex_atomic_op("orr	%w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and	%w0, %w1, %w4",
+		__futex_atomic_op("and	%w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, ~oparg);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("eor	%w0, %w1, %w4",
+		__futex_atomic_op("eor	%w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	default:


@@ -73,4 +73,9 @@ static inline bool is_forbidden_offset_for_adrp(void *place)
 struct plt_entry get_plt_entry(u64 dst, void *pc);
 bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
 
+static inline bool plt_entry_is_initialized(const struct plt_entry *e)
+{
+	return e->adrp || e->add || e->br;
+}
+
 #endif /* __ASM_MODULE_H */


@@ -65,52 +65,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
-					 unsigned int i, unsigned int n,
 					 unsigned long *args)
 {
-	if (n == 0)
-		return;
-
-	if (i + n > SYSCALL_MAX_ARGS) {
-		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
-		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
-
-		pr_warning("%s called with max args %d, handling only %d\n",
-			   __func__, i + n, SYSCALL_MAX_ARGS);
-		memset(args_bad, 0, n_bad * sizeof(args[0]));
-	}
-
-	if (i == 0) {
-		args[0] = regs->orig_x0;
-		args++;
-		i++;
-		n--;
-	}
-
-	memcpy(args, &regs->regs[i], n * sizeof(args[0]));
+	args[0] = regs->orig_x0;
+	args++;
+
+	memcpy(args, &regs->regs[1], 5 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
-					 unsigned int i, unsigned int n,
 					 const unsigned long *args)
 {
-	if (n == 0)
-		return;
-
-	if (i + n > SYSCALL_MAX_ARGS) {
-		pr_warning("%s called with max args %d, handling only %d\n",
-			   __func__, i + n, SYSCALL_MAX_ARGS);
-		n = SYSCALL_MAX_ARGS - i;
-	}
-
-	if (i == 0) {
-		regs->orig_x0 = args[0];
-		args++;
-		i++;
-		n--;
-	}
-
-	memcpy(&regs->regs[i], args, n * sizeof(args[0]));
+	regs->orig_x0 = args[0];
+	args++;
+
+	memcpy(&regs->regs[1], args, 5 * sizeof(args[0]));
 }
 
 /*


@@ -107,8 +107,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
 		if (!plt_entries_equal(mod->arch.ftrace_trampoline,
 				       &trampoline)) {
-			if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-					       &(struct plt_entry){})) {
+			if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
 				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
 				return -EINVAL;
 			}


@@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
 	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
 	unsigned long high = low + SDEI_STACK_SIZE;
 
+	if (!low)
+		return false;
+
 	if (sp < low || sp >= high)
 		return false;
 
@@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
 	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
 	unsigned long high = low + SDEI_STACK_SIZE;
 
+	if (!low)
+		return false;
+
 	if (sp < low || sp >= high)
 		return false;


@@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
 void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
 	struct stackframe frame;
-	int skip;
+	int skip = 0;
 
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
 
+	if (regs) {
+		if (user_mode(regs))
+			return;
+		skip = 1;
+	}
+
 	if (!tsk)
 		tsk = current;
 
@@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	frame.graph = 0;
 #endif
 
-	skip = !!regs;
 	printk("Call trace:\n");
 	do {
 		/* skip until specified stack frame */
@@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
 		return ret;
 
 	print_modules();
-	__show_regs(regs);
 	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
 		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
 		 end_of_stack(tsk));
+	show_regs(regs);
 
-	if (!user_mode(regs)) {
-		dump_backtrace(regs, tsk);
+	if (!user_mode(regs))
 		dump_instr(KERN_EMERG, regs);
-	}
 
 	return ret;
 }


@@ -46,78 +46,27 @@ static inline void syscall_set_return_value(struct task_struct *task,
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
-					 struct pt_regs *regs, unsigned int i,
-					 unsigned int n, unsigned long *args)
+					 struct pt_regs *regs,
+					 unsigned long *args)
 {
-	switch (i) {
-	case 0:
-		if (!n--)
-			break;
-		*args++ = regs->a4;
-	case 1:
-		if (!n--)
-			break;
-		*args++ = regs->b4;
-	case 2:
-		if (!n--)
-			break;
-		*args++ = regs->a6;
-	case 3:
-		if (!n--)
-			break;
-		*args++ = regs->b6;
-	case 4:
-		if (!n--)
-			break;
-		*args++ = regs->a8;
-	case 5:
-		if (!n--)
-			break;
-		*args++ = regs->b8;
-	case 6:
-		if (!n--)
-			break;
-	default:
-		BUG();
-	}
+	*args++ = regs->a4;
+	*args++ = regs->b4;
+	*args++ = regs->a6;
+	*args++ = regs->b6;
+	*args++ = regs->a8;
+	*args   = regs->b8;
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
-					 unsigned int i, unsigned int n,
 					 const unsigned long *args)
 {
-	switch (i) {
-	case 0:
-		if (!n--)
-			break;
-		regs->a4 = *args++;
-	case 1:
-		if (!n--)
-			break;
-		regs->b4 = *args++;
-	case 2:
-		if (!n--)
-			break;
-		regs->a6 = *args++;
-	case 3:
-		if (!n--)
-			break;
-		regs->b6 = *args++;
-	case 4:
-		if (!n--)
-			break;
-		regs->a8 = *args++;
-	case 5:
-		if (!n--)
-			break;
-		regs->a9 = *args++;
-	case 6:
-		if (!n)
-			break;
-	default:
-		BUG();
-	}
+	regs->a4 = *args++;
+	regs->b4 = *args++;
+	regs->a6 = *args++;
+	regs->b6 = *args++;
+	regs->a8 = *args++;
+	regs->a9 = *args;
 }
 
 #endif /* __ASM_C6X_SYSCALLS_H */


@@ -43,30 +43,20 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
 
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-		      unsigned int i, unsigned int n, unsigned long *args)
+		      unsigned long *args)
 {
-	BUG_ON(i + n > 6);
-	if (i == 0) {
-		args[0] = regs->orig_a0;
-		args++;
-		i++;
-		n--;
-	}
-	memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
+	args[0] = regs->orig_a0;
+	args++;
+	memcpy(args, &regs->a1, 5 * sizeof(args[0]));
 }
 
 static inline void
 syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-		      unsigned int i, unsigned int n, const unsigned long *args)
+		      const unsigned long *args)
 {
-	BUG_ON(i + n > 6);
-	if (i == 0) {
-		regs->orig_a0 = args[0];
-		args++;
-		i++;
-		n--;
-	}
-	memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
+	regs->orig_a0 = args[0];
+	args++;
+	memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
 }
 
 static inline int


@@ -17,34 +17,14 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
 
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-		      unsigned int i, unsigned int n, unsigned long *args)
+		      unsigned long *args)
 {
-	BUG_ON(i + n > 6);
-
-	while (n > 0) {
-		switch (i) {
-		case 0:
-			*args++ = regs->er1;
-			break;
-		case 1:
-			*args++ = regs->er2;
-			break;
-		case 2:
-			*args++ = regs->er3;
-			break;
-		case 3:
-			*args++ = regs->er4;
-			break;
-		case 4:
-			*args++ = regs->er5;
-			break;
-		case 5:
-			*args++ = regs->er6;
-			break;
-		}
-		i++;
-		n--;
-	}
+	*args++ = regs->er1;
+	*args++ = regs->er2;
+	*args++ = regs->er3;
+	*args++ = regs->er4;
+	*args++ = regs->er5;
+	*args   = regs->er6;
 }

View File

@ -37,10 +37,8 @@ static inline long syscall_get_nr(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args) unsigned long *args)
{ {
BUG_ON(i + n > 6); memcpy(args, &(&regs->r00)[0], 6 * sizeof(args[0]));
memcpy(args, &(&regs->r00)[i], n * sizeof(args[0]));
} }
#endif #endif

View File

@ -59,26 +59,19 @@ static inline void syscall_set_return_value(struct task_struct *task,
} }
extern void ia64_syscall_get_set_arguments(struct task_struct *task, extern void ia64_syscall_get_set_arguments(struct task_struct *task,
struct pt_regs *regs, unsigned int i, unsigned int n, struct pt_regs *regs, unsigned long *args, int rw);
unsigned long *args, int rw);
static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args) unsigned long *args)
{ {
BUG_ON(i + n > 6); ia64_syscall_get_set_arguments(task, regs, args, 0);
ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
} }
static inline void syscall_set_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args) unsigned long *args)
{ {
BUG_ON(i + n > 6); ia64_syscall_get_set_arguments(task, regs, args, 1);
ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
} }
static inline int syscall_get_arch(void) static inline int syscall_get_arch(void)

View File

@ -2179,12 +2179,11 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
} }
void ia64_syscall_get_set_arguments(struct task_struct *task, void ia64_syscall_get_set_arguments(struct task_struct *task,
struct pt_regs *regs, unsigned int i, unsigned int n, struct pt_regs *regs, unsigned long *args, int rw)
unsigned long *args, int rw)
{ {
struct syscall_get_set_args data = { struct syscall_get_set_args data = {
.i = i, .i = 0,
.n = n, .n = 6,
.args = args, .args = args,
.regs = regs, .regs = regs,
.rw = rw, .rw = rw,

View File

@ -82,18 +82,22 @@ static inline void microblaze_set_syscall_arg(struct pt_regs *regs,
static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args) unsigned long *args)
{ {
unsigned int i = 0;
unsigned int n = 6;
while (n--) while (n--)
*args++ = microblaze_get_syscall_arg(regs, i++); *args++ = microblaze_get_syscall_arg(regs, i++);
} }
static inline void syscall_set_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args) const unsigned long *args)
{ {
unsigned int i = 0;
unsigned int n = 6;
while (n--) while (n--)
microblaze_set_syscall_arg(regs, i++, *args++); microblaze_set_syscall_arg(regs, i++, *args++);
} }

View File

@ -1,6 +1,10 @@
# require CONFIG_CPU_MIPS32_R2=y # require CONFIG_CPU_MIPS32_R2=y
CONFIG_LEGACY_BOARD_OCELOT=y CONFIG_LEGACY_BOARD_OCELOT=y
CONFIG_FIT_IMAGE_FDT_OCELOT=y
CONFIG_BRIDGE=y
CONFIG_GENERIC_PHY=y
CONFIG_MTD=y CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CMDLINE_PARTS=y
@ -19,6 +23,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_NETDEVICES=y CONFIG_NETDEVICES=y
CONFIG_NET_SWITCHDEV=y
CONFIG_NET_DSA=y
CONFIG_MSCC_OCELOT_SWITCH=y CONFIG_MSCC_OCELOT_SWITCH=y
CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y
CONFIG_MDIO_MSCC_MIIM=y CONFIG_MDIO_MSCC_MIIM=y
@ -35,6 +41,8 @@ CONFIG_SPI_DESIGNWARE=y
CONFIG_SPI_DW_MMIO=y CONFIG_SPI_DW_MMIO=y
CONFIG_SPI_SPIDEV=y CONFIG_SPI_SPIDEV=y
CONFIG_PINCTRL_OCELOT=y
CONFIG_GPIO_SYSFS=y CONFIG_GPIO_SYSFS=y
CONFIG_POWER_RESET=y CONFIG_POWER_RESET=y

View File

@ -116,9 +116,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args) unsigned long *args)
{ {
unsigned int i = 0;
unsigned int n = 6;
int ret; int ret;
/* O32 ABI syscall() */ /* O32 ABI syscall() */

View File

@ -33,6 +33,7 @@
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/sigcontext.h> #include <asm/sigcontext.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/irq_regs.h>
static struct hard_trap_info { static struct hard_trap_info {
unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */ unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
@ -214,7 +215,7 @@ void kgdb_call_nmi_hook(void *ignored)
old_fs = get_fs(); old_fs = get_fs();
set_fs(KERNEL_DS); set_fs(KERNEL_DS);
kgdb_nmicallback(raw_smp_processor_id(), NULL); kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
set_fs(old_fs); set_fs(old_fs);
} }

View File

@ -1419,7 +1419,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
sd.nr = syscall; sd.nr = syscall;
sd.arch = syscall_get_arch(); sd.arch = syscall_get_arch();
syscall_get_arguments(current, regs, 0, 6, args); syscall_get_arguments(current, regs, args);
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++)
sd.args[i] = args[i]; sd.args[i] = args[i];
sd.instruction_pointer = KSTK_EIP(current); sd.instruction_pointer = KSTK_EIP(current);

View File

@ -118,7 +118,6 @@ static void shutdown_bridge_irq(struct irq_data *d)
{ {
struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
struct bridge_controller *bc; struct bridge_controller *bc;
int pin = hd->pin;
if (!hd) if (!hd)
return; return;
@ -126,7 +125,7 @@ static void shutdown_bridge_irq(struct irq_data *d)
disable_hub_irq(d); disable_hub_irq(d);
bc = hd->bc; bc = hd->bc;
bridge_clr(bc, b_int_enable, (1 << pin)); bridge_clr(bc, b_int_enable, (1 << hd->pin));
bridge_read(bc, b_wid_tflush); bridge_read(bc, b_wid_tflush);
} }

View File

@ -108,81 +108,41 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
* syscall_get_arguments - extract system call parameter values * syscall_get_arguments - extract system call parameter values
* @task: task of interest, must be blocked * @task: task of interest, must be blocked
* @regs: task_pt_regs() of @task * @regs: task_pt_regs() of @task
* @i: argument index [0,5]
* @n: number of arguments; n+i must be [1,6].
* @args: array filled with argument values * @args: array filled with argument values
* *
* Fetches @n arguments to the system call starting with the @i'th argument * Fetches 6 arguments to the system call (from 0 through 5). The first
* (from 0 through 5). Argument @i is stored in @args[0], and so on. * argument is stored in @args[0], and so on.
* An arch inline version is probably optimal when @i and @n are constants.
* *
* It's only valid to call this when @task is stopped for tracing on * It's only valid to call this when @task is stopped for tracing on
* entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
* It's invalid to call this with @i + @n > 6; we only support system calls
* taking up to 6 arguments.
*/ */
#define SYSCALL_MAX_ARGS 6 #define SYSCALL_MAX_ARGS 6
void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned int i, unsigned int n, unsigned long *args) unsigned long *args)
{ {
if (n == 0) args[0] = regs->orig_r0;
return; args++;
if (i + n > SYSCALL_MAX_ARGS) { memcpy(args, &regs->uregs[0] + 1, 5 * sizeof(args[0]));
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
memset(args_bad, 0, n_bad * sizeof(args[0]));
memset(args_bad, 0, n_bad * sizeof(args[0]));
}
if (i == 0) {
args[0] = regs->orig_r0;
args++;
i++;
n--;
}
memcpy(args, &regs->uregs[0] + i, n * sizeof(args[0]));
} }
/** /**
* syscall_set_arguments - change system call parameter value * syscall_set_arguments - change system call parameter value
* @task: task of interest, must be in system call entry tracing * @task: task of interest, must be in system call entry tracing
* @regs: task_pt_regs() of @task * @regs: task_pt_regs() of @task
* @i: argument index [0,5]
* @n: number of arguments; n+i must be [1,6].
* @args: array of argument values to store * @args: array of argument values to store
* *
* Changes @n arguments to the system call starting with the @i'th argument. * Changes 6 arguments to the system call. The first argument gets value
* Argument @i gets value @args[0], and so on. * @args[0], and so on.
* An arch inline version is probably optimal when @i and @n are constants.
* *
* It's only valid to call this when @task is stopped for tracing on * It's only valid to call this when @task is stopped for tracing on
* entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
* It's invalid to call this with @i + @n > 6; we only support system calls
* taking up to 6 arguments.
*/ */
void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args) const unsigned long *args)
{ {
if (n == 0) regs->orig_r0 = args[0];
return; args++;
if (i + n > SYSCALL_MAX_ARGS) { memcpy(&regs->uregs[0] + 1, args, 5 * sizeof(args[0]));
pr_warn("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
n = SYSCALL_MAX_ARGS - i;
}
if (i == 0) {
regs->orig_r0 = args[0];
args++;
i++;
n--;
}
memcpy(&regs->uregs[0] + i, args, n * sizeof(args[0]));
} }
#endif /* _ASM_NDS32_SYSCALL_H */ #endif /* _ASM_NDS32_SYSCALL_H */
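The nds32 conversion above (like the other architectures in this series) drops the i/n parameters, so syscall_get_arguments()/syscall_set_arguments() now always transfer the full set of six arguments, with argument 0 coming from the preserved "orig" register and arguments 1..5 copied as one contiguous block. The sketch below is illustrative only, not kernel code: mock_regs and its orig_a0/a1..a5 fields are made up for the example, but the shape mirrors the orig-register-plus-memcpy pattern used by several of the converted architectures.

#include <stdio.h>
#include <string.h>

/* Hypothetical register layout, for illustration only. */
struct mock_regs {
	unsigned long orig_a0;            /* first syscall argument, preserved across the call */
	unsigned long a1, a2, a3, a4, a5; /* remaining five arguments, contiguous */
};

/* New-style accessor: always fetches all six syscall arguments. */
static void mock_get_arguments(const struct mock_regs *regs, unsigned long *args)
{
	args[0] = regs->orig_a0;
	memcpy(&args[1], &regs->a1, 5 * sizeof(args[0]));
}

/* New-style setter: always stores all six syscall arguments. */
static void mock_set_arguments(struct mock_regs *regs, const unsigned long *args)
{
	regs->orig_a0 = args[0];
	memcpy(&regs->a1, &args[1], 5 * sizeof(args[0]));
}

int main(void)
{
	struct mock_regs regs = { 10, 11, 12, 13, 14, 15 };
	unsigned long args[6];
	int i;

	mock_get_arguments(&regs, args);
	for (i = 0; i < 6; i++)
		printf("arg%d = %lu\n", i, args[i]);

	args[0] = 99;
	mock_set_arguments(&regs, args);
	printf("orig_a0 is now %lu\n", regs.orig_a0);
	return 0;
}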

View File

@ -58,81 +58,25 @@ static inline void syscall_set_return_value(struct task_struct *task,
} }
static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs, unsigned int i, unsigned int n, struct pt_regs *regs, unsigned long *args)
unsigned long *args)
{ {
BUG_ON(i + n > 6); *args++ = regs->r4;
*args++ = regs->r5;
switch (i) { *args++ = regs->r6;
case 0: *args++ = regs->r7;
if (!n--) *args++ = regs->r8;
break; *args = regs->r9;
*args++ = regs->r4;
case 1:
if (!n--)
break;
*args++ = regs->r5;
case 2:
if (!n--)
break;
*args++ = regs->r6;
case 3:
if (!n--)
break;
*args++ = regs->r7;
case 4:
if (!n--)
break;
*args++ = regs->r8;
case 5:
if (!n--)
break;
*args++ = regs->r9;
case 6:
if (!n--)
break;
default:
BUG();
}
} }
static inline void syscall_set_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs, unsigned int i, unsigned int n, struct pt_regs *regs, const unsigned long *args)
const unsigned long *args)
{ {
BUG_ON(i + n > 6); regs->r4 = *args++;
regs->r5 = *args++;
switch (i) { regs->r6 = *args++;
case 0: regs->r7 = *args++;
if (!n--) regs->r8 = *args++;
break; regs->r9 = *args;
regs->r4 = *args++;
case 1:
if (!n--)
break;
regs->r5 = *args++;
case 2:
if (!n--)
break;
regs->r6 = *args++;
case 3:
if (!n--)
break;
regs->r7 = *args++;
case 4:
if (!n--)
break;
regs->r8 = *args++;
case 5:
if (!n--)
break;
regs->r9 = *args++;
case 6:
if (!n)
break;
default:
BUG();
}
} }
#endif #endif

View File

@ -56,20 +56,16 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
static inline void static inline void
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned int i, unsigned int n, unsigned long *args) unsigned long *args)
{ {
BUG_ON(i + n > 6); memcpy(args, &regs->gpr[3], 6 * sizeof(args[0]));
memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
} }
static inline void static inline void
syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned int i, unsigned int n, const unsigned long *args) const unsigned long *args)
{ {
BUG_ON(i + n > 6); memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
} }
static inline int syscall_get_arch(void) static inline int syscall_get_arch(void)

View File

@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *);
static inline unsigned long regs_return_value(struct pt_regs *regs) static inline unsigned long regs_return_value(struct pt_regs *regs)
{ {
return regs->gr[20]; return regs->gr[28];
} }
static inline void instruction_pointer_set(struct pt_regs *regs, static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val) unsigned long val)
{ {
regs->iaoq[0] = val; regs->iaoq[0] = val;
regs->iaoq[1] = val + 4;
} }
/* Query offset/name of register from its name/offset */ /* Query offset/name of register from its name/offset */

View File

@ -18,29 +18,15 @@ static inline long syscall_get_nr(struct task_struct *tsk,
} }
static inline void syscall_get_arguments(struct task_struct *tsk, static inline void syscall_get_arguments(struct task_struct *tsk,
struct pt_regs *regs, unsigned int i, struct pt_regs *regs,
unsigned int n, unsigned long *args) unsigned long *args)
{ {
BUG_ON(i); args[5] = regs->gr[21];
args[4] = regs->gr[22];
switch (n) { args[3] = regs->gr[23];
case 6: args[2] = regs->gr[24];
args[5] = regs->gr[21]; args[1] = regs->gr[25];
case 5: args[0] = regs->gr[26];
args[4] = regs->gr[22];
case 4:
args[3] = regs->gr[23];
case 3:
args[2] = regs->gr[24];
case 2:
args[1] = regs->gr[25];
case 1:
args[0] = regs->gr[26];
case 0:
break;
default:
BUG();
}
} }
static inline long syscall_get_return_value(struct task_struct *task, static inline long syscall_get_return_value(struct task_struct *task,

View File

@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void)
static int __init parisc_idle_init(void) static int __init parisc_idle_init(void)
{ {
const char *marker;
/* check QEMU/SeaBIOS marker in PAGE0 */
marker = (char *) &PAGE0->pad0;
running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
if (!running_on_qemu) if (!running_on_qemu)
cpu_idle_poll_ctrl(1); cpu_idle_poll_ctrl(1);

View File

@ -397,6 +397,9 @@ void __init start_parisc(void)
int ret, cpunum; int ret, cpunum;
struct pdc_coproc_cfg coproc_cfg; struct pdc_coproc_cfg coproc_cfg;
/* check QEMU/SeaBIOS marker in PAGE0 */
running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
cpunum = smp_processor_id(); cpunum = smp_processor_id();
init_cpu_topology(); init_cpu_topology();

View File

@ -352,7 +352,7 @@ static inline bool strict_kernel_rwx_enabled(void)
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \ #if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \
defined (CONFIG_PPC_64K_PAGES) defined (CONFIG_PPC_64K_PAGES)
#define MAX_PHYSMEM_BITS 51 #define MAX_PHYSMEM_BITS 51
#elif defined(CONFIG_SPARSEMEM) #elif defined(CONFIG_PPC64)
#define MAX_PHYSMEM_BITS 46 #define MAX_PHYSMEM_BITS 46
#endif #endif

View File

@ -65,22 +65,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args) unsigned long *args)
{ {
unsigned long val, mask = -1UL; unsigned long val, mask = -1UL;
unsigned int n = 6;
BUG_ON(i + n > 6);
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_32BIT)) if (test_tsk_thread_flag(task, TIF_32BIT))
mask = 0xffffffff; mask = 0xffffffff;
#endif #endif
while (n--) { while (n--) {
if (n == 0 && i == 0) if (n == 0)
val = regs->orig_gpr3; val = regs->orig_gpr3;
else else
val = regs->gpr[3 + i + n]; val = regs->gpr[3 + n];
args[n] = val & mask; args[n] = val & mask;
} }
@ -88,15 +86,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
static inline void syscall_set_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args) const unsigned long *args)
{ {
BUG_ON(i + n > 6); memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
/* Also copy the first argument into orig_gpr3 */ /* Also copy the first argument into orig_gpr3 */
if (i == 0 && n > 0) regs->orig_gpr3 = args[0];
regs->orig_gpr3 = args[0];
} }
static inline int syscall_get_arch(void) static inline int syscall_get_arch(void)

View File

@ -656,11 +656,17 @@ EXC_COMMON_BEGIN(data_access_slb_common)
ld r4,PACA_EXSLB+EX_DAR(r13) ld r4,PACA_EXSLB+EX_DAR(r13)
std r4,_DAR(r1) std r4,_DAR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
bl do_slb_fault bl do_slb_fault
cmpdi r3,0 cmpdi r3,0
bne- 1f bne- 1f
b fast_exception_return b fast_exception_return
1: /* Error case */ 1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1) std r3,RESULT(r1)
bl save_nvgprs bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11) RECONCILE_IRQ_STATE(r10, r11)
@ -705,11 +711,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_common)
EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB) EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
ld r4,_NIP(r1) ld r4,_NIP(r1)
addi r3,r1,STACK_FRAME_OVERHEAD addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
bl do_slb_fault bl do_slb_fault
cmpdi r3,0 cmpdi r3,0
bne- 1f bne- 1f
b fast_exception_return b fast_exception_return
1: /* Error case */ 1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1) std r3,RESULT(r1)
bl save_nvgprs bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11) RECONCILE_IRQ_STATE(r10, r11)

View File

@ -851,10 +851,6 @@ __secondary_start:
tophys(r4,r2) tophys(r4,r2)
addi r4,r4,THREAD /* phys address of our thread_struct */ addi r4,r4,THREAD /* phys address of our thread_struct */
mtspr SPRN_SPRG_THREAD,r4 mtspr SPRN_SPRG_THREAD,r4
#ifdef CONFIG_PPC_RTAS
li r3,0
stw r3, RTAS_SP(r4) /* 0 => not in RTAS */
#endif
lis r4, (swapper_pg_dir - PAGE_OFFSET)@h lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
mtspr SPRN_SPRG_PGDIR, r4 mtspr SPRN_SPRG_PGDIR, r4
@ -941,10 +937,6 @@ start_here:
tophys(r4,r2) tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */ addi r4,r4,THREAD /* init task's THREAD */
mtspr SPRN_SPRG_THREAD,r4 mtspr SPRN_SPRG_THREAD,r4
#ifdef CONFIG_PPC_RTAS
li r3,0
stw r3, RTAS_SP(r4) /* 0 => not in RTAS */
#endif
lis r4, (swapper_pg_dir - PAGE_OFFSET)@h lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
mtspr SPRN_SPRG_PGDIR, r4 mtspr SPRN_SPRG_PGDIR, r4

View File

@ -22,6 +22,7 @@
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h> #include <linux/kvm_para.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/of.h> #include <linux/of.h>
@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
static __init void kvm_free_tmp(void) static __init void kvm_free_tmp(void)
{ {
/*
* Inform kmemleak about the hole in the .bss section since the
* corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
*/
kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
free_reserved_area(&kvm_tmp[kvm_tmp_index], free_reserved_area(&kvm_tmp[kvm_tmp_index],
&kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL); &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
} }

View File

@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
* can be used, r7 contains NSEC_PER_SEC. * can be used, r7 contains NSEC_PER_SEC.
*/ */
lwz r5,WTOM_CLOCK_SEC(r9) lwz r5,(WTOM_CLOCK_SEC+LOPART)(r9)
lwz r6,WTOM_CLOCK_NSEC(r9) lwz r6,WTOM_CLOCK_NSEC(r9)
/* We now have our offset in r5,r6. We create a fake dependency /* We now have our offset in r5,r6. We create a fake dependency

View File

@ -0,0 +1,84 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NETLINK_DIAG=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_XILINX=y
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_NETDEVICES=y
CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_R8169=y
CONFIG_MICROSEMI_PHY=y
CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_HVC_RISCV_SBI=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PLATFORM=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_VIRTIO_MMIO=y
CONFIG_SIFIVE_PLIC=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
# CONFIG_RCU_TRACE is not set

View File

@ -26,7 +26,7 @@ enum fixed_addresses {
}; };
#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE) #define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE)
#define FIXADDR_TOP (PAGE_OFFSET) #define FIXADDR_TOP (VMALLOC_START)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define FIXMAP_PAGE_IO PAGE_KERNEL #define FIXMAP_PAGE_IO PAGE_KERNEL

View File

@ -72,32 +72,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args) unsigned long *args)
{ {
BUG_ON(i + n > 6); args[0] = regs->orig_a0;
if (i == 0) { args++;
args[0] = regs->orig_a0; memcpy(args, &regs->a1, 5 * sizeof(args[0]));
args++;
i++;
n--;
}
memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
} }
static inline void syscall_set_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args) const unsigned long *args)
{ {
BUG_ON(i + n > 6); regs->orig_a0 = args[0];
if (i == 0) { args++;
regs->orig_a0 = args[0]; memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
args++;
i++;
n--;
}
memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
} }
static inline int syscall_get_arch(void) static inline int syscall_get_arch(void)

View File

@ -300,7 +300,7 @@ do { \
" .balign 4\n" \ " .balign 4\n" \
"4:\n" \ "4:\n" \
" li %0, %6\n" \ " li %0, %6\n" \
" jump 2b, %1\n" \ " jump 3b, %1\n" \
" .previous\n" \ " .previous\n" \
" .section __ex_table,\"a\"\n" \ " .section __ex_table,\"a\"\n" \
" .balign " RISCV_SZPTR "\n" \ " .balign " RISCV_SZPTR "\n" \

View File

@ -4,7 +4,6 @@
ifdef CONFIG_FTRACE ifdef CONFIG_FTRACE
CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_setup.o = -pg
endif endif
extra-y += head.o extra-y += head.o
@ -29,8 +28,6 @@ obj-y += vdso.o
obj-y += cacheinfo.o obj-y += cacheinfo.o
obj-y += vdso/ obj-y += vdso/
CFLAGS_setup.o := -mcmodel=medany
obj-$(CONFIG_FPU) += fpu.o obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_SMP) += smpboot.o obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP) += smp.o

View File

@ -141,7 +141,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
{ {
s32 hi20; s32 hi20;
if (IS_ENABLED(CMODEL_MEDLOW)) { if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
pr_err( pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
me->name, (long long)v, location); me->name, (long long)v, location);

View File

@ -48,14 +48,6 @@ struct screen_info screen_info = {
}; };
#endif #endif
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
/* The lucky hart to first increment this variable will boot the other cores */ /* The lucky hart to first increment this variable will boot the other cores */
atomic_t hart_lottery; atomic_t hart_lottery;
unsigned long boot_cpu_hartid; unsigned long boot_cpu_hartid;

View File

@ -1,3 +1,9 @@
CFLAGS_init.o := -mcmodel=medany
ifdef CONFIG_FTRACE
CFLAGS_REMOVE_init.o = -pg
endif
obj-y += init.o obj-y += init.o
obj-y += fault.o obj-y += fault.o
obj-y += extable.o obj-y += extable.o

View File

@ -25,6 +25,10 @@
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/io.h> #include <asm/io.h>
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
static void __init zone_sizes_init(void) static void __init zone_sizes_init(void)
{ {
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
@ -117,6 +121,14 @@ void __init setup_bootmem(void)
*/ */
memblock_reserve(reg->base, vmlinux_end - reg->base); memblock_reserve(reg->base, vmlinux_end - reg->base);
mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET); mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
/*
* Remove memblock from the end of usable area to the
* end of region
*/
if (reg->base + mem_size < end)
memblock_remove(reg->base + mem_size,
end - reg->base - mem_size);
} }
} }
BUG_ON(mem_size == 0); BUG_ON(mem_size == 0);
@ -143,6 +155,11 @@ void __init setup_bootmem(void)
} }
} }
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
@ -172,6 +189,25 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
} }
} }
/*
* setup_vm() is called from head.S with MMU-off.
*
* Following requirements should be honoured for setup_vm() to work
* correctly:
* 1) It should use PC-relative addressing for accessing kernel symbols.
* To achieve this we always use GCC cmodel=medany.
* 2) The compiler instrumentation for FTRACE will not work for setup_vm()
* so disable compiler instrumentation when FTRACE is enabled.
*
* Currently, the above requirements are honoured by using custom CFLAGS
* for init.o in mm/Makefile.
*/
#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should "
"not use absolute addressing."
#endif
asmlinkage void __init setup_vm(void) asmlinkage void __init setup_vm(void)
{ {
extern char _start; extern char _start;

View File

@ -25,7 +25,7 @@ static void *mem_detect_alloc_extended(void)
{ {
unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64)); unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
if (IS_ENABLED(BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE && if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
INITRD_START < offset + ENTRIES_EXTENDED_MAX) INITRD_START < offset + ENTRIES_EXTENDED_MAX)
offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64)); offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));

View File

@ -56,40 +56,32 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args) unsigned long *args)
{ {
unsigned long mask = -1UL; unsigned long mask = -1UL;
unsigned int n = 6;
/*
* No arguments for this syscall, there's nothing to do.
*/
if (!n)
return;
BUG_ON(i + n > 6);
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_31BIT)) if (test_tsk_thread_flag(task, TIF_31BIT))
mask = 0xffffffff; mask = 0xffffffff;
#endif #endif
while (n-- > 0) while (n-- > 0)
if (i + n > 0) if (n > 0)
args[n] = regs->gprs[2 + i + n] & mask; args[n] = regs->gprs[2 + n] & mask;
if (i == 0)
args[0] = regs->orig_gpr2 & mask; args[0] = regs->orig_gpr2 & mask;
} }
static inline void syscall_set_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args) const unsigned long *args)
{ {
BUG_ON(i + n > 6); unsigned int n = 6;
while (n-- > 0) while (n-- > 0)
if (i + n > 0) if (n > 0)
regs->gprs[2 + i + n] = args[n]; regs->gprs[2 + n] = args[n];
if (i == 0) regs->orig_gpr2 = args[0];
regs->orig_gpr2 = args[0];
} }
static inline int syscall_get_arch(void) static inline int syscall_get_arch(void)

View File

@ -23,7 +23,7 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
if (flags & KERNEL_FPC) if (flags & KERNEL_FPC)
/* Save floating point control */ /* Save floating point control */
asm volatile("stfpc %0" : "=m" (state->fpc)); asm volatile("stfpc %0" : "=Q" (state->fpc));
if (!MACHINE_HAS_VX) { if (!MACHINE_HAS_VX) {
if (flags & KERNEL_VXR_V0V7) { if (flags & KERNEL_VXR_V0V7) {

View File

@ -37,7 +37,7 @@ static inline u64 get_vtimer(void)
{ {
u64 timer; u64 timer;
asm volatile("stpt %0" : "=m" (timer)); asm volatile("stpt %0" : "=Q" (timer));
return timer; return timer;
} }
@ -48,7 +48,7 @@ static inline void set_vtimer(u64 expires)
asm volatile( asm volatile(
" stpt %0\n" /* Store current cpu timer value */ " stpt %0\n" /* Store current cpu timer value */
" spt %1" /* Set new value imm. afterwards */ " spt %1" /* Set new value imm. afterwards */
: "=m" (timer) : "m" (expires)); : "=Q" (timer) : "Q" (expires));
S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer; S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
S390_lowcore.last_update_timer = expires; S390_lowcore.last_update_timer = expires;
} }
@ -135,8 +135,8 @@ static int do_account_vtime(struct task_struct *tsk)
#else #else
" stck %1" /* Store current tod clock value */ " stck %1" /* Store current tod clock value */
#endif #endif
: "=m" (S390_lowcore.last_update_timer), : "=Q" (S390_lowcore.last_update_timer),
"=m" (S390_lowcore.last_update_clock)); "=Q" (S390_lowcore.last_update_clock));
clock = S390_lowcore.last_update_clock - clock; clock = S390_lowcore.last_update_clock - clock;
timer -= S390_lowcore.last_update_timer; timer -= S390_lowcore.last_update_timer;

View File

@ -164,10 +164,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = {
struct sh_clk_ops; struct sh_clk_ops;
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{ {
} }
void __init plat_irq_setup(void) void __init __weak plat_irq_setup(void)
{ {
} }

View File

@ -48,51 +48,28 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args) unsigned long *args)
{ {
/*
* Do this simply for now. If we need to start supporting
* fetching arguments from arbitrary indices, this will need some
* extra logic. Presently there are no in-tree users that depend
* on this behaviour.
*/
BUG_ON(i);
/* Argument pattern is: R4, R5, R6, R7, R0, R1 */ /* Argument pattern is: R4, R5, R6, R7, R0, R1 */
switch (n) { args[5] = regs->regs[1];
case 6: args[5] = regs->regs[1]; args[4] = regs->regs[0];
case 5: args[4] = regs->regs[0]; args[3] = regs->regs[7];
case 4: args[3] = regs->regs[7]; args[2] = regs->regs[6];
case 3: args[2] = regs->regs[6]; args[1] = regs->regs[5];
case 2: args[1] = regs->regs[5]; args[0] = regs->regs[4];
case 1: args[0] = regs->regs[4];
case 0:
break;
default:
BUG();
}
} }
static inline void syscall_set_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args) const unsigned long *args)
{ {
/* Same note as above applies */ regs->regs[1] = args[5];
BUG_ON(i); regs->regs[0] = args[4];
regs->regs[7] = args[3];
switch (n) { regs->regs[6] = args[2];
case 6: regs->regs[1] = args[5]; regs->regs[5] = args[1];
case 5: regs->regs[0] = args[4]; regs->regs[4] = args[0];
case 4: regs->regs[7] = args[3];
case 3: regs->regs[6] = args[2];
case 2: regs->regs[5] = args[1];
case 1: regs->regs[4] = args[0];
break;
default:
BUG();
}
} }
static inline int syscall_get_arch(void) static inline int syscall_get_arch(void)

View File

@ -47,20 +47,16 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args) unsigned long *args)
{ {
BUG_ON(i + n > 6); memcpy(args, &regs->regs[2], 6 * sizeof(args[0]));
memcpy(args, &regs->regs[2 + i], n * sizeof(args[0]));
} }
static inline void syscall_set_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args) const unsigned long *args)
{ {
BUG_ON(i + n > 6); memcpy(&regs->regs[2], args, 6 * sizeof(args[0]));
memcpy(&regs->regs[2 + i], args, n * sizeof(args[0]));
} }
static inline int syscall_get_arch(void) static inline int syscall_get_arch(void)

View File

@ -96,11 +96,11 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args) unsigned long *args)
{ {
int zero_extend = 0; int zero_extend = 0;
unsigned int j; unsigned int j;
unsigned int n = 6;
#ifdef CONFIG_SPARC64 #ifdef CONFIG_SPARC64
if (test_tsk_thread_flag(task, TIF_32BIT)) if (test_tsk_thread_flag(task, TIF_32BIT))
@ -108,7 +108,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
#endif #endif
for (j = 0; j < n; j++) { for (j = 0; j < n; j++) {
unsigned long val = regs->u_regs[UREG_I0 + i + j]; unsigned long val = regs->u_regs[UREG_I0 + j];
if (zero_extend) if (zero_extend)
args[j] = (u32) val; args[j] = (u32) val;
@ -119,13 +119,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
static inline void syscall_set_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args) const unsigned long *args)
{ {
unsigned int j; unsigned int i;
for (j = 0; j < n; j++) for (i = 0; i < 6; i++)
regs->u_regs[UREG_I0 + i + j] = args[j]; regs->u_regs[UREG_I0 + i] = args[i];
} }
static inline int syscall_get_arch(void) static inline int syscall_get_arch(void)

View File

@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
p->npages = 0; p->npages = 0;
} }
static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
{
return iommu->atu && mask > DMA_BIT_MASK(32);
}
/* Interrupts must be disabled. */ /* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask) static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{ {
@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
while (npages != 0) { while (npages != 0) {
if (mask <= DMA_BIT_MASK(32) || !pbm->iommu->atu) { if (!iommu_use_atu(pbm->iommu, mask)) {
num = pci_sun4v_iommu_map(devhandle, num = pci_sun4v_iommu_map(devhandle,
HV_PCI_TSBID(0, entry), HV_PCI_TSBID(0, entry),
npages, npages,
@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
unsigned long flags, order, first_page, npages, n; unsigned long flags, order, first_page, npages, n;
unsigned long prot = 0; unsigned long prot = 0;
struct iommu *iommu; struct iommu *iommu;
struct atu *atu;
struct iommu_map_table *tbl; struct iommu_map_table *tbl;
struct page *page; struct page *page;
void *ret; void *ret;
@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
memset((char *)first_page, 0, PAGE_SIZE << order); memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu; iommu = dev->archdata.iommu;
atu = iommu->atu;
mask = dev->coherent_dma_mask; mask = dev->coherent_dma_mask;
if (mask <= DMA_BIT_MASK(32) || !atu) if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl; tbl = &iommu->tbl;
else else
tbl = &atu->tbl; tbl = &iommu->atu->tbl;
entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0); (unsigned long)(-1), 0);
@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
atu = iommu->atu; atu = iommu->atu;
devhandle = pbm->devhandle; devhandle = pbm->devhandle;
if (dvma <= DMA_BIT_MASK(32)) { if (!iommu_use_atu(iommu, dvma)) {
tbl = &iommu->tbl; tbl = &iommu->tbl;
iotsb_num = 0; /* we don't care for legacy iommu */ iotsb_num = 0; /* we don't care for legacy iommu */
} else { } else {
@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
npages >>= IO_PAGE_SHIFT; npages >>= IO_PAGE_SHIFT;
mask = *dev->dma_mask; mask = *dev->dma_mask;
if (mask <= DMA_BIT_MASK(32)) if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl; tbl = &iommu->tbl;
else else
tbl = &atu->tbl; tbl = &atu->tbl;
@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT; IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
mask = *dev->dma_mask; mask = *dev->dma_mask;
if (mask <= DMA_BIT_MASK(32)) if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl; tbl = &iommu->tbl;
else else
tbl = &atu->tbl; tbl = &atu->tbl;

View File

@ -53,84 +53,30 @@ static inline void syscall_set_return_value(struct task_struct *task,
static inline void syscall_get_arguments(struct task_struct *task, static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
unsigned long *args) unsigned long *args)
{ {
const struct uml_pt_regs *r = &regs->regs; const struct uml_pt_regs *r = &regs->regs;
switch (i) { *args++ = UPT_SYSCALL_ARG1(r);
case 0: *args++ = UPT_SYSCALL_ARG2(r);
if (!n--) *args++ = UPT_SYSCALL_ARG3(r);
break; *args++ = UPT_SYSCALL_ARG4(r);
*args++ = UPT_SYSCALL_ARG1(r); *args++ = UPT_SYSCALL_ARG5(r);
case 1: *args = UPT_SYSCALL_ARG6(r);
if (!n--)
break;
*args++ = UPT_SYSCALL_ARG2(r);
case 2:
if (!n--)
break;
*args++ = UPT_SYSCALL_ARG3(r);
case 3:
if (!n--)
break;
*args++ = UPT_SYSCALL_ARG4(r);
case 4:
if (!n--)
break;
*args++ = UPT_SYSCALL_ARG5(r);
case 5:
if (!n--)
break;
*args++ = UPT_SYSCALL_ARG6(r);
case 6:
if (!n--)
break;
default:
BUG();
break;
}
} }
static inline void syscall_set_arguments(struct task_struct *task, static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs, struct pt_regs *regs,
unsigned int i, unsigned int n,
const unsigned long *args) const unsigned long *args)
{ {
struct uml_pt_regs *r = &regs->regs; struct uml_pt_regs *r = &regs->regs;
switch (i) { UPT_SYSCALL_ARG1(r) = *args++;
case 0: UPT_SYSCALL_ARG2(r) = *args++;
if (!n--) UPT_SYSCALL_ARG3(r) = *args++;
break; UPT_SYSCALL_ARG4(r) = *args++;
UPT_SYSCALL_ARG1(r) = *args++; UPT_SYSCALL_ARG5(r) = *args++;
case 1: UPT_SYSCALL_ARG6(r) = *args;
if (!n--)
break;
UPT_SYSCALL_ARG2(r) = *args++;
case 2:
if (!n--)
break;
UPT_SYSCALL_ARG3(r) = *args++;
case 3:
if (!n--)
break;
UPT_SYSCALL_ARG4(r) = *args++;
case 4:
if (!n--)
break;
UPT_SYSCALL_ARG5(r) = *args++;
case 5:
if (!n--)
break;
UPT_SYSCALL_ARG6(r) = *args++;
case 6:
if (!n--)
break;
default:
BUG();
break;
}
} }
/* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */ /* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */

View File

@ -1499,7 +1499,7 @@ config X86_CPA_STATISTICS
depends on DEBUG_FS depends on DEBUG_FS
---help--- ---help---
Expose statistics about the Change Page Attribute mechanims, which Expose statistics about the Change Page Attribute mechanims, which
helps to determine the effectivness of preserving large and huge helps to determine the effectiveness of preserving large and huge
page mappings when mapping protections are changed. page mappings when mapping protections are changed.
config ARCH_HAS_MEM_ENCRYPT config ARCH_HAS_MEM_ENCRYPT

View File

@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2)
vpaddq t2,t1,t1 vpaddq t2,t1,t1
vmovq t1x,d4 vmovq t1x,d4
# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
# amount. Careful: we must not assume the carry bits 'd0 >> 26',
# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
# integers. It's true in a single-block implementation, but not here.
# d1 += d0 >> 26 # d1 += d0 >> 26
mov d0,%rax mov d0,%rax
shr $26,%rax shr $26,%rax
@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2)
# h0 += (d4 >> 26) * 5 # h0 += (d4 >> 26) * 5
mov d4,%rax mov d4,%rax
shr $26,%rax shr $26,%rax
lea (%eax,%eax,4),%eax lea (%rax,%rax,4),%rax
add %eax,%ebx add %rax,%rbx
# h4 = d4 & 0x3ffffff # h4 = d4 & 0x3ffffff
mov d4,%rax mov d4,%rax
and $0x3ffffff,%eax and $0x3ffffff,%eax
mov %eax,h4 mov %eax,h4
# h1 += h0 >> 26 # h1 += h0 >> 26
mov %ebx,%eax mov %rbx,%rax
shr $26,%eax shr $26,%rax
add %eax,h1 add %eax,h1
# h0 = h0 & 0x3ffffff # h0 = h0 & 0x3ffffff
andl $0x3ffffff,%ebx andl $0x3ffffff,%ebx
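The new comment in this file describes the partial reduction mod 2^130 - 5 (carrying h0 -> h1 -> h2 -> h3 -> h4 -> h0 -> h1) and warns that in the multi-block code the carries d[i] >> 26 can exceed 32 bits, which is why the hunk above widens the lea/shr/add sequence from 32-bit to 64-bit registers. The scalar C sketch below is for illustration only, not the kernel implementation; the limb values in main() are arbitrary demo inputs.

#include <stdint.h>
#include <stdio.h>

/*
 * Partial reduction mod 2^130 - 5 over five 26-bit limbs: carry
 * h0 -> h1 -> h2 -> h3 -> h4 -> h0 -> h1. Carries stay in 64-bit
 * variables because accumulated multi-block products can make
 * d[i] >> 26 larger than 32 bits.
 */
static void partial_reduce(uint64_t d[5])
{
	uint64_t c;

	c = d[0] >> 26; d[0] &= 0x3ffffff; d[1] += c;     /* h0 -> h1 */
	c = d[1] >> 26; d[1] &= 0x3ffffff; d[2] += c;     /* h1 -> h2 */
	c = d[2] >> 26; d[2] &= 0x3ffffff; d[3] += c;     /* h2 -> h3 */
	c = d[3] >> 26; d[3] &= 0x3ffffff; d[4] += c;     /* h3 -> h4 */
	c = d[4] >> 26; d[4] &= 0x3ffffff; d[0] += c * 5; /* h4 -> h0, since 2^130 == 5 (mod p) */
	c = d[0] >> 26; d[0] &= 0x3ffffff; d[1] += c;     /* h0 -> h1 again */
}

int main(void)
{
	/* Arbitrary unreduced limbs, well above 2^26, for demonstration. */
	uint64_t d[5] = { 1ULL << 40, 1ULL << 41, 1ULL << 42, 1ULL << 43, 1ULL << 44 };
	int i;

	partial_reduce(d);
	for (i = 0; i < 5; i++)
		printf("h%d = 0x%07llx\n", i, (unsigned long long)d[i]);
	return 0;
}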

View File

@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2)
# h0 += (d4 >> 26) * 5 # h0 += (d4 >> 26) * 5
mov d4,%rax mov d4,%rax
shr $26,%rax shr $26,%rax
lea (%eax,%eax,4),%eax lea (%rax,%rax,4),%rax
add %eax,%ebx add %rax,%rbx
# h4 = d4 & 0x3ffffff # h4 = d4 & 0x3ffffff
mov d4,%rax mov d4,%rax
and $0x3ffffff,%eax and $0x3ffffff,%eax
mov %eax,h4 mov %eax,h4
# h1 += h0 >> 26 # h1 += h0 >> 26
mov %ebx,%eax mov %rbx,%rax
shr $26,%eax shr $26,%rax
add %eax,h1 add %eax,h1
# h0 = h0 & 0x3ffffff # h0 = h0 & 0x3ffffff
andl $0x3ffffff,%ebx andl $0x3ffffff,%ebx
@ -524,6 +524,12 @@ ENTRY(poly1305_2block_sse2)
paddq t2,t1 paddq t2,t1
movq t1,d4 movq t1,d4
# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
# amount. Careful: we must not assume the carry bits 'd0 >> 26',
# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
# integers. It's true in a single-block implementation, but not here.
# d1 += d0 >> 26 # d1 += d0 >> 26
mov d0,%rax mov d0,%rax
shr $26,%rax shr $26,%rax
@ -562,16 +568,16 @@ ENTRY(poly1305_2block_sse2)
# h0 += (d4 >> 26) * 5 # h0 += (d4 >> 26) * 5
mov d4,%rax mov d4,%rax
shr $26,%rax shr $26,%rax
lea (%eax,%eax,4),%eax lea (%rax,%rax,4),%rax
add %eax,%ebx add %rax,%rbx
# h4 = d4 & 0x3ffffff # h4 = d4 & 0x3ffffff
mov d4,%rax mov d4,%rax
and $0x3ffffff,%eax and $0x3ffffff,%eax
mov %eax,h4 mov %eax,h4
# h1 += h0 >> 26 # h1 += h0 >> 26
mov %ebx,%eax mov %rbx,%rax
shr $26,%eax shr $26,%rax
add %eax,h1 add %eax,h1
# h0 = h0 & 0x3ffffff # h0 = h0 & 0x3ffffff
andl $0x3ffffff,%ebx andl $0x3ffffff,%ebx

View File

@ -3,10 +3,14 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/delay.h>
#include <asm/apicdef.h> #include <asm/apicdef.h>
#include <asm/nmi.h>
#include "../perf_event.h" #include "../perf_event.h"
static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
static __initconst const u64 amd_hw_cache_event_ids static __initconst const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_OP_MAX]
@ -113,22 +117,39 @@ static __initconst const u64 amd_hw_cache_event_ids
}; };
/* /*
* AMD Performance Monitor K7 and later. * AMD Performance Monitor K7 and later, up to and including Family 16h:
*/ */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] = static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{ {
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076, [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d, [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
[PERF_COUNT_HW_CACHE_MISSES] = 0x077e, [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
};
/*
* AMD Performance Monitor Family 17h and later:
*/
static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
}; };
static u64 amd_pmu_event_map(int hw_event) static u64 amd_pmu_event_map(int hw_event)
{ {
if (boot_cpu_data.x86 >= 0x17)
return amd_f17h_perfmon_event_map[hw_event];
return amd_perfmon_event_map[hw_event]; return amd_perfmon_event_map[hw_event];
} }
@ -429,6 +450,132 @@ static void amd_pmu_cpu_dead(int cpu)
} }
} }
/*
* When a PMC counter overflows, an NMI is used to process the event and
* reset the counter. NMI latency can result in the counter being updated
* before the NMI can run, which can result in what appear to be spurious
* NMIs. This function is intended to wait for the NMI to run and reset
* the counter to avoid possible unhandled NMI messages.
*/
#define OVERFLOW_WAIT_COUNT 50
static void amd_pmu_wait_on_overflow(int idx)
{
unsigned int i;
u64 counter;
/*
* Wait for the counter to be reset if it has overflowed. This loop
* should exit very, very quickly, but just in case, don't wait
* forever...
*/
for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
rdmsrl(x86_pmu_event_addr(idx), counter);
if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
break;
/* Might be in IRQ context, so can't sleep */
udelay(1);
}
}
static void amd_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
x86_pmu_disable_all();
/*
* This shouldn't be called from NMI context, but add a safeguard here
* to return, since if we're in NMI context we can't wait for an NMI
* to reset an overflowed counter value.
*/
if (in_nmi())
return;
/*
* Check each counter for overflow and wait for it to be reset by the
* NMI if it has overflowed. This relies on the fact that all active
 * counters are always enabled when this function is called and
* ARCH_PERFMON_EVENTSEL_INT is always set.
*/
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
amd_pmu_wait_on_overflow(idx);
}
}
static void amd_pmu_disable_event(struct perf_event *event)
{
x86_pmu_disable_event(event);
/*
* This can be called from NMI context (via x86_pmu_stop). The counter
* may have overflowed, but either way, we'll never see it get reset
* by the NMI if we're already in the NMI. And the NMI latency support
* below will take care of any pending NMI that might have been
* generated by the overflow.
*/
if (in_nmi())
return;
amd_pmu_wait_on_overflow(event->hw.idx);
}
/*
* Because of NMI latency, if multiple PMC counters are active or other sources
* of NMIs are received, the perf NMI handler can handle one or more overflowed
* PMC counters outside of the NMI associated with the PMC overflow. If the NMI
* doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
* back-to-back NMI support won't be active. This PMC handler needs to take into
* account that this can occur, otherwise this could result in unknown NMI
* messages being issued. Examples of this is PMC overflow while in the NMI
* handler when multiple PMCs are active or PMC overflow while handling some
* other source of an NMI.
*
* Attempt to mitigate this by using the number of active PMCs to determine
* whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
* any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
* number of active PMCs or 2. The value of 2 is used in case an NMI does not
* arrive at the LAPIC in time to be collapsed into an already pending NMI.
*/
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int active, handled;
/*
* Obtain the active count before calling x86_pmu_handle_irq() since
* it is possible that x86_pmu_handle_irq() may make a counter
* inactive (through x86_pmu_stop).
*/
active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
/* Process any counter overflows */
handled = x86_pmu_handle_irq(regs);
/*
* If a counter was handled, record the number of possible remaining
* NMIs that can occur.
*/
if (handled) {
this_cpu_write(perf_nmi_counter,
min_t(unsigned int, 2, active));
return handled;
}
if (!this_cpu_read(perf_nmi_counter))
return NMI_DONE;
this_cpu_dec(perf_nmi_counter);
return NMI_HANDLED;
}
static struct event_constraint * static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx, amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event) struct perf_event *event)
@ -621,11 +768,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
static __initconst const struct x86_pmu amd_pmu = { static __initconst const struct x86_pmu amd_pmu = {
.name = "AMD", .name = "AMD",
.handle_irq = x86_pmu_handle_irq, .handle_irq = amd_pmu_handle_irq,
.disable_all = x86_pmu_disable_all, .disable_all = amd_pmu_disable_all,
.enable_all = x86_pmu_enable_all, .enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event, .enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event, .disable = amd_pmu_disable_event,
.hw_config = amd_pmu_hw_config, .hw_config = amd_pmu_hw_config,
.schedule_events = x86_schedule_events, .schedule_events = x86_schedule_events,
.eventsel = MSR_K7_EVNTSEL0, .eventsel = MSR_K7_EVNTSEL0,
@ -732,7 +879,7 @@ void amd_pmu_enable_virt(void)
cpuc->perf_ctr_virt_mask = 0; cpuc->perf_ctr_virt_mask = 0;
/* Reload all events */ /* Reload all events */
x86_pmu_disable_all(); amd_pmu_disable_all();
x86_pmu_enable_all(0); x86_pmu_enable_all(0);
} }
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
@ -750,7 +897,7 @@ void amd_pmu_disable_virt(void)
cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
/* Reload all events */ /* Reload all events */
x86_pmu_disable_all(); amd_pmu_disable_all();
x86_pmu_enable_all(0); x86_pmu_enable_all(0);
} }
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt); EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);

View File

@ -1349,8 +1349,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw; struct hw_perf_event *hwc = &event->hw;
if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { if (test_bit(hwc->idx, cpuc->active_mask)) {
x86_pmu.disable(event); x86_pmu.disable(event);
__clear_bit(hwc->idx, cpuc->active_mask);
cpuc->events[hwc->idx] = NULL; cpuc->events[hwc->idx] = NULL;
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED; hwc->state |= PERF_HES_STOPPED;
@ -1447,16 +1448,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
apic_write(APIC_LVTPC, APIC_DM_NMI); apic_write(APIC_LVTPC, APIC_DM_NMI);
for (idx = 0; idx < x86_pmu.num_counters; idx++) { for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask)) { if (!test_bit(idx, cpuc->active_mask))
/*
* Though we deactivated the counter some cpus
* might still deliver spurious interrupts still
* in flight. Catch them:
*/
if (__test_and_clear_bit(idx, cpuc->running))
handled++;
continue; continue;
}
event = cpuc->events[idx]; event = cpuc->events[idx];

View File

@ -3131,7 +3131,7 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
flags &= ~PERF_SAMPLE_TIME; flags &= ~PERF_SAMPLE_TIME;
if (!event->attr.exclude_kernel) if (!event->attr.exclude_kernel)
flags &= ~PERF_SAMPLE_REGS_USER; flags &= ~PERF_SAMPLE_REGS_USER;
if (event->attr.sample_regs_user & ~PEBS_REGS) if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
return flags; return flags;
} }
@ -3185,7 +3185,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
return ret; return ret;
if (event->attr.precise_ip) { if (event->attr.precise_ip) {
if (!event->attr.freq) { if (!(event->attr.freq || event->attr.wakeup_events)) {
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
if (!(event->attr.sample_type & if (!(event->attr.sample_type &
~intel_pmu_large_pebs_flags(event))) ~intel_pmu_large_pebs_flags(event)))
@ -3575,6 +3575,12 @@ static void intel_pmu_cpu_starting(int cpu)
cpuc->lbr_sel = NULL; cpuc->lbr_sel = NULL;
if (x86_pmu.flags & PMU_FL_TFA) {
WARN_ON_ONCE(cpuc->tfa_shadow);
cpuc->tfa_shadow = ~0ULL;
intel_set_tfa(cpuc, false);
}
if (x86_pmu.version > 1) if (x86_pmu.version > 1)
flip_smm_bit(&x86_pmu.attr_freeze_on_smi); flip_smm_bit(&x86_pmu.attr_freeze_on_smi);

View File

@@ -96,25 +96,25 @@ struct amd_nb {
 	 PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
 	 PERF_SAMPLE_PERIOD)
-#define PEBS_REGS \
-	(PERF_REG_X86_AX | \
-	 PERF_REG_X86_BX | \
-	 PERF_REG_X86_CX | \
-	 PERF_REG_X86_DX | \
-	 PERF_REG_X86_DI | \
-	 PERF_REG_X86_SI | \
-	 PERF_REG_X86_SP | \
-	 PERF_REG_X86_BP | \
-	 PERF_REG_X86_IP | \
-	 PERF_REG_X86_FLAGS | \
-	 PERF_REG_X86_R8 | \
-	 PERF_REG_X86_R9 | \
-	 PERF_REG_X86_R10 | \
-	 PERF_REG_X86_R11 | \
-	 PERF_REG_X86_R12 | \
-	 PERF_REG_X86_R13 | \
-	 PERF_REG_X86_R14 | \
-	 PERF_REG_X86_R15)
+#define PEBS_GP_REGS \
+	((1ULL << PERF_REG_X86_AX) | \
+	 (1ULL << PERF_REG_X86_BX) | \
+	 (1ULL << PERF_REG_X86_CX) | \
+	 (1ULL << PERF_REG_X86_DX) | \
+	 (1ULL << PERF_REG_X86_DI) | \
+	 (1ULL << PERF_REG_X86_SI) | \
+	 (1ULL << PERF_REG_X86_SP) | \
+	 (1ULL << PERF_REG_X86_BP) | \
+	 (1ULL << PERF_REG_X86_IP) | \
+	 (1ULL << PERF_REG_X86_FLAGS) | \
+	 (1ULL << PERF_REG_X86_R8) | \
+	 (1ULL << PERF_REG_X86_R9) | \
+	 (1ULL << PERF_REG_X86_R10) | \
+	 (1ULL << PERF_REG_X86_R11) | \
+	 (1ULL << PERF_REG_X86_R12) | \
+	 (1ULL << PERF_REG_X86_R13) | \
+	 (1ULL << PERF_REG_X86_R14) | \
+	 (1ULL << PERF_REG_X86_R15))
 /*
  * Per register state.

View File

@@ -36,16 +36,17 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
-#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
-#define ADDR BITOP_ADDR(addr)
+#define RLONG_ADDR(x) "m" (*(volatile long *) (x))
+#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x))
+#define ADDR RLONG_ADDR(addr)
 /*
  * We do the locked ops that don't return the old value as
  * a mask operation on a byte.
  */
 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr) (1 << ((nr) & 7))
 /**
@@ -73,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
 			: "memory");
 	} else {
 		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
-			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
@@ -88,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
+	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 /**
@@ -110,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
 			: "iq" ((u8)~CONST_MASK(nr)));
 	} else {
 		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
-			: BITOP_ADDR(addr)
-			: "Ir" (nr));
+			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
@@ -131,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
+	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
@@ -139,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
 	bool negative;
 	asm volatile(LOCK_PREFIX "andb %2,%1"
 		CC_SET(s)
-		: CC_OUT(s) (negative), ADDR
+		: CC_OUT(s) (negative), WBYTE_ADDR(addr)
 		: "ir" ((char) ~(1 << nr)) : "memory");
 	return negative;
 }
@@ -155,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
  * __clear_bit() is non-atomic and implies release semantics before the memory
  * operation. It can be used for an unlock if no other CPUs can concurrently
  * modify other bits in the word.
- *
- * No memory barrier is required here, because x86 cannot reorder stores past
- * older loads. Same principle as spin_unlock.
  */
 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
-	barrier();
 	__clear_bit(nr, addr);
 }
@@ -176,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
  */
 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
+	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 /**
@@ -196,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
 			: "iq" ((u8)CONST_MASK(nr)));
 	} else {
 		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
-			: BITOP_ADDR(addr)
-			: "Ir" (nr));
+			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
 	}
 }
@@ -242,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
 	asm(__ASM_SIZE(bts) " %2,%1"
 	    CC_SET(c)
-	    : CC_OUT(c) (oldbit), ADDR
-	    : "Ir" (nr));
+	    : CC_OUT(c) (oldbit)
+	    : ADDR, "Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -282,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
 	asm volatile(__ASM_SIZE(btr) " %2,%1"
 		     CC_SET(c)
-		     : CC_OUT(c) (oldbit), ADDR
-		     : "Ir" (nr));
+		     : CC_OUT(c) (oldbit)
+		     : ADDR, "Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -294,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
 	asm volatile(__ASM_SIZE(btc) " %2,%1"
 		     CC_SET(c)
-		     : CC_OUT(c) (oldbit), ADDR
-		     : "Ir" (nr) : "memory");
+		     : CC_OUT(c) (oldbit)
+		     : ADDR, "Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -326,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
 	asm volatile(__ASM_SIZE(bt) " %2,%1"
 		     CC_SET(c)
 		     : CC_OUT(c) (oldbit)
-		     : "m" (*(unsigned long *)addr), "Ir" (nr));
+		     : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
 	return oldbit;
 }

View File

@@ -226,7 +226,9 @@ struct x86_emulate_ops {
 	unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
 	void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
-	int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+	int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
+			     const char *smstate);
+	void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
 };

View File

@@ -126,7 +126,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 }
 #define KVM_PERMILLE_MMU_PAGES 20
-#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_MIN_ALLOC_MMU_PAGES 64UL
 #define KVM_MMU_HASH_SHIFT 12
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
@@ -844,9 +844,9 @@
 };
 struct kvm_arch {
-	unsigned int n_used_mmu_pages;
-	unsigned int n_requested_mmu_pages;
-	unsigned int n_max_mmu_pages;
+	unsigned long n_used_mmu_pages;
+	unsigned long n_requested_mmu_pages;
+	unsigned long n_max_mmu_pages;
 	unsigned int indirect_shadow_pages;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
@@ -1182,7 +1182,7 @@ struct kvm_x86_ops {
 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
 	int (*enable_smi_window)(struct kvm_vcpu *vcpu);
 	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
@@ -1256,8 +1256,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
-unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 bool pdptrs_changed(struct kvm_vcpu *vcpu);
@@ -1592,4 +1592,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val) \
 	*(type *)((buf) + (offset) - 0x7e00) = val
+#define GET_SMSTATE(type, buf, offset) \
+	(*(type *)((buf) + (offset) - 0x7e00))
 #endif /* _ASM_X86_KVM_HOST_H */

View File

@@ -91,11 +91,9 @@ static inline void syscall_set_return_value(struct task_struct *task,
 static inline void syscall_get_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
-					 unsigned int i, unsigned int n,
 					 unsigned long *args)
 {
-	BUG_ON(i + n > 6);
-	memcpy(args, &regs->bx + i, n * sizeof(args[0]));
+	memcpy(args, &regs->bx, 6 * sizeof(args[0]));
 }
 static inline void syscall_set_arguments(struct task_struct *task,
@@ -116,124 +114,50 @@ static inline int syscall_get_arch(void)
 static inline void syscall_get_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
-					 unsigned int i, unsigned int n,
 					 unsigned long *args)
 {
 # ifdef CONFIG_IA32_EMULATION
-	if (task->thread_info.status & TS_COMPAT)
-		switch (i) {
-		case 0:
-			if (!n--) break;
-			*args++ = regs->bx;
-		case 1:
-			if (!n--) break;
-			*args++ = regs->cx;
-		case 2:
-			if (!n--) break;
-			*args++ = regs->dx;
-		case 3:
-			if (!n--) break;
-			*args++ = regs->si;
-		case 4:
-			if (!n--) break;
-			*args++ = regs->di;
-		case 5:
-			if (!n--) break;
-			*args++ = regs->bp;
-		case 6:
-			if (!n--) break;
-		default:
-			BUG();
-			break;
-		}
-	else
+	if (task->thread_info.status & TS_COMPAT) {
+		*args++ = regs->bx;
+		*args++ = regs->cx;
+		*args++ = regs->dx;
+		*args++ = regs->si;
+		*args++ = regs->di;
+		*args   = regs->bp;
+	} else
 # endif
-		switch (i) {
-		case 0:
-			if (!n--) break;
-			*args++ = regs->di;
-		case 1:
-			if (!n--) break;
-			*args++ = regs->si;
-		case 2:
-			if (!n--) break;
-			*args++ = regs->dx;
-		case 3:
-			if (!n--) break;
-			*args++ = regs->r10;
-		case 4:
-			if (!n--) break;
-			*args++ = regs->r8;
-		case 5:
-			if (!n--) break;
-			*args++ = regs->r9;
-		case 6:
-			if (!n--) break;
-		default:
-			BUG();
-			break;
-		}
+	{
+		*args++ = regs->di;
+		*args++ = regs->si;
+		*args++ = regs->dx;
+		*args++ = regs->r10;
+		*args++ = regs->r8;
+		*args   = regs->r9;
+	}
 }
 static inline void syscall_set_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
-					 unsigned int i, unsigned int n,
 					 const unsigned long *args)
 {
 # ifdef CONFIG_IA32_EMULATION
-	if (task->thread_info.status & TS_COMPAT)
-		switch (i) {
-		case 0:
-			if (!n--) break;
-			regs->bx = *args++;
-		case 1:
-			if (!n--) break;
-			regs->cx = *args++;
-		case 2:
-			if (!n--) break;
-			regs->dx = *args++;
-		case 3:
-			if (!n--) break;
-			regs->si = *args++;
-		case 4:
-			if (!n--) break;
-			regs->di = *args++;
-		case 5:
-			if (!n--) break;
-			regs->bp = *args++;
-		case 6:
-			if (!n--) break;
-		default:
-			BUG();
-			break;
-		}
-	else
+	if (task->thread_info.status & TS_COMPAT) {
+		regs->bx = *args++;
+		regs->cx = *args++;
+		regs->dx = *args++;
+		regs->si = *args++;
+		regs->di = *args++;
+		regs->bp = *args;
+	} else
 # endif
-		switch (i) {
-		case 0:
-			if (!n--) break;
-			regs->di = *args++;
-		case 1:
-			if (!n--) break;
-			regs->si = *args++;
-		case 2:
-			if (!n--) break;
-			regs->dx = *args++;
-		case 3:
-			if (!n--) break;
-			regs->r10 = *args++;
-		case 4:
-			if (!n--) break;
-			regs->r8 = *args++;
-		case 5:
-			if (!n--) break;
-			regs->r9 = *args++;
-		case 6:
-			if (!n--) break;
-		default:
-			BUG();
-			break;
-		}
+	{
+		regs->di = *args++;
+		regs->si = *args++;
+		regs->dx = *args++;
+		regs->r10 = *args++;
+		regs->r8 = *args++;
+		regs->r9 = *args;
+	}
 }
 static inline int syscall_get_arch(void)

View File

@@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
 	__HYPERCALL_DECLS;
 	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+	if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
+		return -EINVAL;
 	asm volatile(CALL_NOSPEC
 		     : __HYPERCALL_5PARAM
 		     : [thunk_target] "a" (&hypercall_page[call])

View File

@@ -146,6 +146,7 @@
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
+#define VMX_ABORT_VMCS_CORRUPTED 3
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
 #endif /* _UAPIVMX_H */

View File

@@ -275,7 +275,7 @@ static const struct {
 	const char *option;
 	enum spectre_v2_user_cmd cmd;
 	bool secure;
-} v2_user_options[] __initdata = {
+} v2_user_options[] __initconst = {
 	{ "auto",	SPECTRE_V2_USER_CMD_AUTO,	false },
 	{ "off",	SPECTRE_V2_USER_CMD_NONE,	false },
 	{ "on",		SPECTRE_V2_USER_CMD_FORCE,	true },
@@ -419,7 +419,7 @@ static const struct {
 	const char *option;
 	enum spectre_v2_mitigation_cmd cmd;
 	bool secure;
-} mitigation_options[] __initdata = {
+} mitigation_options[] __initconst = {
 	{ "off",	SPECTRE_V2_CMD_NONE,		false },
 	{ "on",		SPECTRE_V2_CMD_FORCE,		true },
 	{ "retpoline",	SPECTRE_V2_CMD_RETPOLINE,	false },
@@ -658,7 +658,7 @@ static const char * const ssb_strings[] = {
 static const struct {
 	const char *option;
 	enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[] __initdata = {
+} ssb_mitigation_options[] __initconst = {
 	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },	/* Platform decides */
 	{ "on",		SPEC_STORE_BYPASS_CMD_ON },	/* Disable Speculative Store Bypass */
 	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },	/* Don't touch Speculative Store Bypass */

Some files were not shown because too many files have changed in this diff.