author    Mike Pagano <mpagano@gentoo.org>    2022-02-23 07:38:41 -0500
committer Mike Pagano <mpagano@gentoo.org>    2022-02-23 07:38:41 -0500
commit    cf18885d021c12b4fccd6e06066a5b3d7e3adeef (patch)
tree      c632641a98ec4004118d4818adfbde587ef4863c
parent    Linux patch 4.19.230 (diff)
download  linux-patches-cf18885d021c12b4fccd6e06066a5b3d7e3adeef.tar.gz
          linux-patches-cf18885d021c12b4fccd6e06066a5b3d7e3adeef.tar.bz2
          linux-patches-cf18885d021c12b4fccd6e06066a5b3d7e3adeef.zip
Linux patch 4.19.231
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README                    4
-rw-r--r--  1230_linux-4.19.231.patch   2055
2 files changed, 2059 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 30a2e032..67a0a028 100644
--- a/0000_README
+++ b/0000_README
@@ -959,6 +959,10 @@ Patch: 1229_linux-4.19.230.patch
From: https://www.kernel.org
Desc: Linux 4.19.230
+Patch: 1230_linux-4.19.231.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.231
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1230_linux-4.19.231.patch b/1230_linux-4.19.231.patch
new file mode 100644
index 00000000..4f497003
--- /dev/null
+++ b/1230_linux-4.19.231.patch
@@ -0,0 +1,2055 @@
+diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
+index 3fed8fdb873d7..c4ffdfc324b41 100644
+--- a/Documentation/ABI/stable/sysfs-bus-vmbus
++++ b/Documentation/ABI/stable/sysfs-bus-vmbus
+@@ -81,7 +81,9 @@ What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/latency
+ Date: September. 2017
+ KernelVersion: 4.14
+ Contact: Stephen Hemminger <sthemmin@microsoft.com>
+-Description: Channel signaling latency
++Description: Channel signaling latency. This file is available only for
++ performance critical channels (storage, network, etc.) that use
++ the monitor page mechanism.
+ Users: Debugging tools
+
+ What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/out_mask
+@@ -95,7 +97,9 @@ What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/pending
+ Date: September. 2017
+ KernelVersion: 4.14
+ Contact: Stephen Hemminger <sthemmin@microsoft.com>
+-Description: Channel interrupt pending state
++Description: Channel interrupt pending state. This file is available only for
++ performance critical channels (storage, network, etc.) that use
++ the monitor page mechanism.
+ Users: Debugging tools
+
+ What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/read_avail
+@@ -137,7 +141,9 @@ What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/monitor_id
+ Date: January. 2018
+ KernelVersion: 4.16
+ Contact: Stephen Hemminger <sthemmin@microsoft.com>
+-Description: Monitor bit associated with channel
++Description: Monitor bit associated with channel. This file is available only
++ for performance critical channels (storage, network, etc.) that
++ use the monitor page mechanism.
+ Users: Debugging tools and userspace drivers
+
+ What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/ring
+diff --git a/Makefile b/Makefile
+index 1a19d5d8840ff..165660cd857e1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 230
++SUBLEVEL = 231
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index a8269f0a87ced..47c55351df033 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -754,8 +754,10 @@ static int __init _init_clkctrl_providers(void)
+
+ for_each_matching_node(np, ti_clkctrl_match_table) {
+ ret = _setup_clkctrl_provider(np);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ break;
++ }
+ }
+
+ return ret;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+index b8dc4dbb391b6..4252119bfd901 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+@@ -41,6 +41,12 @@
+ no-map;
+ };
+
++ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
++ secmon_reserved_bl32: secmon@5300000 {
++ reg = <0x0 0x05300000 0x0 0x2000000>;
++ no-map;
++ };
++
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index d81568f783e5c..e87ae0dabce12 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -2681,12 +2681,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
+ case BARRIER_EIEIO:
+ eieio();
+ break;
++#ifdef CONFIG_PPC64
+ case BARRIER_LWSYNC:
+ asm volatile("lwsync" : : : "memory");
+ break;
+ case BARRIER_PTESYNC:
+ asm volatile("ptesync" : : : "memory");
+ break;
++#endif
+ }
+ break;
+
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index acc8d217f6565..ad3d39c00d7fc 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -171,7 +171,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+ }
+
+ if (type == PERF_TYPE_RAW)
+- config = eventsel & X86_RAW_EVENT_MASK;
++ config = eventsel & AMD64_RAW_EVENT_MASK;
+
+ pmc_reprogram_counter(pmc, type, config,
+ !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index c2529dfda3e53..11686e7684016 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -5417,6 +5417,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ spin_unlock_irq(&bfqd->lock);
+ #endif
+
++ wbt_enable_default(bfqd->queue);
++
+ kfree(bfqd);
+ }
+
+diff --git a/block/elevator.c b/block/elevator.c
+index 5b51bc5fad9fe..72ce7a4a2632e 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -877,8 +877,6 @@ void elv_unregister_queue(struct request_queue *q)
+ kobject_del(&e->kobj);
+
+ e->registered = 0;
+- /* Re-enable throttling in case elevator disabled it */
+- wbt_enable_default(q);
+ }
+ }
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 46eacba2613b8..33d3728f36222 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4613,6 +4613,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+
+ /* devices that don't properly handle TRIM commands */
+ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
++ { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, },
+
+ /*
+ * As defined, the DRAT (Deterministic Read After Trim) and RZAT
+diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
+index 80ff95f75199f..29c51762336d7 100644
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -1817,7 +1817,9 @@ static int rcar_dmac_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, dmac);
+ dmac->dev->dma_parms = &dmac->parms;
+ dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+- dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
++ ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
++ if (ret)
++ return ret;
+
+ ret = rcar_dmac_parse_of(&pdev->dev, dmac);
+ if (ret < 0)
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index fd440b35d76ed..61d72bd967543 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -265,7 +265,7 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems)
+ else
+ return (char *)ptr;
+
+- r = (unsigned long)p % align;
++ r = (unsigned long)ptr % align;
+
+ if (r == 0)
+ return (char *)ptr;
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index e67ed383e11b1..79aef5c063fae 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -193,7 +193,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+ * so don't register a backlight device
+ */
+ if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+- (rdev->pdev->device == 0x6741))
++ (rdev->pdev->device == 0x6741) &&
++ !dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
+ return;
+
+ if (!radeon_encoder->enc_priv)
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index cdd4392c589d3..a3f6933f94e30 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -350,6 +350,7 @@ static struct vmbus_channel *alloc_channel(void)
+ static void free_channel(struct vmbus_channel *channel)
+ {
+ tasklet_kill(&channel->callback_event);
++ vmbus_remove_channel_attr_group(channel);
+
+ kobject_put(&channel->kobj);
+ }
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index 7e7c8debbd285..c4ad518890243 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -392,6 +392,8 @@ void vmbus_device_unregister(struct hv_device *device_obj);
+ int vmbus_add_channel_kobj(struct hv_device *device_obj,
+ struct vmbus_channel *channel);
+
++void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);
++
+ struct vmbus_channel *relid2channel(u32 relid);
+
+ void vmbus_free_channels(void);
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 0699c60188895..51fe219c91fc3 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -609,7 +609,36 @@ static struct attribute *vmbus_dev_attrs[] = {
+ &dev_attr_device.attr,
+ NULL,
+ };
+-ATTRIBUTE_GROUPS(vmbus_dev);
++
++/*
++ * Device-level attribute_group callback function. Returns the permission for
++ * each attribute, and returns 0 if an attribute is not visible.
++ */
++static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
++ struct attribute *attr, int idx)
++{
++ struct device *dev = kobj_to_dev(kobj);
++ const struct hv_device *hv_dev = device_to_hv_device(dev);
++
++ /* Hide the monitor attributes if the monitor mechanism is not used. */
++ if (!hv_dev->channel->offermsg.monitor_allocated &&
++ (attr == &dev_attr_monitor_id.attr ||
++ attr == &dev_attr_server_monitor_pending.attr ||
++ attr == &dev_attr_client_monitor_pending.attr ||
++ attr == &dev_attr_server_monitor_latency.attr ||
++ attr == &dev_attr_client_monitor_latency.attr ||
++ attr == &dev_attr_server_monitor_conn_id.attr ||
++ attr == &dev_attr_client_monitor_conn_id.attr))
++ return 0;
++
++ return attr->mode;
++}
++
++static const struct attribute_group vmbus_dev_group = {
++ .attrs = vmbus_dev_attrs,
++ .is_visible = vmbus_dev_attr_is_visible
++};
++__ATTRIBUTE_GROUPS(vmbus_dev);
+
+ /*
+ * vmbus_uevent - add uevent for our device
+@@ -1484,10 +1513,34 @@ static struct attribute *vmbus_chan_attrs[] = {
+ NULL
+ };
+
++/*
++ * Channel-level attribute_group callback function. Returns the permission for
++ * each attribute, and returns 0 if an attribute is not visible.
++ */
++static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
++ struct attribute *attr, int idx)
++{
++ const struct vmbus_channel *channel =
++ container_of(kobj, struct vmbus_channel, kobj);
++
++ /* Hide the monitor attributes if the monitor mechanism is not used. */
++ if (!channel->offermsg.monitor_allocated &&
++ (attr == &chan_attr_pending.attr ||
++ attr == &chan_attr_latency.attr ||
++ attr == &chan_attr_monitor_id.attr))
++ return 0;
++
++ return attr->mode;
++}
++
++static struct attribute_group vmbus_chan_group = {
++ .attrs = vmbus_chan_attrs,
++ .is_visible = vmbus_chan_attr_is_visible
++};
++
+ static struct kobj_type vmbus_chan_ktype = {
+ .sysfs_ops = &vmbus_chan_sysfs_ops,
+ .release = vmbus_chan_release,
+- .default_attrs = vmbus_chan_attrs,
+ };
+
+ /*
+@@ -1495,6 +1548,7 @@ static struct kobj_type vmbus_chan_ktype = {
+ */
+ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
+ {
++ const struct device *device = &dev->device;
+ struct kobject *kobj = &channel->kobj;
+ u32 relid = channel->offermsg.child_relid;
+ int ret;
+@@ -1502,14 +1556,36 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
+ kobj->kset = dev->channels_kset;
+ ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
+ "%u", relid);
+- if (ret)
++ if (ret) {
++ kobject_put(kobj);
++ return ret;
++ }
++
++ ret = sysfs_create_group(kobj, &vmbus_chan_group);
++
++ if (ret) {
++ /*
++ * The calling functions' error handling paths will cleanup the
++ * empty channel directory.
++ */
++ kobject_put(kobj);
++ dev_err(device, "Unable to set up channel sysfs files\n");
+ return ret;
++ }
+
+ kobject_uevent(kobj, KOBJ_ADD);
+
+ return 0;
+ }
+
++/*
++ * vmbus_remove_channel_attr_group - remove the channel's attribute group
++ */
++void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
++{
++ sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
++}
++
+ /*
+ * vmbus_device_create - Creates and registers a new child device
+ * on the vmbus.
+diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
+index 2086a96307bf9..1e8890601ca3b 100644
+--- a/drivers/i2c/busses/i2c-brcmstb.c
++++ b/drivers/i2c/busses/i2c-brcmstb.c
+@@ -645,7 +645,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
+
+ /* set the data in/out register size for compatible SoCs */
+ if (of_device_is_compatible(dev->device->of_node,
+- "brcmstb,brcmper-i2c"))
++ "brcm,brcmper-i2c"))
+ dev->data_regsz = sizeof(u8);
+ else
+ dev->data_regsz = sizeof(u32);
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index 532e9d68c7042..767cdd3f773bb 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -258,3 +258,4 @@ out_iounmap:
+
+ IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
+ IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
++IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index d1cc0fdbc51c8..630f3bcba56dd 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1678,31 +1678,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ blk_status_t error = BLK_STS_OK;
+- int retries = 0;
+
+ do {
+ u32 status;
+ int err;
++ int retries = 0;
+
+- mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
++ while (retries++ <= MMC_READ_SINGLE_RETRIES) {
++ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+
+- mmc_wait_for_req(host, mrq);
++ mmc_wait_for_req(host, mrq);
+
+- err = mmc_send_status(card, &status);
+- if (err)
+- goto error_exit;
+-
+- if (!mmc_host_is_spi(host) &&
+- !mmc_blk_in_tran_state(status)) {
+- err = mmc_blk_fix_state(card, req);
++ err = mmc_send_status(card, &status);
+ if (err)
+ goto error_exit;
+- }
+
+- if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
+- continue;
++ if (!mmc_host_is_spi(host) &&
++ !mmc_blk_in_tran_state(status)) {
++ err = mmc_blk_fix_state(card, req);
++ if (err)
++ goto error_exit;
++ }
+
+- retries = 0;
++ if (!mrq->cmd->error)
++ break;
++ }
+
+ if (mrq->cmd->error ||
+ mrq->data->error ||
+diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+index 774ffa9e23f38..27bafb8fc35af 100644
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -589,6 +589,54 @@ static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
+ __raw_writel(val, ctrl->nand_fc + word * 4);
+ }
+
++static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
++{
++
++ /* Clear error addresses */
++ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
++ brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
++ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
++ brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
++}
++
++static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
++{
++ u64 err_addr;
++
++ err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
++ err_addr |= ((u64)(brcmnand_read_reg(ctrl,
++ BRCMNAND_UNCORR_EXT_ADDR)
++ & 0xffff) << 32);
++
++ return err_addr;
++}
++
++static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
++{
++ u64 err_addr;
++
++ err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
++ err_addr |= ((u64)(brcmnand_read_reg(ctrl,
++ BRCMNAND_CORR_EXT_ADDR)
++ & 0xffff) << 32);
++
++ return err_addr;
++}
++
++static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
++{
++ struct nand_chip *chip = mtd_to_nand(mtd);
++ struct brcmnand_host *host = nand_get_controller_data(chip);
++ struct brcmnand_controller *ctrl = host->ctrl;
++
++ brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
++ (host->cs << 16) | ((addr >> 32) & 0xffff));
++ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
++ brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
++ lower_32_bits(addr));
++ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
++}
++
+ static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
+ enum brcmnand_cs_reg reg)
+ {
+@@ -1217,9 +1265,12 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
+ {
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int ret;
++ u64 cmd_addr;
++
++ cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
++
++ dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
+
+- dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
+- brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
+ BUG_ON(ctrl->cmd_pending != 0);
+ ctrl->cmd_pending = cmd;
+
+@@ -1380,12 +1431,7 @@ static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
+ if (!native_cmd)
+ return;
+
+- brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+- (host->cs << 16) | ((addr >> 32) & 0xffff));
+- (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+- brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
+- (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+-
++ brcmnand_set_cmd_addr(mtd, addr);
+ brcmnand_send_cmd(host, native_cmd);
+ brcmnand_waitfunc(mtd, chip);
+
+@@ -1605,20 +1651,10 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int i, j, ret = 0;
+
+- /* Clear error addresses */
+- brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
+- brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
+- brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
+- brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
+-
+- brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+- (host->cs << 16) | ((addr >> 32) & 0xffff));
+- (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
++ brcmnand_clear_ecc_addr(ctrl);
+
+ for (i = 0; i < trans; i++, addr += FC_BYTES) {
+- brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+- lower_32_bits(addr));
+- (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
++ brcmnand_set_cmd_addr(mtd, addr);
+ /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
+ brcmnand_send_cmd(host, CMD_PAGE_READ);
+ brcmnand_waitfunc(mtd, chip);
+@@ -1637,22 +1673,16 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
+ mtd->oobsize / trans,
+ host->hwcfg.sector_size_1k);
+
+- if (!ret) {
+- *err_addr = brcmnand_read_reg(ctrl,
+- BRCMNAND_UNCORR_ADDR) |
+- ((u64)(brcmnand_read_reg(ctrl,
+- BRCMNAND_UNCORR_EXT_ADDR)
+- & 0xffff) << 32);
++ if (ret != -EBADMSG) {
++ *err_addr = brcmnand_get_uncorrecc_addr(ctrl);
++
+ if (*err_addr)
+ ret = -EBADMSG;
+ }
+
+ if (!ret) {
+- *err_addr = brcmnand_read_reg(ctrl,
+- BRCMNAND_CORR_ADDR) |
+- ((u64)(brcmnand_read_reg(ctrl,
+- BRCMNAND_CORR_EXT_ADDR)
+- & 0xffff) << 32);
++ *err_addr = brcmnand_get_correcc_addr(ctrl);
++
+ if (*err_addr)
+ ret = -EUCLEAN;
+ }
+@@ -1722,7 +1752,7 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
+ dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
+
+ try_dmaread:
+- brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
++ brcmnand_clear_ecc_addr(ctrl);
+
+ if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
+ err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
+@@ -1866,15 +1896,9 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
+ goto out;
+ }
+
+- brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+- (host->cs << 16) | ((addr >> 32) & 0xffff));
+- (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+-
+ for (i = 0; i < trans; i++, addr += FC_BYTES) {
+ /* full address MUST be set before populating FC */
+- brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+- lower_32_bits(addr));
+- (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
++ brcmnand_set_cmd_addr(mtd, addr);
+
+ if (buf) {
+ brcmnand_soc_data_bus_prepare(ctrl->soc, false);
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index 148c7a16f3188..c64b408f080a9 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -10,7 +10,6 @@
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+-
+ #include <linux/clk.h>
+ #include <linux/slab.h>
+ #include <linux/bitops.h>
+@@ -2959,10 +2958,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
+ if (!nandc->base_dma)
+ return -ENXIO;
+
+- ret = qcom_nandc_alloc(nandc);
+- if (ret)
+- goto err_nandc_alloc;
+-
+ ret = clk_prepare_enable(nandc->core_clk);
+ if (ret)
+ goto err_core_clk;
+@@ -2971,6 +2966,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
+ if (ret)
+ goto err_aon_clk;
+
++ ret = qcom_nandc_alloc(nandc);
++ if (ret)
++ goto err_nandc_alloc;
++
+ ret = qcom_nandc_setup(nandc);
+ if (ret)
+ goto err_setup;
+@@ -2982,15 +2981,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
+ return 0;
+
+ err_setup:
++ qcom_nandc_unalloc(nandc);
++err_nandc_alloc:
+ clk_disable_unprepare(nandc->aon_clk);
+ err_aon_clk:
+ clk_disable_unprepare(nandc->core_clk);
+ err_core_clk:
+- qcom_nandc_unalloc(nandc);
+-err_nandc_alloc:
+ dma_unmap_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+-
+ return ret;
+ }
+
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index 035923876c617..e3f814e83d9c8 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -249,7 +249,7 @@ static inline int __check_agg_selection_timer(struct port *port)
+ if (bond == NULL)
+ return 0;
+
+- return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
++ return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
+ }
+
+ /**
+@@ -1965,7 +1965,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
+ */
+ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
+ {
+- BOND_AD_INFO(bond).agg_select_timer = timeout;
++ atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
+ }
+
+ /**
+@@ -2249,6 +2249,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
+ spin_unlock_bh(&bond->mode_lock);
+ }
+
++/**
++ * bond_agg_timer_advance - advance agg_select_timer
++ * @bond: bonding structure
++ *
++ * Return true when agg_select_timer reaches 0.
++ */
++static bool bond_agg_timer_advance(struct bonding *bond)
++{
++ int val, nval;
++
++ while (1) {
++ val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
++ if (!val)
++ return false;
++ nval = val - 1;
++ if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
++ val, nval) == val)
++ break;
++ }
++ return nval == 0;
++}
++
+ /**
+ * bond_3ad_state_machine_handler - handle state machines timeout
+ * @bond: bonding struct to work on
+@@ -2284,9 +2306,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
+ if (!bond_has_slaves(bond))
+ goto re_arm;
+
+- /* check if agg_select_timer timer after initialize is timed out */
+- if (BOND_AD_INFO(bond).agg_select_timer &&
+- !(--BOND_AD_INFO(bond).agg_select_timer)) {
++ if (bond_agg_timer_advance(bond)) {
+ slave = bond_first_slave_rcu(bond);
+ port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
+
+diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
+index b89c474e6b6bf..70ce37cd94af5 100644
+--- a/drivers/net/dsa/lan9303-core.c
++++ b/drivers/net/dsa/lan9303-core.c
+@@ -1307,7 +1307,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
+ struct device_node *np)
+ {
+ chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
+- GPIOD_OUT_LOW);
++ GPIOD_OUT_HIGH);
+ if (IS_ERR(chip->reset_gpio))
+ return PTR_ERR(chip->reset_gpio);
+
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index d110aa616a957..f162ac7d74e59 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -4073,7 +4073,7 @@ static int macb_probe(struct platform_device *pdev)
+
+ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
+- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
++ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ bp->hw_dma_cap |= HW_DMA_CAP_64B;
+ }
+ #endif
+diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
+index 3d9e915798668..1bc09b6c308f8 100644
+--- a/drivers/net/ieee802154/at86rf230.c
++++ b/drivers/net/ieee802154/at86rf230.c
+@@ -108,6 +108,7 @@ struct at86rf230_local {
+ unsigned long cal_timeout;
+ bool is_tx;
+ bool is_tx_from_off;
++ bool was_tx;
+ u8 tx_retry;
+ struct sk_buff *tx_skb;
+ struct at86rf230_state_change tx;
+@@ -351,7 +352,11 @@ at86rf230_async_error_recover_complete(void *context)
+ if (ctx->free)
+ kfree(ctx);
+
+- ieee802154_wake_queue(lp->hw);
++ if (lp->was_tx) {
++ lp->was_tx = 0;
++ dev_kfree_skb_any(lp->tx_skb);
++ ieee802154_wake_queue(lp->hw);
++ }
+ }
+
+ static void
+@@ -360,7 +365,11 @@ at86rf230_async_error_recover(void *context)
+ struct at86rf230_state_change *ctx = context;
+ struct at86rf230_local *lp = ctx->lp;
+
+- lp->is_tx = 0;
++ if (lp->is_tx) {
++ lp->was_tx = 1;
++ lp->is_tx = 0;
++ }
++
+ at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
+ at86rf230_async_error_recover_complete);
+ }
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index 3169c174006ab..7c5db4f73cce6 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -2975,8 +2975,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
+ ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
+ ca8210_hw->phy->cca_ed_level = -9800;
+ ca8210_hw->phy->symbol_duration = 16;
+- ca8210_hw->phy->lifs_period = 40;
+- ca8210_hw->phy->sifs_period = 12;
++ ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration;
++ ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration;
+ ca8210_hw->flags =
+ IEEE802154_HW_AFILT |
+ IEEE802154_HW_OMIT_CKSUM |
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index b2434b4798468..684eec0aa0d63 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -1373,59 +1373,69 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ u16 hdr_off;
+ u32 *pkt_hdr;
+
+- /* This check is no longer done by usbnet */
+- if (skb->len < dev->net->hard_header_len)
++ /* At the end of the SKB, there's a header telling us how many packets
++ * are bundled into this buffer and where we can find an array of
++ * per-packet metadata (which contains elements encoded into u16).
++ */
++ if (skb->len < 4)
+ return 0;
+-
+ skb_trim(skb, skb->len - 4);
+ memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
+ le32_to_cpus(&rx_hdr);
+-
+ pkt_cnt = (u16)rx_hdr;
+ hdr_off = (u16)(rx_hdr >> 16);
++
++ if (pkt_cnt == 0)
++ return 0;
++
++ /* Make sure that the bounds of the metadata array are inside the SKB
++ * (and in front of the counter at the end).
++ */
++ if (pkt_cnt * 2 + hdr_off > skb->len)
++ return 0;
+ pkt_hdr = (u32 *)(skb->data + hdr_off);
+
+- while (pkt_cnt--) {
++ /* Packets must not overlap the metadata array */
++ skb_trim(skb, hdr_off);
++
++ for (; ; pkt_cnt--, pkt_hdr++) {
+ u16 pkt_len;
+
+ le32_to_cpus(pkt_hdr);
+ pkt_len = (*pkt_hdr >> 16) & 0x1fff;
+
+- /* Check CRC or runt packet */
+- if ((*pkt_hdr & AX_RXHDR_CRC_ERR) ||
+- (*pkt_hdr & AX_RXHDR_DROP_ERR)) {
+- skb_pull(skb, (pkt_len + 7) & 0xFFF8);
+- pkt_hdr++;
+- continue;
+- }
+-
+- if (pkt_cnt == 0) {
+- skb->len = pkt_len;
+- /* Skip IP alignment pseudo header */
+- skb_pull(skb, 2);
+- skb_set_tail_pointer(skb, skb->len);
+- skb->truesize = pkt_len + sizeof(struct sk_buff);
+- ax88179_rx_checksum(skb, pkt_hdr);
+- return 1;
+- }
++ if (pkt_len > skb->len)
++ return 0;
+
+- ax_skb = skb_clone(skb, GFP_ATOMIC);
+- if (ax_skb) {
++ /* Check CRC or runt packet */
++ if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
++ pkt_len >= 2 + ETH_HLEN) {
++ bool last = (pkt_cnt == 0);
++
++ if (last) {
++ ax_skb = skb;
++ } else {
++ ax_skb = skb_clone(skb, GFP_ATOMIC);
++ if (!ax_skb)
++ return 0;
++ }
+ ax_skb->len = pkt_len;
+ /* Skip IP alignment pseudo header */
+ skb_pull(ax_skb, 2);
+ skb_set_tail_pointer(ax_skb, ax_skb->len);
+ ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
+ ax88179_rx_checksum(ax_skb, pkt_hdr);
++
++ if (last)
++ return 1;
++
+ usbnet_skb_return(dev, ax_skb);
+- } else {
+- return 0;
+ }
+
+- skb_pull(skb, (pkt_len + 7) & 0xFFF8);
+- pkt_hdr++;
++ /* Trim this packet away from the SKB */
++ if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
++ return 0;
+ }
+- return 1;
+ }
+
+ static struct sk_buff *
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 54b37a30df18b..c2e872f926f1c 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1358,6 +1358,8 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
+ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
+ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
++ {QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support*/
++ {QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */
+ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
+ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index c44108823f796..a43333512edc9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1549,6 +1549,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
+ out_unbind:
+ complete(&drv->request_firmware_complete);
+ device_release_driver(drv->trans->dev);
++ /* drv has just been freed by the release */
++ failure = false;
+ free:
+ if (failure)
+ iwl_dealloc_ucode(drv);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index 31e72e1ff1e26..80ae33bba6a86 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -310,8 +310,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+ /* This may fail if AMT took ownership of the device */
+ if (iwl_pcie_prepare_card_hw(trans)) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+- ret = -EIO;
+- goto out;
++ return -EIO;
+ }
+
+ iwl_enable_rfkill_int(trans);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index fcda33482887b..2d2afc1758304 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1363,8 +1363,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ /* This may fail if AMT took ownership of the device */
+ if (iwl_pcie_prepare_card_hw(trans)) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+- ret = -EIO;
+- goto out;
++ return -EIO;
+ }
+
+ iwl_enable_rfkill_int(trans);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index e64310f2296f8..a0a805a5ab6b2 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3463,7 +3463,14 @@ static void nvme_async_event_work(struct work_struct *work)
+ container_of(work, struct nvme_ctrl, async_event_work);
+
+ nvme_aen_uevent(ctrl);
+- ctrl->ops->submit_async_event(ctrl);
++
++ /*
++ * The transport drivers must guarantee AER submission here is safe by
++ * flushing ctrl async_event_work after changing the controller state
++ * from LIVE and before freeing the admin queue.
++ */
++ if (ctrl->state == NVME_CTRL_LIVE)
++ ctrl->ops->submit_async_event(ctrl);
+ }
+
+ static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 1f41cf80f827c..55f4999525037 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -1050,6 +1050,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
+ struct nvme_rdma_ctrl, err_work);
+
+ nvme_stop_keep_alive(&ctrl->ctrl);
++ flush_work(&ctrl->ctrl.async_event_work);
+ nvme_rdma_teardown_io_queues(ctrl, false);
+ nvme_start_queues(&ctrl->ctrl);
+ nvme_rdma_teardown_admin_queue(ctrl, false);
+diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
+index b7b2e811d5471..6efab7a06c5fc 100644
+--- a/drivers/parisc/ccio-dma.c
++++ b/drivers/parisc/ccio-dma.c
+@@ -1010,7 +1010,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ ioc->usg_calls++;
+ #endif
+
+- while(sg_dma_len(sglist) && nents--) {
++ while (nents && sg_dma_len(sglist)) {
+
+ #ifdef CCIO_COLLECT_STATS
+ ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
+@@ -1018,6 +1018,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ ccio_unmap_page(dev, sg_dma_address(sglist),
+ sg_dma_len(sglist), direction, 0);
+ ++sglist;
++ nents--;
+ }
+
+ DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
+diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
+index 0f19cc75cc0cf..f675dfe045118 100644
+--- a/drivers/parisc/sba_iommu.c
++++ b/drivers/parisc/sba_iommu.c
+@@ -1063,7 +1063,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ spin_unlock_irqrestore(&ioc->res_lock, flags);
+ #endif
+
+- while (sg_dma_len(sglist) && nents--) {
++ while (nents && sg_dma_len(sglist)) {
+
+ sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
+ direction, 0);
+@@ -1072,6 +1072,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ ioc->usingle_calls--; /* kluge since call is unmap_sg() */
+ #endif
+ ++sglist;
++ nents--;
+ }
+
+ DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
+diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
+index 0809ae2aa9b14..51cc985216ff3 100644
+--- a/drivers/tty/serial/8250/8250_gsc.c
++++ b/drivers/tty/serial/8250/8250_gsc.c
+@@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
+ unsigned long address;
+ int err;
+
+-#ifdef CONFIG_64BIT
++#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC)
+ if (!dev->irq && (dev->id.sversion == 0xad))
+ dev->irq = iosapic_serial_irq(dev);
+ #endif
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 1967d5fa681ad..eb2f8e84ffc9e 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -4956,6 +4956,10 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
+ lock_page(page);
+ if (!PageUptodate(page)) {
+ unlock_page(page);
++ btrfs_err(fs_info,
++ "send: IO error at offset %llu for inode %llu root %llu",
++ page_offset(page), sctx->cur_ino,
++ sctx->send_root->root_key.objectid);
+ put_page(page);
+ ret = -EIO;
+ break;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 2a45e0d3e593c..6c492fca60c41 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -390,9 +390,12 @@ static int ext4_valid_extent_idx(struct inode *inode,
+
+ static int ext4_valid_extent_entries(struct inode *inode,
+ struct ext4_extent_header *eh,
+- int depth)
++ ext4_fsblk_t *pblk, int depth)
+ {
+ unsigned short entries;
++ ext4_lblk_t lblock = 0;
++ ext4_lblk_t prev = 0;
++
+ if (eh->eh_entries == 0)
+ return 1;
+
+@@ -403,32 +406,36 @@ static int ext4_valid_extent_entries(struct inode *inode,
+ struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
+ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ ext4_fsblk_t pblock = 0;
+- ext4_lblk_t lblock = 0;
+- ext4_lblk_t prev = 0;
+- int len = 0;
+ while (entries) {
+ if (!ext4_valid_extent(inode, ext))
+ return 0;
+
+ /* Check for overlapping extents */
+ lblock = le32_to_cpu(ext->ee_block);
+- len = ext4_ext_get_actual_len(ext);
+ if ((lblock <= prev) && prev) {
+ pblock = ext4_ext_pblock(ext);
+ es->s_last_error_block = cpu_to_le64(pblock);
+ return 0;
+ }
++ prev = lblock + ext4_ext_get_actual_len(ext) - 1;
+ ext++;
+ entries--;
+- prev = lblock + len - 1;
+ }
+ } else {
+ struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
+ while (entries) {
+ if (!ext4_valid_extent_idx(inode, ext_idx))
+ return 0;
++
++ /* Check for overlapping index extents */
++ lblock = le32_to_cpu(ext_idx->ei_block);
++ if ((lblock <= prev) && prev) {
++ *pblk = ext4_idx_pblock(ext_idx);
++ return 0;
++ }
+ ext_idx++;
+ entries--;
++ prev = lblock;
+ }
+ }
+ return 1;
+@@ -462,7 +469,7 @@ static int __ext4_ext_check(const char *function, unsigned int line,
+ error_msg = "invalid eh_entries";
+ goto corrupted;
+ }
+- if (!ext4_valid_extent_entries(inode, eh, depth)) {
++ if (!ext4_valid_extent_entries(inode, eh, &pblk, depth)) {
+ error_msg = "invalid extent entries";
+ goto corrupted;
+ }
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index a968b8b4b982f..10bc04af28824 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1629,14 +1629,14 @@ no_open:
+ if (!res) {
+ inode = d_inode(dentry);
+ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
+- !S_ISDIR(inode->i_mode))
++ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
+ res = ERR_PTR(-ENOTDIR);
+ else if (inode && S_ISREG(inode->i_mode))
+ res = ERR_PTR(-EOPENSTALE);
+ } else if (!IS_ERR(res)) {
+ inode = d_inode(res);
+ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
+- !S_ISDIR(inode->i_mode)) {
++ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
+ dput(res);
+ res = ERR_PTR(-ENOTDIR);
+ } else if (inode && S_ISREG(inode->i_mode)) {
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 2cdd8883b7c5d..592b95ab378bd 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -787,12 +787,9 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
+ goto out_no_update;
+
+ /* Flush out writes to the server in order to update c/mtime. */
+- if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
+- S_ISREG(inode->i_mode)) {
+- err = filemap_write_and_wait(inode->i_mapping);
+- if (err)
+- goto out;
+- }
++ if ((request_mask & (STATX_CTIME | STATX_MTIME)) &&
++ S_ISREG(inode->i_mode))
++ filemap_write_and_wait(inode->i_mapping);
+
+ /*
+ * We may force a getattr if the user cares about atime.
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 1d1d393f4208d..ddb379abd919d 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -687,9 +687,14 @@ int dquot_quota_sync(struct super_block *sb, int type)
+ /* This is not very clever (and fast) but currently I don't know about
+ * any other simple way of getting quota data to disk and we must get
+ * them there for userspace to be visible... */
+- if (sb->s_op->sync_fs)
+- sb->s_op->sync_fs(sb, 1);
+- sync_blockdev(sb->s_bdev);
++ if (sb->s_op->sync_fs) {
++ ret = sb->s_op->sync_fs(sb, 1);
++ if (ret)
++ return ret;
++ }
++ ret = sync_blockdev(sb->s_bdev);
++ if (ret)
++ return ret;
+
+ /*
+ * Now when everything is written we can discard the pagecache so
+diff --git a/fs/super.c b/fs/super.c
+index 9fb4553c46e63..8dc26e23f1a35 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -1404,11 +1404,9 @@ static void lockdep_sb_freeze_acquire(struct super_block *sb)
+ percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
+ }
+
+-static void sb_freeze_unlock(struct super_block *sb)
++static void sb_freeze_unlock(struct super_block *sb, int level)
+ {
+- int level;
+-
+- for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
++ for (level--; level >= 0; level--)
+ percpu_up_write(sb->s_writers.rw_sem + level);
+ }
+
+@@ -1479,7 +1477,14 @@ int freeze_super(struct super_block *sb)
+ sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
+
+ /* All writers are done so after syncing there won't be dirty data */
+- sync_filesystem(sb);
++ ret = sync_filesystem(sb);
++ if (ret) {
++ sb->s_writers.frozen = SB_UNFROZEN;
++ sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
++ wake_up(&sb->s_writers.wait_unfrozen);
++ deactivate_locked_super(sb);
++ return ret;
++ }
+
+ /* Now wait for internal filesystem counter */
+ sb->s_writers.frozen = SB_FREEZE_FS;
+@@ -1491,7 +1496,7 @@ int freeze_super(struct super_block *sb)
+ printk(KERN_ERR
+ "VFS:Filesystem freeze failed\n");
+ sb->s_writers.frozen = SB_UNFROZEN;
+- sb_freeze_unlock(sb);
++ sb_freeze_unlock(sb, SB_FREEZE_FS);
+ wake_up(&sb->s_writers.wait_unfrozen);
+ deactivate_locked_super(sb);
+ return ret;
+@@ -1542,7 +1547,7 @@ static int thaw_super_locked(struct super_block *sb)
+ }
+
+ sb->s_writers.frozen = SB_UNFROZEN;
+- sb_freeze_unlock(sb);
++ sb_freeze_unlock(sb, SB_FREEZE_FS);
+ out:
+ wake_up(&sb->s_writers.wait_unfrozen);
+ deactivate_locked_super(sb);
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 761d0f85c4a50..f92d5ae6d04e7 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1389,7 +1389,6 @@ extern struct pid *cad_pid;
+ #define PF_MEMALLOC 0x00000800 /* Allocating memory */
+ #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
+ #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
+-#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
+ #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
+ #define PF_FROZEN 0x00010000 /* Frozen for system suspend */
+ #define PF_KSWAPD 0x00020000 /* I am kswapd */
+diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
+index fc3111515f5cb..732bc3b4606bf 100644
+--- a/include/net/bond_3ad.h
++++ b/include/net/bond_3ad.h
+@@ -265,7 +265,7 @@ struct ad_system {
+
+ struct ad_bond_info {
+ struct ad_system system; /* 802.3ad system structure */
+- u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
++ atomic_t agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
+ u16 aggregator_identifier;
+ };
+
+diff --git a/kernel/async.c b/kernel/async.c
+index a893d6170944f..4bf1b00a28d86 100644
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -191,9 +191,6 @@ static async_cookie_t __async_schedule(async_func_t func, void *data, struct asy
+ atomic_inc(&entry_count);
+ spin_unlock_irqrestore(&async_lock, flags);
+
+- /* mark that this task has queued an async job, used by module init */
+- current->flags |= PF_USED_ASYNC;
+-
+ /* schedule for execution */
+ queue_work(system_unbound_wq, &entry->work);
+
+diff --git a/kernel/module.c b/kernel/module.c
+index 68637e661d75c..42a604401c4dd 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3526,12 +3526,6 @@ static noinline int do_init_module(struct module *mod)
+ }
+ freeinit->module_init = mod->init_layout.base;
+
+- /*
+- * We want to find out whether @mod uses async during init. Clear
+- * PF_USED_ASYNC. async_schedule*() will set it.
+- */
+- current->flags &= ~PF_USED_ASYNC;
+-
+ do_mod_ctors(mod);
+ /* Start the module */
+ if (mod->init != NULL)
+@@ -3557,22 +3551,13 @@ static noinline int do_init_module(struct module *mod)
+
+ /*
+ * We need to finish all async code before the module init sequence
+- * is done. This has potential to deadlock. For example, a newly
+- * detected block device can trigger request_module() of the
+- * default iosched from async probing task. Once userland helper
+- * reaches here, async_synchronize_full() will wait on the async
+- * task waiting on request_module() and deadlock.
+- *
+- * This deadlock is avoided by perfomring async_synchronize_full()
+- * iff module init queued any async jobs. This isn't a full
+- * solution as it will deadlock the same if module loading from
+- * async jobs nests more than once; however, due to the various
+- * constraints, this hack seems to be the best option for now.
+- * Please refer to the following thread for details.
++ * is done. This has potential to deadlock if synchronous module
++ * loading is requested from async (which is not allowed!).
+ *
+- * http://thread.gmane.org/gmane.linux.kernel/1420814
++ * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
++ * request_module() from async workers") for more details.
+ */
+- if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
++ if (!mod->async_probe_requested)
+ async_synchronize_full();
+
+ ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 17e337a22c239..19a6b088f1e72 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -232,6 +232,10 @@ __setup("trace_clock=", set_trace_boot_clock);
+
+ static int __init set_tracepoint_printk(char *str)
+ {
++ /* Ignore the "tp_printk_stop_on_boot" param */
++ if (*str == '_')
++ return 0;
++
+ if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
+ tracepoint_printk = 1;
+ return 1;
+diff --git a/kernel/tsacct.c b/kernel/tsacct.c
+index 370724b453918..8d7c6d3f1daa7 100644
+--- a/kernel/tsacct.c
++++ b/kernel/tsacct.c
+@@ -46,11 +46,10 @@ void bacct_add_tsk(struct user_namespace *user_ns,
+ /* Convert to seconds for btime */
+ do_div(delta, USEC_PER_SEC);
+ stats->ac_btime = get_seconds() - delta;
+- if (thread_group_leader(tsk)) {
++ if (tsk->flags & PF_EXITING)
+ stats->ac_exitcode = tsk->exit_code;
+- if (tsk->flags & PF_FORKNOEXEC)
+- stats->ac_flag |= AFORK;
+- }
++ if (thread_group_leader(tsk) && (tsk->flags & PF_FORKNOEXEC))
++ stats->ac_flag |= AFORK;
+ if (tsk->flags & PF_SUPERPRIV)
+ stats->ac_flag |= ASU;
+ if (tsk->flags & PF_DUMPCORE)
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index 39e6e978029c1..4391962230319 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -393,6 +393,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
+ return 0;
+ pipe->nrbufs++;
+ buf->ops = &page_cache_pipe_buf_ops;
++ buf->flags = 0;
+ get_page(buf->page = page);
+ buf->offset = offset;
+ buf->len = bytes;
+@@ -517,6 +518,7 @@ static size_t push_pipe(struct iov_iter *i, size_t size,
+ break;
+ pipe->nrbufs++;
+ pipe->bufs[idx].ops = &default_pipe_buf_ops;
++ pipe->bufs[idx].flags = 0;
+ pipe->bufs[idx].page = page;
+ pipe->bufs[idx].offset = 0;
+ if (left <= PAGE_SIZE) {
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 567fdfd9678d5..a2bf5e4e9fbee 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -80,6 +80,7 @@ static void ax25_kill_by_device(struct net_device *dev)
+ {
+ ax25_dev *ax25_dev;
+ ax25_cb *s;
++ struct sock *sk;
+
+ if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
+ return;
+@@ -88,13 +89,15 @@ static void ax25_kill_by_device(struct net_device *dev)
+ again:
+ ax25_for_each(s, &ax25_list) {
+ if (s->ax25_dev == ax25_dev) {
++ sk = s->sk;
++ sock_hold(sk);
+ spin_unlock_bh(&ax25_list_lock);
+- lock_sock(s->sk);
++ lock_sock(sk);
+ s->ax25_dev = NULL;
+- release_sock(s->sk);
++ release_sock(sk);
+ ax25_disconnect(s, ENETUNREACH);
+ spin_lock_bh(&ax25_list_lock);
+-
++ sock_put(sk);
+ /* The entry could have been deleted from the
+ * list meanwhile and thus the next pointer is
+ * no longer valid. Play it safe and restart
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index 3978a5e8d261c..2ed6000126408 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -219,13 +219,17 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
++ struct net_device *dev;
++
+ /*
+ * only add a note to our monitor buffer if:
+ * 1) this is the dev we received on
+ * 2) its after the last_rx delta
+ * 3) our rx_dropped count has gone up
+ */
+- if ((new_stat->dev == napi->dev) &&
++ /* Paired with WRITE_ONCE() in dropmon_net_event() */
++ dev = READ_ONCE(new_stat->dev);
++ if ((dev == napi->dev) &&
+ (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
+ (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
+ trace_drop_common(NULL, NULL);
+@@ -340,7 +344,10 @@ static int dropmon_net_event(struct notifier_block *ev_block,
+ mutex_lock(&trace_state_mutex);
+ list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
+ if (new_stat->dev == dev) {
+- new_stat->dev = NULL;
++
++ /* Paired with READ_ONCE() in trace_napi_poll_hit() */
++ WRITE_ONCE(new_stat->dev, NULL);
++
+ if (trace_state == TRACE_OFF) {
+ list_del_rcu(&new_stat->list);
+ kfree_rcu(new_stat, rcu);
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 276442443d322..28d171e6e10b9 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -177,16 +177,23 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
+ struct sock *sk = NULL;
+ struct inet_sock *isk;
+ struct hlist_nulls_node *hnode;
+- int dif = skb->dev->ifindex;
++ int dif, sdif;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
++ dif = inet_iif(skb);
++ sdif = inet_sdif(skb);
+ pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
+ (int)ident, &ip_hdr(skb)->daddr, dif);
+ #if IS_ENABLED(CONFIG_IPV6)
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
++ dif = inet6_iif(skb);
++ sdif = inet6_sdif(skb);
+ pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
+ (int)ident, &ipv6_hdr(skb)->daddr, dif);
+ #endif
++ } else {
++ pr_err("ping: protocol(%x) is not supported\n", ntohs(skb->protocol));
++ return NULL;
+ }
+
+ read_lock_bh(&ping_table.lock);
+@@ -226,7 +233,7 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
+ }
+
+ if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
+- sk->sk_bound_dev_if != inet_sdif(skb))
++ sk->sk_bound_dev_if != sdif)
+ continue;
+
+ sock_hold(sk);
+diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
+index 1e5e2e4be0b27..e85b5f57d3e92 100644
+--- a/net/ipv4/xfrm4_policy.c
++++ b/net/ipv4/xfrm4_policy.c
+@@ -17,6 +17,7 @@
+ #include <net/xfrm.h>
+ #include <net/ip.h>
+ #include <net/l3mdev.h>
++#include <net/inet_ecn.h>
+
+ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
+ int tos, int oif,
+@@ -126,7 +127,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
+ fl4->flowi4_proto = iph->protocol;
+ fl4->daddr = reverse ? iph->saddr : iph->daddr;
+ fl4->saddr = reverse ? iph->daddr : iph->saddr;
+- fl4->flowi4_tos = iph->tos;
++ fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK;
+
+ if (!ip_is_fragment(iph)) {
+ switch (iph->protocol) {
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index a937d4f75613f..8cb62805fd684 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -394,6 +394,15 @@ static int sctp_packet(struct nf_conn *ct,
+ pr_debug("Setting vtag %x for dir %d\n",
+ ih->init_tag, !dir);
+ ct->proto.sctp.vtag[!dir] = ih->init_tag;
++
++ /* don't renew timeout on init retransmit so
++ * port reuse by client or NAT middlebox cannot
++ * keep entry alive indefinitely (incl. nat info).
++ */
++ if (new_state == SCTP_CONNTRACK_CLOSED &&
++ old_state == SCTP_CONNTRACK_CLOSED &&
++ nf_ct_is_confirmed(ct))
++ ignore = true;
+ }
+
+ ct->proto.sctp.state = new_state;
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 26710b297dcbb..ad0773b20d831 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -609,15 +609,24 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
+ restart_act_graph:
+ for (i = 0; i < nr_actions; i++) {
+ const struct tc_action *a = actions[i];
++ int repeat_ttl;
+
+ if (jmp_prgcnt > 0) {
+ jmp_prgcnt -= 1;
+ continue;
+ }
++
++ repeat_ttl = 32;
+ repeat:
+ ret = a->ops->act(skb, a, res);
+- if (ret == TC_ACT_REPEAT)
+- goto repeat; /* we need a ttl - JHS */
++
++ if (unlikely(ret == TC_ACT_REPEAT)) {
++ if (--repeat_ttl != 0)
++ goto repeat;
++ /* suspicious opcode, stop pipeline */
++ net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
++ return TC_ACT_OK;
++ }
+
+ if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
+ jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 37329e11dc3cc..22931a5f62bc6 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1230,6 +1230,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
+ sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
+ sock->state = SS_UNCONNECTED;
+ vsock_transport_cancel_pkt(vsk);
++ vsock_remove_connected(vsk);
+ goto out_wait;
+ } else if (timeout == 0) {
+ err = -ETIMEDOUT;
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index 486e135d3e30a..65df6141397fe 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -73,5 +73,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, sign-compare)
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-zero-length)
+ KBUILD_CFLAGS += $(call cc-disable-warning, uninitialized)
+ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
++KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
+ endif
+ endif
+diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
+index 389814b02d06b..8c7e51a6273cc 100644
+--- a/scripts/kconfig/preprocess.c
++++ b/scripts/kconfig/preprocess.c
+@@ -138,7 +138,7 @@ static char *do_lineno(int argc, char *argv[])
+ static char *do_shell(int argc, char *argv[])
+ {
+ FILE *p;
+- char buf[256];
++ char buf[4096];
+ char *cmd;
+ size_t nread;
+ int i;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 7d4b6c31dfe70..686f0fed6f6e4 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1674,6 +1674,7 @@ static struct snd_pci_quirk probe_mask_list[] = {
+ /* forced codec slots */
+ SND_PCI_QUIRK(0x1043, 0x1262, "ASUS W5Fm", 0x103),
+ SND_PCI_QUIRK(0x1046, 0x1262, "ASUS W5F", 0x103),
++ SND_PCI_QUIRK(0x1558, 0x0351, "Schenker Dock 15", 0x105),
+ /* WinFast VP200 H (Teradici) user reported broken communication */
+ SND_PCI_QUIRK(0x3a21, 0x040d, "WinFast VP200 H", 0x101),
+ {}
+@@ -1859,8 +1860,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
+
+ assign_position_fix(chip, check_position_fix(chip, position_fix[dev]));
+
+- check_probe_mask(chip, dev);
+-
+ if (single_cmd < 0) /* allow fallback to single_cmd at errors */
+ chip->fallback_to_single_cmd = 1;
+ else /* explicitly set to single_cmd or not */
+@@ -1889,6 +1888,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
+ chip->bus.needs_damn_long_delay = 1;
+ }
+
++ check_probe_mask(chip, dev);
++
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
+ dev_err(card->dev, "Error creating device [card]!\n");
+diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
+index ef8fd331526b0..f5dcd625e4355 100644
+--- a/sound/soc/soc-ops.c
++++ b/sound/soc/soc-ops.c
+@@ -314,7 +314,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
+ unsigned int sign_bit = mc->sign_bit;
+ unsigned int mask = (1 << fls(max)) - 1;
+ unsigned int invert = mc->invert;
+- int err;
++ int err, ret;
+ bool type_2r = false;
+ unsigned int val2 = 0;
+ unsigned int val, val_mask;
+@@ -356,12 +356,18 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
+ err = snd_soc_component_update_bits(component, reg, val_mask, val);
+ if (err < 0)
+ return err;
++ ret = err;
+
+- if (type_2r)
++ if (type_2r) {
+ err = snd_soc_component_update_bits(component, reg2, val_mask,
+- val2);
++ val2);
++ /* Don't discard any error code or drop change flag */
++ if (ret == 0 || err < 0) {
++ ret = err;
++ }
++ }
+
+- return err;
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_put_volsw);
+
+@@ -517,7 +523,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
+ unsigned int mask = (1 << fls(max)) - 1;
+ unsigned int invert = mc->invert;
+ unsigned int val, val_mask;
+- int ret;
++ int err, ret;
+
+ if (invert)
+ val = (max - ucontrol->value.integer.value[0]) & mask;
+@@ -526,9 +532,10 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
+ val_mask = mask << shift;
+ val = val << shift;
+
+- ret = snd_soc_component_update_bits(component, reg, val_mask, val);
+- if (ret < 0)
+- return ret;
++ err = snd_soc_component_update_bits(component, reg, val_mask, val);
++ if (err < 0)
++ return err;
++ ret = err;
+
+ if (snd_soc_volsw_is_stereo(mc)) {
+ if (invert)
+@@ -538,8 +545,12 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
+ val_mask = mask << shift;
+ val = val << shift;
+
+- ret = snd_soc_component_update_bits(component, rreg, val_mask,
++ err = snd_soc_component_update_bits(component, rreg, val_mask,
+ val);
++ /* Don't discard any error code or drop change flag */
++ if (ret == 0 || err < 0) {
++ ret = err;
++ }
+ }
+
+ return ret;
+diff --git a/tools/lib/subcmd/subcmd-util.h b/tools/lib/subcmd/subcmd-util.h
+index 794a375dad360..b2aec04fce8f6 100644
+--- a/tools/lib/subcmd/subcmd-util.h
++++ b/tools/lib/subcmd/subcmd-util.h
+@@ -50,15 +50,8 @@ static NORETURN inline void die(const char *err, ...)
+ static inline void *xrealloc(void *ptr, size_t size)
+ {
+ void *ret = realloc(ptr, size);
+- if (!ret && !size)
+- ret = realloc(ptr, 1);
+- if (!ret) {
+- ret = realloc(ptr, size);
+- if (!ret && !size)
+- ret = realloc(ptr, 1);
+- if (!ret)
+- die("Out of memory, realloc failed");
+- }
++ if (!ret)
++ die("Out of memory, realloc failed");
+ return ret;
+ }
+
+diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
+index 232e958ec4547..b0b91d9b0dc21 100755
+--- a/tools/testing/selftests/zram/zram.sh
++++ b/tools/testing/selftests/zram/zram.sh
+@@ -2,9 +2,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ TCID="zram.sh"
+
+-# Kselftest framework requirement - SKIP code is 4.
+-ksft_skip=4
+-
+ . ./zram_lib.sh
+
+ run_zram () {
+@@ -18,14 +15,4 @@ echo ""
+
+ check_prereqs
+
+-# check zram module exists
+-MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko
+-if [ -f $MODULE_PATH ]; then
+- run_zram
+-elif [ -b /dev/zram0 ]; then
+- run_zram
+-else
+- echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
+- echo "$TCID : CONFIG_ZRAM is not set"
+- exit $ksft_skip
+-fi
++run_zram
+diff --git a/tools/testing/selftests/zram/zram01.sh b/tools/testing/selftests/zram/zram01.sh
+index b9566a6478a9c..8abc9965089d1 100755
+--- a/tools/testing/selftests/zram/zram01.sh
++++ b/tools/testing/selftests/zram/zram01.sh
+@@ -42,9 +42,7 @@ zram_algs="lzo"
+
+ zram_fill_fs()
+ {
+- local mem_free0=$(free -m | awk 'NR==2 {print $4}')
+-
+- for i in $(seq 0 $(($dev_num - 1))); do
++ for i in $(seq $dev_start $dev_end); do
+ echo "fill zram$i..."
+ local b=0
+ while [ true ]; do
+@@ -54,29 +52,17 @@ zram_fill_fs()
+ b=$(($b + 1))
+ done
+ echo "zram$i can be filled with '$b' KB"
+- done
+
+- local mem_free1=$(free -m | awk 'NR==2 {print $4}')
+- local used_mem=$(($mem_free0 - $mem_free1))
++ local mem_used_total=`awk '{print $3}' "/sys/block/zram$i/mm_stat"`
++ local v=$((100 * 1024 * $b / $mem_used_total))
++ if [ "$v" -lt 100 ]; then
++ echo "FAIL compression ratio: 0.$v:1"
++ ERR_CODE=-1
++ return
++ fi
+
+- local total_size=0
+- for sm in $zram_sizes; do
+- local s=$(echo $sm | sed 's/M//')
+- total_size=$(($total_size + $s))
++ echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK"
+ done
+-
+- echo "zram used ${used_mem}M, zram disk sizes ${total_size}M"
+-
+- local v=$((100 * $total_size / $used_mem))
+-
+- if [ "$v" -lt 100 ]; then
+- echo "FAIL compression ratio: 0.$v:1"
+- ERR_CODE=-1
+- zram_cleanup
+- return
+- fi
+-
+- echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK"
+ }
+
+ check_prereqs
+@@ -90,7 +76,6 @@ zram_mount
+
+ zram_fill_fs
+ zram_cleanup
+-zram_unload
+
+ if [ $ERR_CODE -ne 0 ]; then
+ echo "$TCID : [FAIL]"
+diff --git a/tools/testing/selftests/zram/zram02.sh b/tools/testing/selftests/zram/zram02.sh
+index 74569b883737f..3768cfd2e5f83 100755
+--- a/tools/testing/selftests/zram/zram02.sh
++++ b/tools/testing/selftests/zram/zram02.sh
+@@ -45,7 +45,6 @@ zram_set_memlimit
+ zram_makeswap
+ zram_swapoff
+ zram_cleanup
+-zram_unload
+
+ if [ $ERR_CODE -ne 0 ]; then
+ echo "$TCID : [FAIL]"
+diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
+index 9e73a4fb9b0aa..130d193cbd727 100755
+--- a/tools/testing/selftests/zram/zram_lib.sh
++++ b/tools/testing/selftests/zram/zram_lib.sh
+@@ -14,12 +14,17 @@
+ # Author: Alexey Kodanev <alexey.kodanev@oracle.com>
+ # Modified: Naresh Kamboju <naresh.kamboju@linaro.org>
+
+-MODULE=0
+ dev_makeswap=-1
+ dev_mounted=-1
+-
++dev_start=0
++dev_end=-1
++module_load=-1
++sys_control=-1
+ # Kselftest framework requirement - SKIP code is 4.
+ ksft_skip=4
++kernel_version=`uname -r | cut -d'.' -f1,2`
++kernel_major=${kernel_version%.*}
++kernel_minor=${kernel_version#*.}
+
+ trap INT
+
+@@ -34,68 +39,104 @@ check_prereqs()
+ fi
+ }
+
++kernel_gte()
++{
++ major=${1%.*}
++ minor=${1#*.}
++
++ if [ $kernel_major -gt $major ]; then
++ return 0
++ elif [[ $kernel_major -eq $major && $kernel_minor -ge $minor ]]; then
++ return 0
++ fi
++
++ return 1
++}
++
+ zram_cleanup()
+ {
+ echo "zram cleanup"
+ local i=
+- for i in $(seq 0 $dev_makeswap); do
++ for i in $(seq $dev_start $dev_makeswap); do
+ swapoff /dev/zram$i
+ done
+
+- for i in $(seq 0 $dev_mounted); do
++ for i in $(seq $dev_start $dev_mounted); do
+ umount /dev/zram$i
+ done
+
+- for i in $(seq 0 $(($dev_num - 1))); do
++ for i in $(seq $dev_start $dev_end); do
+ echo 1 > /sys/block/zram${i}/reset
+ rm -rf zram$i
+ done
+
+-}
++ if [ $sys_control -eq 1 ]; then
++ for i in $(seq $dev_start $dev_end); do
++ echo $i > /sys/class/zram-control/hot_remove
++ done
++ fi
+
+-zram_unload()
+-{
+- if [ $MODULE -ne 0 ] ; then
+- echo "zram rmmod zram"
++ if [ $module_load -eq 1 ]; then
+ rmmod zram > /dev/null 2>&1
+ fi
+ }
+
+ zram_load()
+ {
+- # check zram module exists
+- MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko
+- if [ -f $MODULE_PATH ]; then
+- MODULE=1
+- echo "create '$dev_num' zram device(s)"
+- modprobe zram num_devices=$dev_num
+- if [ $? -ne 0 ]; then
+- echo "failed to insert zram module"
+- exit 1
+- fi
+-
+- dev_num_created=$(ls /dev/zram* | wc -w)
++ echo "create '$dev_num' zram device(s)"
++
++ # zram module loaded, new kernel
++ if [ -d "/sys/class/zram-control" ]; then
++ echo "zram modules already loaded, kernel supports" \
++ "zram-control interface"
++ dev_start=$(ls /dev/zram* | wc -w)
++ dev_end=$(($dev_start + $dev_num - 1))
++ sys_control=1
++
++ for i in $(seq $dev_start $dev_end); do
++ cat /sys/class/zram-control/hot_add > /dev/null
++ done
++
++ echo "all zram devices (/dev/zram$dev_start~$dev_end" \
++ "successfully created"
++ return 0
++ fi
+
+- if [ "$dev_num_created" -ne "$dev_num" ]; then
+- echo "unexpected num of devices: $dev_num_created"
+- ERR_CODE=-1
++ # detect old kernel or built-in
++ modprobe zram num_devices=$dev_num
++ if [ ! -d "/sys/class/zram-control" ]; then
++ if grep -q '^zram' /proc/modules; then
++ rmmod zram > /dev/null 2>&1
++ if [ $? -ne 0 ]; then
++ echo "zram module is being used on old kernel" \
++ "without zram-control interface"
++ exit $ksft_skip
++ fi
+ else
+- echo "zram load module successful"
++ echo "test needs CONFIG_ZRAM=m on old kernel without" \
++ "zram-control interface"
++ exit $ksft_skip
+ fi
+- elif [ -b /dev/zram0 ]; then
+- echo "/dev/zram0 device file found: OK"
+- else
+- echo "ERROR: No zram.ko module or no /dev/zram0 device found"
+- echo "$TCID : CONFIG_ZRAM is not set"
+- exit 1
++ modprobe zram num_devices=$dev_num
+ fi
++
++ module_load=1
++ dev_end=$(($dev_num - 1))
++ echo "all zram devices (/dev/zram0~$dev_end) successfully created"
+ }
+
+ zram_max_streams()
+ {
+ echo "set max_comp_streams to zram device(s)"
+
+- local i=0
++ kernel_gte 4.7
++ if [ $? -eq 0 ]; then
++ echo "The device attribute max_comp_streams was"\
++ "deprecated in 4.7"
++ return 0
++ fi
++
++ local i=$dev_start
+ for max_s in $zram_max_streams; do
+ local sys_path="/sys/block/zram${i}/max_comp_streams"
+ echo $max_s > $sys_path || \
+@@ -107,7 +148,7 @@ zram_max_streams()
+ echo "FAIL can't set max_streams '$max_s', get $max_stream"
+
+ i=$(($i + 1))
+- echo "$sys_path = '$max_streams' ($i/$dev_num)"
++ echo "$sys_path = '$max_streams'"
+ done
+
+ echo "zram max streams: OK"
+@@ -117,15 +158,16 @@ zram_compress_alg()
+ {
+ echo "test that we can set compression algorithm"
+
+- local algs=$(cat /sys/block/zram0/comp_algorithm)
++ local i=$dev_start
++ local algs=$(cat /sys/block/zram${i}/comp_algorithm)
+ echo "supported algs: $algs"
+- local i=0
++
+ for alg in $zram_algs; do
+ local sys_path="/sys/block/zram${i}/comp_algorithm"
+ echo "$alg" > $sys_path || \
+ echo "FAIL can't set '$alg' to $sys_path"
+ i=$(($i + 1))
+- echo "$sys_path = '$alg' ($i/$dev_num)"
++ echo "$sys_path = '$alg'"
+ done
+
+ echo "zram set compression algorithm: OK"
+@@ -134,14 +176,14 @@ zram_compress_alg()
+ zram_set_disksizes()
+ {
+ echo "set disk size to zram device(s)"
+- local i=0
++ local i=$dev_start
+ for ds in $zram_sizes; do
+ local sys_path="/sys/block/zram${i}/disksize"
+ echo "$ds" > $sys_path || \
+ echo "FAIL can't set '$ds' to $sys_path"
+
+ i=$(($i + 1))
+- echo "$sys_path = '$ds' ($i/$dev_num)"
++ echo "$sys_path = '$ds'"
+ done
+
+ echo "zram set disksizes: OK"
+@@ -151,14 +193,14 @@ zram_set_memlimit()
+ {
+ echo "set memory limit to zram device(s)"
+
+- local i=0
++ local i=$dev_start
+ for ds in $zram_mem_limits; do
+ local sys_path="/sys/block/zram${i}/mem_limit"
+ echo "$ds" > $sys_path || \
+ echo "FAIL can't set '$ds' to $sys_path"
+
+ i=$(($i + 1))
+- echo "$sys_path = '$ds' ($i/$dev_num)"
++ echo "$sys_path = '$ds'"
+ done
+
+ echo "zram set memory limit: OK"
+@@ -167,8 +209,8 @@ zram_set_memlimit()
+ zram_makeswap()
+ {
+ echo "make swap with zram device(s)"
+- local i=0
+- for i in $(seq 0 $(($dev_num - 1))); do
++ local i=$dev_start
++ for i in $(seq $dev_start $dev_end); do
+ mkswap /dev/zram$i > err.log 2>&1
+ if [ $? -ne 0 ]; then
+ cat err.log
+@@ -191,7 +233,7 @@ zram_makeswap()
+ zram_swapoff()
+ {
+ local i=
+- for i in $(seq 0 $dev_makeswap); do
++ for i in $(seq $dev_start $dev_end); do
+ swapoff /dev/zram$i > err.log 2>&1
+ if [ $? -ne 0 ]; then
+ cat err.log
+@@ -205,7 +247,7 @@ zram_swapoff()
+
+ zram_makefs()
+ {
+- local i=0
++ local i=$dev_start
+ for fs in $zram_filesystems; do
+ # if requested fs not supported default it to ext2
+ which mkfs.$fs > /dev/null 2>&1 || fs=ext2
+@@ -224,7 +266,7 @@ zram_makefs()
+ zram_mount()
+ {
+ local i=0
+- for i in $(seq 0 $(($dev_num - 1))); do
++ for i in $(seq $dev_start $dev_end); do
+ echo "mount /dev/zram$i"
+ mkdir zram$i
+ mount /dev/zram$i zram$i > /dev/null || \