author | Mike Pagano <mpagano@gentoo.org> | 2021-03-11 09:08:06 -0500 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2021-03-11 09:08:06 -0500 |
commit | b63a5b91be775cc41ad9d0017ff1d80514d42771 (patch) | |
tree | 6bce29707051db8325fb3bdcbe8850ff404ce4df | |
parent | Linux patch 5.4.104 (diff) | |
Linux patch 5.4.105
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4 |
-rw-r--r-- | 1104_linux-5.4.105.patch | 1283 |
2 files changed, 1287 insertions, 0 deletions
diff --git a/0000_README b/0000_README index af6a29b7..3db6e30d 100644 --- a/0000_README +++ b/0000_README @@ -459,6 +459,10 @@ Patch: 1103_linux-5.4.104.patch From: http://www.kernel.org Desc: Linux 5.4.104 +Patch: 1104_linux-5.4.105.patch +From: http://www.kernel.org +Desc: Linux 5.4.105 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1104_linux-5.4.105.patch b/1104_linux-5.4.105.patch new file mode 100644 index 00000000..d24dd8b0 --- /dev/null +++ b/1104_linux-5.4.105.patch @@ -0,0 +1,1283 @@ +diff --git a/Makefile b/Makefile +index e94dcf2d77f55..e27d031f3241e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 104 ++SUBLEVEL = 105 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h +index 8def0e3d690fd..b0b9bb31c3364 100644 +--- a/drivers/acpi/acpica/acobject.h ++++ b/drivers/acpi/acpica/acobject.h +@@ -283,6 +283,7 @@ struct acpi_object_addr_handler { + acpi_adr_space_handler handler; + struct acpi_namespace_node *node; /* Parent device */ + void *context; ++ acpi_mutex context_mutex; + acpi_adr_space_setup setup; + union acpi_operand_object *region_list; /* Regions using this handler */ + union acpi_operand_object *next; +diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c +index 3ef4e27995f0d..78550f5004c9e 100644 +--- a/drivers/acpi/acpica/evhandler.c ++++ b/drivers/acpi/acpica/evhandler.c +@@ -489,6 +489,13 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node, + + /* Init handler obj */ + ++ status = ++ acpi_os_create_mutex(&handler_obj->address_space.context_mutex); ++ if (ACPI_FAILURE(status)) { ++ acpi_ut_remove_reference(handler_obj); ++ goto unlock_and_exit; ++ } ++ + handler_obj->address_space.space_id = (u8)space_id; + handler_obj->address_space.handler_flags = flags; + handler_obj->address_space.region_list = NULL; +diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c +index 45dc797df05de..50782033012b2 100644 +--- a/drivers/acpi/acpica/evregion.c ++++ b/drivers/acpi/acpica/evregion.c +@@ -111,6 +111,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, + union acpi_operand_object *region_obj2; + void *region_context = NULL; + struct acpi_connection_info *context; ++ acpi_mutex context_mutex; ++ u8 context_locked; + acpi_physical_address address; + + ACPI_FUNCTION_TRACE(ev_address_space_dispatch); +@@ -135,6 +137,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, + } + + context = handler_desc->address_space.context; ++ context_mutex = handler_desc->address_space.context_mutex; ++ context_locked = FALSE; + + /* + * It may be the case that the region has never been initialized. +@@ -203,6 +207,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, + handler = handler_desc->address_space.handler; + address = (region_obj->region.address + region_offset); + ++ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, ++ "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", ++ ®ion_obj->region.handler->address_space, handler, ++ ACPI_FORMAT_UINT64(address), ++ acpi_ut_get_region_name(region_obj->region. 
++ space_id))); ++ ++ if (!(handler_desc->address_space.handler_flags & ++ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { ++ /* ++ * For handlers other than the default (supplied) handlers, we must ++ * exit the interpreter because the handler *might* block -- we don't ++ * know what it will do, so we can't hold the lock on the interpreter. ++ */ ++ acpi_ex_exit_interpreter(); ++ } ++ + /* + * Special handling for generic_serial_bus and general_purpose_io: + * There are three extra parameters that must be passed to the +@@ -211,6 +232,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, + * 2) Length of the above buffer + * 3) Actual access length from the access_as() op + * ++ * Since we pass these extra parameters via the context, which is ++ * shared between threads, we must lock the context to avoid these ++ * parameters being changed from another thread before the handler ++ * has completed running. ++ * + * In addition, for general_purpose_io, the Address and bit_width fields + * are defined as follows: + * 1) Address is the pin number index of the field (bit offset from +@@ -220,6 +246,14 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, + if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) && + context && field_obj) { + ++ status = ++ acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER); ++ if (ACPI_FAILURE(status)) { ++ goto re_enter_interpreter; ++ } ++ ++ context_locked = TRUE; ++ + /* Get the Connection (resource_template) buffer */ + + context->connection = field_obj->field.resource_buffer; +@@ -229,6 +263,14 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, + if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) && + context && field_obj) { + ++ status = ++ acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER); ++ if (ACPI_FAILURE(status)) { ++ goto re_enter_interpreter; ++ } ++ ++ context_locked = TRUE; ++ + /* Get the Connection (resource_template) buffer */ + + context->connection = field_obj->field.resource_buffer; +@@ -238,28 +280,15 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, + bit_width = field_obj->field.bit_length; + } + +- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, +- "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", +- ®ion_obj->region.handler->address_space, handler, +- ACPI_FORMAT_UINT64(address), +- acpi_ut_get_region_name(region_obj->region. +- space_id))); +- +- if (!(handler_desc->address_space.handler_flags & +- ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { +- /* +- * For handlers other than the default (supplied) handlers, we must +- * exit the interpreter because the handler *might* block -- we don't +- * know what it will do, so we can't hold the lock on the interpreter. +- */ +- acpi_ex_exit_interpreter(); +- } +- + /* Call the handler */ + + status = handler(function, address, bit_width, value, context, + region_obj2->extra.region_context); + ++ if (context_locked) { ++ acpi_os_release_mutex(context_mutex); ++ } ++ + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]", + acpi_ut_get_region_name(region_obj->region. 
+@@ -276,6 +305,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, + } + } + ++re_enter_interpreter: + if (!(handler_desc->address_space.handler_flags & + ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { + /* +diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c +index 47265b073e6ff..6e0d2a98c4ade 100644 +--- a/drivers/acpi/acpica/evxfregn.c ++++ b/drivers/acpi/acpica/evxfregn.c +@@ -201,6 +201,8 @@ acpi_remove_address_space_handler(acpi_handle device, + + /* Now we can delete the handler object */ + ++ acpi_os_release_mutex(handler_obj->address_space. ++ context_mutex); + acpi_ut_remove_reference(handler_obj); + goto unlock_and_exit; + } +diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c +index 55af78b55c513..301ffe5b8feb0 100644 +--- a/drivers/acpi/video_detect.c ++++ b/drivers/acpi/video_detect.c +@@ -143,6 +143,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = { + }, + { + .callback = video_detect_force_vendor, ++ .ident = "GIGABYTE GB-BXBT-2807", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"), ++ }, ++ }, ++ { + .ident = "Sony VPCEH3U1E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +index c8fb21cc0d6ff..f84049119f1c1 100644 +--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +@@ -581,8 +581,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu) + if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) + gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); + +- gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100); +- + /* Enable USE_RETENTION_FLOPS */ + gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000); + +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 33183933337af..d004f5645b30c 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -365,6 +365,7 @@ + #define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803 + #define USB_DEVICE_ID_DRAGONRISE_GAMECUBE1 0x1843 + #define USB_DEVICE_ID_DRAGONRISE_GAMECUBE2 0x1844 ++#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE3 0x1846 + + #define USB_VENDOR_ID_DWAV 0x0eef + #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001 +@@ -640,6 +641,8 @@ + #define USB_DEVICE_ID_INNEX_GENESIS_ATARI 0x4745 + + #define USB_VENDOR_ID_ITE 0x048d ++#define I2C_VENDOR_ID_ITE 0x103c ++#define I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15 0x184f + #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386 + #define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350 + #define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a +diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c +index fc75f30f537c9..92d7ecd41a78f 100644 +--- a/drivers/hid/hid-mf.c ++++ b/drivers/hid/hid-mf.c +@@ -153,6 +153,8 @@ static const struct hid_device_id mf_devices[] = { + .driver_data = HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2), + .driver_data = 0 }, /* No quirk required */ ++ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3), ++ .driver_data = HID_QUIRK_MULTI_INPUT }, + { } + }; + MODULE_DEVICE_TABLE(hid, mf_devices); +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c +index 60d188a704e5e..f35d919c4ebab 100644 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -72,6 +72,7 @@ static const struct hid_device_id hid_quirks[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_REDRAGON_SEYMUR2), 
HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER), HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET }, +@@ -491,6 +492,7 @@ static const struct hid_device_id hid_have_special_driver[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3) }, + #endif + #if IS_ENABLED(CONFIG_HID_MICROSOFT) + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, +diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c +index 592176aff0270..96898983db990 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-core.c ++++ b/drivers/hid/i2c-hid/i2c-hid-core.c +@@ -173,6 +173,8 @@ static const struct i2c_hid_quirks { + I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, + { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, ++ { I2C_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15, ++ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, + { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118, + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, + { USB_VENDOR_ID_ELAN, HID_ANY_ID, +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index 7b724f7b27a99..c392930253a30 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -1469,25 +1469,27 @@ static bool increase_address_space(struct protection_domain *domain, + bool ret = false; + u64 *pte; + ++ pte = (void *)get_zeroed_page(gfp); ++ if (!pte) ++ return false; ++ + spin_lock_irqsave(&domain->lock, flags); + + if (address <= PM_LEVEL_SIZE(domain->mode) || + WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) + goto out; + +- pte = (void *)get_zeroed_page(gfp); +- if (!pte) +- goto out; +- + *pte = PM_LEVEL_PDE(domain->mode, + iommu_virt_to_phys(domain->pt_root)); + domain->pt_root = pte; + domain->mode += 1; + ++ pte = NULL; + ret = true; + + out: + spin_unlock_irqrestore(&domain->lock, flags); ++ free_page((unsigned long)pte); + + return ret; + } +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index 98be040cf958c..06b382304d926 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -888,24 +888,24 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type) + EXPORT_SYMBOL_GPL(dm_table_set_type); + + /* validate the dax capability of the target device span */ +-int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, ++int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) + { + int blocksize = *(int *) data, id; + bool rc; + + id = dax_read_lock(); +- rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len); ++ rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len); + dax_read_unlock(id); + + return rc; + } + + /* 
Check devices support synchronous DAX */ +-static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev, +- sector_t start, sector_t len, void *data) ++static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev, ++ sector_t start, sector_t len, void *data) + { +- return dev->dax_dev && dax_synchronous(dev->dax_dev); ++ return !dev->dax_dev || !dax_synchronous(dev->dax_dev); + } + + bool dm_table_supports_dax(struct dm_table *t, +@@ -922,7 +922,7 @@ bool dm_table_supports_dax(struct dm_table *t, + return false; + + if (!ti->type->iterate_devices || +- !ti->type->iterate_devices(ti, iterate_fn, blocksize)) ++ ti->type->iterate_devices(ti, iterate_fn, blocksize)) + return false; + } + +@@ -996,7 +996,7 @@ static int dm_table_determine_type(struct dm_table *t) + verify_bio_based: + /* We must use this table as bio-based */ + t->type = DM_TYPE_BIO_BASED; +- if (dm_table_supports_dax(t, device_supports_dax, &page_size) || ++ if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) || + (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) { + t->type = DM_TYPE_DAX_BIO_BASED; + } else { +@@ -1376,6 +1376,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) + return &t->targets[(KEYS_PER_NODE * n) + k]; + } + ++/* ++ * type->iterate_devices() should be called when the sanity check needs to ++ * iterate and check all underlying data devices. iterate_devices() will ++ * iterate all underlying data devices until it encounters a non-zero return ++ * code, returned by whether the input iterate_devices_callout_fn, or ++ * iterate_devices() itself internally. ++ * ++ * For some target type (e.g. dm-stripe), one call of iterate_devices() may ++ * iterate multiple underlying devices internally, in which case a non-zero ++ * return code returned by iterate_devices_callout_fn will stop the iteration ++ * in advance. ++ * ++ * Cases requiring _any_ underlying device supporting some kind of attribute, ++ * should use the iteration structure like dm_table_any_dev_attr(), or call ++ * it directly. @func should handle semantics of positive examples, e.g. ++ * capable of something. ++ * ++ * Cases requiring _all_ underlying devices supporting some kind of attribute, ++ * should use the iteration structure like dm_table_supports_nowait() or ++ * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that ++ * uses an @anti_func that handle semantics of counter examples, e.g. not ++ * capable of something. 
So: return !dm_table_any_dev_attr(t, anti_func, data); ++ */ ++static bool dm_table_any_dev_attr(struct dm_table *t, ++ iterate_devices_callout_fn func, void *data) ++{ ++ struct dm_target *ti; ++ unsigned int i; ++ ++ for (i = 0; i < dm_table_get_num_targets(t); i++) { ++ ti = dm_table_get_target(t, i); ++ ++ if (ti->type->iterate_devices && ++ ti->type->iterate_devices(ti, func, data)) ++ return true; ++ } ++ ++ return false; ++} ++ + static int count_device(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) + { +@@ -1412,13 +1452,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table) + return true; + } + +-static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev, +- sector_t start, sector_t len, void *data) ++static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev, ++ sector_t start, sector_t len, void *data) + { + struct request_queue *q = bdev_get_queue(dev->bdev); + enum blk_zoned_model *zoned_model = data; + +- return q && blk_queue_zoned_model(q) == *zoned_model; ++ return !q || blk_queue_zoned_model(q) != *zoned_model; + } + + static bool dm_table_supports_zoned_model(struct dm_table *t, +@@ -1435,37 +1475,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t, + return false; + + if (!ti->type->iterate_devices || +- !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model)) ++ ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model)) + return false; + } + + return true; + } + +-static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, +- sector_t start, sector_t len, void *data) ++static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, ++ sector_t start, sector_t len, void *data) + { + struct request_queue *q = bdev_get_queue(dev->bdev); + unsigned int *zone_sectors = data; + +- return q && blk_queue_zone_sectors(q) == *zone_sectors; +-} +- +-static bool dm_table_matches_zone_sectors(struct dm_table *t, +- unsigned int zone_sectors) +-{ +- struct dm_target *ti; +- unsigned i; +- +- for (i = 0; i < dm_table_get_num_targets(t); i++) { +- ti = dm_table_get_target(t, i); +- +- if (!ti->type->iterate_devices || +- !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors)) +- return false; +- } +- +- return true; ++ return !q || blk_queue_zone_sectors(q) != *zone_sectors; + } + + static int validate_hardware_zoned_model(struct dm_table *table, +@@ -1485,7 +1508,7 @@ static int validate_hardware_zoned_model(struct dm_table *table, + if (!zone_sectors || !is_power_of_2(zone_sectors)) + return -EINVAL; + +- if (!dm_table_matches_zone_sectors(table, zone_sectors)) { ++ if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) { + DMERR("%s: zone sectors is not consistent across all devices", + dm_device_name(table->md)); + return -EINVAL; +@@ -1675,29 +1698,12 @@ static int device_dax_write_cache_enabled(struct dm_target *ti, + return false; + } + +-static int dm_table_supports_dax_write_cache(struct dm_table *t) +-{ +- struct dm_target *ti; +- unsigned i; +- +- for (i = 0; i < dm_table_get_num_targets(t); i++) { +- ti = dm_table_get_target(t, i); +- +- if (ti->type->iterate_devices && +- ti->type->iterate_devices(ti, +- device_dax_write_cache_enabled, NULL)) +- return true; +- } +- +- return false; +-} +- +-static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, +- sector_t start, sector_t len, void *data) ++static int device_is_rotational(struct dm_target *ti, struct 
dm_dev *dev, ++ sector_t start, sector_t len, void *data) + { + struct request_queue *q = bdev_get_queue(dev->bdev); + +- return q && blk_queue_nonrot(q); ++ return q && !blk_queue_nonrot(q); + } + + static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, +@@ -1708,35 +1714,18 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, + return q && !blk_queue_add_random(q); + } + +-static bool dm_table_all_devices_attribute(struct dm_table *t, +- iterate_devices_callout_fn func) +-{ +- struct dm_target *ti; +- unsigned i; +- +- for (i = 0; i < dm_table_get_num_targets(t); i++) { +- ti = dm_table_get_target(t, i); +- +- if (!ti->type->iterate_devices || +- !ti->type->iterate_devices(ti, func, NULL)) +- return false; +- } +- +- return true; +-} +- +-static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev, ++static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) + { + char b[BDEVNAME_SIZE]; + + /* For now, NVMe devices are the only devices of this class */ +- return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0); ++ return (strncmp(bdevname(dev->bdev, b), "nvme", 4) != 0); + } + + static bool dm_table_does_not_support_partial_completion(struct dm_table *t) + { +- return dm_table_all_devices_attribute(t, device_no_partial_completion); ++ return !dm_table_any_dev_attr(t, device_is_partial_completion, NULL); + } + + static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, +@@ -1863,27 +1852,6 @@ static int device_requires_stable_pages(struct dm_target *ti, + return q && bdi_cap_stable_pages_required(q->backing_dev_info); + } + +-/* +- * If any underlying device requires stable pages, a table must require +- * them as well. Only targets that support iterate_devices are considered: +- * don't want error, zero, etc to require stable pages. +- */ +-static bool dm_table_requires_stable_pages(struct dm_table *t) +-{ +- struct dm_target *ti; +- unsigned i; +- +- for (i = 0; i < dm_table_get_num_targets(t); i++) { +- ti = dm_table_get_target(t, i); +- +- if (ti->type->iterate_devices && +- ti->type->iterate_devices(ti, device_requires_stable_pages, NULL)) +- return true; +- } +- +- return false; +-} +- + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, + struct queue_limits *limits) + { +@@ -1916,22 +1884,22 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, + } + blk_queue_write_cache(q, wc, fua); + +- if (dm_table_supports_dax(t, device_supports_dax, &page_size)) { ++ if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) { + blk_queue_flag_set(QUEUE_FLAG_DAX, q); +- if (dm_table_supports_dax(t, device_dax_synchronous, NULL)) ++ if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL)) + set_dax_synchronous(t->md->dax_dev); + } + else + blk_queue_flag_clear(QUEUE_FLAG_DAX, q); + +- if (dm_table_supports_dax_write_cache(t)) ++ if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) + dax_write_cache(t->md->dax_dev, true); + + /* Ensure that all underlying devices are non-rotational. 
*/ +- if (dm_table_all_devices_attribute(t, device_is_nonrot)) +- blk_queue_flag_set(QUEUE_FLAG_NONROT, q); +- else ++ if (dm_table_any_dev_attr(t, device_is_rotational, NULL)) + blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); ++ else ++ blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + + if (!dm_table_supports_write_same(t)) + q->limits.max_write_same_sectors = 0; +@@ -1943,8 +1911,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, + /* + * Some devices don't use blk_integrity but still want stable pages + * because they do their own checksumming. ++ * If any underlying device requires stable pages, a table must require ++ * them as well. Only targets that support iterate_devices are considered: ++ * don't want error, zero, etc to require stable pages. + */ +- if (dm_table_requires_stable_pages(t)) ++ if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL)) + q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; + else + q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; +@@ -1955,7 +1926,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, + * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not + * have it set. + */ +- if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) ++ if (blk_queue_add_random(q) && ++ dm_table_any_dev_attr(t, device_is_not_random, NULL)) + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); + + /* +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index de32f8553735f..530c0fe142291 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -1139,7 +1139,7 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd + if (!map) + goto out; + +- ret = dm_table_supports_dax(map, device_supports_dax, &blocksize); ++ ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize); + + out: + dm_put_live_table(md, srcu_idx); +diff --git a/drivers/md/dm.h b/drivers/md/dm.h +index d7c4f6606b5fc..9fbf87e04019c 100644 +--- a/drivers/md/dm.h ++++ b/drivers/md/dm.h +@@ -74,7 +74,7 @@ void dm_table_free_md_mempools(struct dm_table *t); + struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); + bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn, + int *blocksize); +-int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, ++int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data); + + void dm_lock_md_type(struct mapped_device *md); +diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c +index 7e0b0b7cc2a35..ead0acb7807c8 100644 +--- a/drivers/media/pci/cx23885/cx23885-core.c ++++ b/drivers/media/pci/cx23885/cx23885-core.c +@@ -2074,6 +2074,10 @@ static struct { + * 0x1451 is PCI ID for the IOMMU found on Ryzen + */ + { PCI_VENDOR_ID_AMD, 0x1451 }, ++ /* According to sudo lspci -nn, ++ * 0x1423 is the PCI ID for the IOMMU found on Kaveri ++ */ ++ { PCI_VENDOR_ID_AMD, 0x1423 }, + }; + + static bool cx23885_does_need_dma_reset(void) +diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c +index 414dcbd3c3c25..8b355fc0607b7 100644 +--- a/drivers/misc/eeprom/eeprom_93xx46.c ++++ b/drivers/misc/eeprom/eeprom_93xx46.c +@@ -35,6 +35,10 @@ static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = { + EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH, + }; + ++static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = { ++ .quirks = 
EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE, ++}; ++ + struct eeprom_93xx46_dev { + struct spi_device *spi; + struct eeprom_93xx46_platform_data *pdata; +@@ -55,6 +59,11 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev) + return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH; + } + ++static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev) ++{ ++ return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE; ++} ++ + static int eeprom_93xx46_read(void *priv, unsigned int off, + void *val, size_t count) + { +@@ -96,6 +105,11 @@ static int eeprom_93xx46_read(void *priv, unsigned int off, + dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n", + cmd_addr, edev->spi->max_speed_hz); + ++ if (has_quirk_extra_read_cycle(edev)) { ++ cmd_addr <<= 1; ++ bits += 1; ++ } ++ + spi_message_init(&m); + + t[0].tx_buf = (char *)&cmd_addr; +@@ -363,6 +377,7 @@ static void select_deassert(void *context) + static const struct of_device_id eeprom_93xx46_of_table[] = { + { .compatible = "eeprom-93xx46", }, + { .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, }, ++ { .compatible = "microchip,93lc46b", .data = µchip_93lc46b_data, }, + {} + }; + MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table); +diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c +index a5137845a1c78..6793fb8fe976b 100644 +--- a/drivers/mmc/host/sdhci-of-dwcmshc.c ++++ b/drivers/mmc/host/sdhci-of-dwcmshc.c +@@ -58,6 +58,7 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = { + static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = { + .ops = &sdhci_dwcmshc_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, ++ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + }; + + static int dwcmshc_probe(struct platform_device *pdev) +diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c +index fc1706d0647d7..58c9623c3a916 100644 +--- a/drivers/net/wireless/marvell/mwifiex/pcie.c ++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c +@@ -377,6 +377,8 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev) + clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); + clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); + mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); ++ ++ card->pci_reset_ongoing = true; + } + + /* +@@ -405,6 +407,8 @@ static void mwifiex_pcie_reset_done(struct pci_dev *pdev) + dev_err(&pdev->dev, "reinit failed: %d\n", ret); + else + mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); ++ ++ card->pci_reset_ongoing = false; + } + + static const struct pci_error_handlers mwifiex_pcie_err_handler = { +@@ -2995,7 +2999,19 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) + int ret; + u32 fw_status; + +- cancel_work_sync(&card->work); ++ /* Perform the cancel_work_sync() only when we're not resetting ++ * the card. It's because that function never returns if we're ++ * in reset path. If we're here when resetting the card, it means ++ * that we failed to reset the card (reset failure path). 
++ */ ++ if (!card->pci_reset_ongoing) { ++ mwifiex_dbg(adapter, MSG, "performing cancel_work_sync()...\n"); ++ cancel_work_sync(&card->work); ++ mwifiex_dbg(adapter, MSG, "cancel_work_sync() done\n"); ++ } else { ++ mwifiex_dbg(adapter, MSG, ++ "skipped cancel_work_sync() because we're in card reset failure path\n"); ++ } + + ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); + if (fw_status == FIRMWARE_READY_PCIE) { +diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h +index f7ce9b6db6b41..72d0c01ff3592 100644 +--- a/drivers/net/wireless/marvell/mwifiex/pcie.h ++++ b/drivers/net/wireless/marvell/mwifiex/pcie.h +@@ -391,6 +391,8 @@ struct pcie_service_card { + struct mwifiex_msix_context share_irq_ctx; + struct work_struct work; + unsigned long work_flags; ++ ++ bool pci_reset_ongoing; + }; + + static inline int +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index abc342db3b337..fc18738dcf8ff 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -3164,7 +3164,8 @@ static const struct pci_device_id nvme_id_table[] = { + { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, + { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ +- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, ++ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | ++ NVME_QUIRK_NO_NS_DESC_LIST, }, + { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ +@@ -3178,6 +3179,9 @@ static const struct pci_device_id nvme_id_table[] = { + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, ++ { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ ++ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | ++ NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */ + .driver_data = NVME_QUIRK_LIGHTNVM, }, + { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index c98067579e9f3..53376bcda1f3f 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -4055,6 +4055,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183, + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */ + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0, + quirk_dma_func1_alias); ++/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c135 */ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215, ++ quirk_dma_func1_alias); + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */ + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220, + quirk_dma_func1_alias); +diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c +index 7fa27e7536917..d27a564389a47 100644 +--- a/drivers/platform/x86/acer-wmi.c ++++ b/drivers/platform/x86/acer-wmi.c +@@ -30,6 +30,7 @@ + #include <linux/input/sparse-keymap.h> + #include <acpi/video.h> + ++ACPI_MODULE_NAME(KBUILD_MODNAME); + MODULE_AUTHOR("Carlos Corbacho"); + MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver"); + MODULE_LICENSE("GPL"); +@@ -80,7 +81,7 @@ MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); + + enum acer_wmi_event_ids { + WMID_HOTKEY_EVENT = 0x1, +- WMID_ACCEL_EVENT = 0x5, ++ WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5, + }; + + static const struct key_entry acer_wmi_keymap[] __initconst = { +@@ -128,7 +129,9 @@ struct 
event_return_value { + u8 function; + u8 key_num; + u16 device_state; +- u32 reserved; ++ u16 reserved1; ++ u8 kbd_dock_state; ++ u8 reserved2; + } __attribute__((packed)); + + /* +@@ -206,14 +209,13 @@ struct hotkey_function_type_aa { + /* + * Interface capability flags + */ +-#define ACER_CAP_MAILLED (1<<0) +-#define ACER_CAP_WIRELESS (1<<1) +-#define ACER_CAP_BLUETOOTH (1<<2) +-#define ACER_CAP_BRIGHTNESS (1<<3) +-#define ACER_CAP_THREEG (1<<4) +-#define ACER_CAP_ACCEL (1<<5) +-#define ACER_CAP_RFBTN (1<<6) +-#define ACER_CAP_ANY (0xFFFFFFFF) ++#define ACER_CAP_MAILLED BIT(0) ++#define ACER_CAP_WIRELESS BIT(1) ++#define ACER_CAP_BLUETOOTH BIT(2) ++#define ACER_CAP_BRIGHTNESS BIT(3) ++#define ACER_CAP_THREEG BIT(4) ++#define ACER_CAP_SET_FUNCTION_MODE BIT(5) ++#define ACER_CAP_KBD_DOCK BIT(6) + + /* + * Interface type flags +@@ -236,6 +238,7 @@ static int mailled = -1; + static int brightness = -1; + static int threeg = -1; + static int force_series; ++static int force_caps = -1; + static bool ec_raw_mode; + static bool has_type_aa; + static u16 commun_func_bitmap; +@@ -245,11 +248,13 @@ module_param(mailled, int, 0444); + module_param(brightness, int, 0444); + module_param(threeg, int, 0444); + module_param(force_series, int, 0444); ++module_param(force_caps, int, 0444); + module_param(ec_raw_mode, bool, 0444); + MODULE_PARM_DESC(mailled, "Set initial state of Mail LED"); + MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness"); + MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware"); + MODULE_PARM_DESC(force_series, "Force a different laptop series"); ++MODULE_PARM_DESC(force_caps, "Force the capability bitmask to this value"); + MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode"); + + struct acer_data { +@@ -319,6 +324,15 @@ static int __init dmi_matched(const struct dmi_system_id *dmi) + return 1; + } + ++static int __init set_force_caps(const struct dmi_system_id *dmi) ++{ ++ if (force_caps == -1) { ++ force_caps = (uintptr_t)dmi->driver_data; ++ pr_info("Found %s, set force_caps to 0x%x\n", dmi->ident, force_caps); ++ } ++ return 1; ++} ++ + static struct quirk_entry quirk_unknown = { + }; + +@@ -497,6 +511,33 @@ static const struct dmi_system_id acer_quirks[] __initconst = { + }, + .driver_data = &quirk_acer_travelmate_2490, + }, ++ { ++ .callback = set_force_caps, ++ .ident = "Acer Aspire Switch 10E SW3-016", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-016"), ++ }, ++ .driver_data = (void *)ACER_CAP_KBD_DOCK, ++ }, ++ { ++ .callback = set_force_caps, ++ .ident = "Acer Aspire Switch 10 SW5-012", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"), ++ }, ++ .driver_data = (void *)ACER_CAP_KBD_DOCK, ++ }, ++ { ++ .callback = set_force_caps, ++ .ident = "Acer One 10 (S1003)", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), ++ }, ++ .driver_data = (void *)ACER_CAP_KBD_DOCK, ++ }, + {} + }; + +@@ -1253,10 +1294,8 @@ static void __init type_aa_dmi_decode(const struct dmi_header *header, void *d) + interface->capability |= ACER_CAP_THREEG; + if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH) + interface->capability |= ACER_CAP_BLUETOOTH; +- if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN) { +- interface->capability |= ACER_CAP_RFBTN; ++ if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN) + commun_func_bitmap &= ~ACER_WMID3_GDS_RFBTN; +- } + + commun_fn_key_number = 
type_aa->commun_fn_key_number; + } +@@ -1520,7 +1559,7 @@ static int acer_gsensor_event(void) + struct acpi_buffer output; + union acpi_object out_obj[5]; + +- if (!has_cap(ACER_CAP_ACCEL)) ++ if (!acer_wmi_accel_dev) + return -1; + + output.length = sizeof(out_obj); +@@ -1543,6 +1582,71 @@ static int acer_gsensor_event(void) + return 0; + } + ++/* ++ * Switch series keyboard dock status ++ */ ++static int acer_kbd_dock_state_to_sw_tablet_mode(u8 kbd_dock_state) ++{ ++ switch (kbd_dock_state) { ++ case 0x01: /* Docked, traditional clamshell laptop mode */ ++ return 0; ++ case 0x04: /* Stand-alone tablet */ ++ case 0x40: /* Docked, tent mode, keyboard not usable */ ++ return 1; ++ default: ++ pr_warn("Unknown kbd_dock_state 0x%02x\n", kbd_dock_state); ++ } ++ ++ return 0; ++} ++ ++static void acer_kbd_dock_get_initial_state(void) ++{ ++ u8 *output, input[8] = { 0x05, 0x00, }; ++ struct acpi_buffer input_buf = { sizeof(input), input }; ++ struct acpi_buffer output_buf = { ACPI_ALLOCATE_BUFFER, NULL }; ++ union acpi_object *obj; ++ acpi_status status; ++ int sw_tablet_mode; ++ ++ status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input_buf, &output_buf); ++ if (ACPI_FAILURE(status)) { ++ ACPI_EXCEPTION((AE_INFO, status, "Error getting keyboard-dock initial status")); ++ return; ++ } ++ ++ obj = output_buf.pointer; ++ if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length != 8) { ++ pr_err("Unexpected output format getting keyboard-dock initial status\n"); ++ goto out_free_obj; ++ } ++ ++ output = obj->buffer.pointer; ++ if (output[0] != 0x00 || (output[3] != 0x05 && output[3] != 0x45)) { ++ pr_err("Unexpected output [0]=0x%02x [3]=0x%02x getting keyboard-dock initial status\n", ++ output[0], output[3]); ++ goto out_free_obj; ++ } ++ ++ sw_tablet_mode = acer_kbd_dock_state_to_sw_tablet_mode(output[4]); ++ input_report_switch(acer_wmi_input_dev, SW_TABLET_MODE, sw_tablet_mode); ++ ++out_free_obj: ++ kfree(obj); ++} ++ ++static void acer_kbd_dock_event(const struct event_return_value *event) ++{ ++ int sw_tablet_mode; ++ ++ if (!has_cap(ACER_CAP_KBD_DOCK)) ++ return; ++ ++ sw_tablet_mode = acer_kbd_dock_state_to_sw_tablet_mode(event->kbd_dock_state); ++ input_report_switch(acer_wmi_input_dev, SW_TABLET_MODE, sw_tablet_mode); ++ input_sync(acer_wmi_input_dev); ++} ++ + /* + * Rfkill devices + */ +@@ -1770,8 +1874,9 @@ static void acer_wmi_notify(u32 value, void *context) + sparse_keymap_report_event(acer_wmi_input_dev, scancode, 1, true); + } + break; +- case WMID_ACCEL_EVENT: ++ case WMID_ACCEL_OR_KBD_DOCK_EVENT: + acer_gsensor_event(); ++ acer_kbd_dock_event(&return_value); + break; + default: + pr_warn("Unknown function number - %d - %d\n", +@@ -1894,8 +1999,6 @@ static int __init acer_wmi_accel_setup(void) + gsensor_handle = acpi_device_handle(adev); + acpi_dev_put(adev); + +- interface->capability |= ACER_CAP_ACCEL; +- + acer_wmi_accel_dev = input_allocate_device(); + if (!acer_wmi_accel_dev) + return -ENOMEM; +@@ -1921,11 +2024,6 @@ err_free_dev: + return err; + } + +-static void acer_wmi_accel_destroy(void) +-{ +- input_unregister_device(acer_wmi_accel_dev); +-} +- + static int __init acer_wmi_input_setup(void) + { + acpi_status status; +@@ -1943,6 +2041,9 @@ static int __init acer_wmi_input_setup(void) + if (err) + goto err_free_dev; + ++ if (has_cap(ACER_CAP_KBD_DOCK)) ++ input_set_capability(acer_wmi_input_dev, EV_SW, SW_TABLET_MODE); ++ + status = wmi_install_notify_handler(ACERWMID_EVENT_GUID, + acer_wmi_notify, NULL); + if (ACPI_FAILURE(status)) { +@@ -1950,6 +2051,9 @@ 
static int __init acer_wmi_input_setup(void) + goto err_free_dev; + } + ++ if (has_cap(ACER_CAP_KBD_DOCK)) ++ acer_kbd_dock_get_initial_state(); ++ + err = input_register_device(acer_wmi_input_dev); + if (err) + goto err_uninstall_notifier; +@@ -2080,7 +2184,7 @@ static int acer_resume(struct device *dev) + if (has_cap(ACER_CAP_BRIGHTNESS)) + set_u32(data->brightness, ACER_CAP_BRIGHTNESS); + +- if (has_cap(ACER_CAP_ACCEL)) ++ if (acer_wmi_accel_dev) + acer_gsensor_init(); + + return 0; +@@ -2181,7 +2285,7 @@ static int __init acer_wmi_init(void) + } + /* WMID always provides brightness methods */ + interface->capability |= ACER_CAP_BRIGHTNESS; +- } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa) { ++ } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa && force_caps == -1) { + pr_err("No WMID device detection method found\n"); + return -ENODEV; + } +@@ -2211,7 +2315,14 @@ static int __init acer_wmi_init(void) + if (acpi_video_get_backlight_type() != acpi_backlight_vendor) + interface->capability &= ~ACER_CAP_BRIGHTNESS; + +- if (wmi_has_guid(WMID_GUID3)) { ++ if (wmi_has_guid(WMID_GUID3)) ++ interface->capability |= ACER_CAP_SET_FUNCTION_MODE; ++ ++ if (force_caps != -1) ++ interface->capability = force_caps; ++ ++ if (wmi_has_guid(WMID_GUID3) && ++ (interface->capability & ACER_CAP_SET_FUNCTION_MODE)) { + if (ACPI_FAILURE(acer_wmi_enable_rf_button())) + pr_warn("Cannot enable RF Button Driver\n"); + +@@ -2270,8 +2381,8 @@ error_device_alloc: + error_platform_register: + if (wmi_has_guid(ACERWMID_EVENT_GUID)) + acer_wmi_input_destroy(); +- if (has_cap(ACER_CAP_ACCEL)) +- acer_wmi_accel_destroy(); ++ if (acer_wmi_accel_dev) ++ input_unregister_device(acer_wmi_accel_dev); + + return err; + } +@@ -2281,8 +2392,8 @@ static void __exit acer_wmi_exit(void) + if (wmi_has_guid(ACERWMID_EVENT_GUID)) + acer_wmi_input_destroy(); + +- if (has_cap(ACER_CAP_ACCEL)) +- acer_wmi_accel_destroy(); ++ if (acer_wmi_accel_dev) ++ input_unregister_device(acer_wmi_accel_dev); + + remove_debugfs(); + platform_device_unregister(acer_platform_device); +diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h +index eec7928ff8fe0..99580c22f91a4 100644 +--- a/include/linux/eeprom_93xx46.h ++++ b/include/linux/eeprom_93xx46.h +@@ -16,6 +16,8 @@ struct eeprom_93xx46_platform_data { + #define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0) + /* Instructions such as EWEN are (addrlen + 2) in length. 
*/ + #define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1) ++/* Add extra cycle after address during a read */ ++#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2) + + /* + * optional hooks to control additional logic +diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig +index 29e2bd5cc5af7..7dce11ab28062 100644 +--- a/net/dsa/Kconfig ++++ b/net/dsa/Kconfig +@@ -9,6 +9,7 @@ menuconfig NET_DSA + tristate "Distributed Switch Architecture" + depends on HAVE_NET_DSA + depends on BRIDGE || BRIDGE=n ++ select GRO_CELLS + select NET_SWITCHDEV + select PHYLINK + select NET_DEVLINK +diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c +index 43120a3fb06f3..ca80f86995e68 100644 +--- a/net/dsa/dsa.c ++++ b/net/dsa/dsa.c +@@ -238,7 +238,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, + if (dsa_skb_defer_rx_timestamp(p, skb)) + return 0; + +- netif_receive_skb(skb); ++ gro_cells_receive(&p->gcells, skb); + + return 0; + } +diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h +index bf9947c577b6d..d8e850724d13c 100644 +--- a/net/dsa/dsa_priv.h ++++ b/net/dsa/dsa_priv.h +@@ -11,6 +11,7 @@ + #include <linux/netdevice.h> + #include <linux/netpoll.h> + #include <net/dsa.h> ++#include <net/gro_cells.h> + + enum { + DSA_NOTIFIER_AGEING_TIME, +@@ -68,6 +69,8 @@ struct dsa_slave_priv { + + struct pcpu_sw_netstats *stats64; + ++ struct gro_cells gcells; ++ + /* DSA port data, such as switch, port index, etc. */ + struct dsa_port *dp; + +diff --git a/net/dsa/slave.c b/net/dsa/slave.c +index f734ce0bcb56e..06f8874d53eea 100644 +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -1431,6 +1431,11 @@ int dsa_slave_create(struct dsa_port *port) + free_netdev(slave_dev); + return -ENOMEM; + } ++ ++ ret = gro_cells_init(&p->gcells, slave_dev); ++ if (ret) ++ goto out_free; ++ + p->dp = port; + INIT_LIST_HEAD(&p->mall_tc_list); + INIT_WORK(&port->xmit_work, dsa_port_xmit_work); +@@ -1443,7 +1448,7 @@ int dsa_slave_create(struct dsa_port *port) + ret = dsa_slave_phy_setup(slave_dev); + if (ret) { + netdev_err(master, "error %d setting up slave phy\n", ret); +- goto out_free; ++ goto out_gcells; + } + + dsa_slave_notify(slave_dev, DSA_PORT_REGISTER); +@@ -1462,6 +1467,8 @@ out_phy: + phylink_disconnect_phy(p->dp->pl); + rtnl_unlock(); + phylink_destroy(p->dp->pl); ++out_gcells: ++ gro_cells_destroy(&p->gcells); + out_free: + free_percpu(p->stats64); + free_netdev(slave_dev); +@@ -1482,6 +1489,7 @@ void dsa_slave_destroy(struct net_device *slave_dev) + dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER); + unregister_netdev(slave_dev); + phylink_destroy(dp->pl); ++ gro_cells_destroy(&p->gcells); + free_percpu(p->stats64); + free_netdev(slave_dev); + } +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c +index 9ee610504bac9..cfd3077174731 100644 +--- a/sound/soc/intel/boards/bytcr_rt5640.c ++++ b/sound/soc/intel/boards/bytcr_rt5640.c +@@ -435,6 +435,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, ++ { ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ARCHOS 140 CESIUM"), ++ }, ++ .driver_data = (void *)(BYT_RT5640_IN1_MAP | ++ BYT_RT5640_JD_SRC_JD2_IN4N | ++ BYT_RT5640_OVCD_TH_2000UA | ++ BYT_RT5640_OVCD_SF_0P75 | ++ BYT_RT5640_SSP0_AIF1 | ++ BYT_RT5640_MCLK_EN), ++ }, + { + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |